Unverified Commit bc467f06 authored by Luca Boccassi's avatar Luca Boccassi Committed by GitHub

Merge pull request #2944 from sigiesec/unity

Problem: insufficient unit tests for poller concept and ypipe
parents e57afec8 94743fd2
......@@ -580,7 +580,136 @@ set (cxx-sources
udp_address.cpp
scatter.cpp
gather.cpp
zap_client.cpp)
zap_client.cpp
# at least for VS, the header files must also be listed
address.hpp
array.hpp
atomic_counter.hpp
atomic_ptr.hpp
blob.hpp
client.hpp
clock.hpp
command.hpp
condition_variable.hpp
config.hpp
ctx.hpp
curve_client.hpp
curve_client_tools.hpp
curve_mechanism_base.hpp
curve_server.hpp
dbuffer.hpp
dealer.hpp
decoder.hpp
decoder_allocators.hpp
devpoll.hpp
dgram.hpp
dish.hpp
dist.hpp
encoder.hpp
epoll.hpp
err.hpp
fd.hpp
fq.hpp
gather.hpp
gssapi_client.hpp
gssapi_mechanism_base.hpp
gssapi_server.hpp
i_decoder.hpp
i_encoder.hpp
i_engine.hpp
i_mailbox.hpp
i_poll_events.hpp
io_object.hpp
io_thread.hpp
ip.hpp
ipc_address.hpp
ipc_connecter.hpp
ipc_listener.hpp
kqueue.hpp
lb.hpp
likely.hpp
macros.hpp
mailbox.hpp
mailbox_safe.hpp
mechanism.hpp
mechanism_base.hpp
metadata.hpp
msg.hpp
mtrie.hpp
mutex.hpp
norm_engine.hpp
null_mechanism.hpp
object.hpp
options.hpp
own.hpp
pair.hpp
pgm_receiver.hpp
pgm_sender.hpp
pgm_socket.hpp
pipe.hpp
plain_client.hpp
plain_server.hpp
poll.hpp
poller.hpp
poller_base.hpp
pollset.hpp
precompiled.hpp
proxy.hpp
pub.hpp
pull.hpp
push.hpp
radio.hpp
random.hpp
raw_decoder.hpp
raw_encoder.hpp
reaper.hpp
rep.hpp
req.hpp
router.hpp
scatter.hpp
select.hpp
server.hpp
session_base.hpp
signaler.hpp
socket_base.hpp
socket_poller.hpp
socks.hpp
socks_connecter.hpp
stdint.hpp
stream.hpp
stream_engine.hpp
sub.hpp
tcp.hpp
tcp_address.hpp
tcp_connecter.hpp
tcp_listener.hpp
thread.hpp
timers.hpp
tipc_address.hpp
tipc_connecter.hpp
tipc_listener.hpp
trie.hpp
udp_address.hpp
udp_engine.hpp
v1_decoder.hpp
v1_encoder.hpp
v2_decoder.hpp
v2_encoder.hpp
v2_protocol.hpp
vmci.hpp
vmci_address.hpp
vmci_connecter.hpp
vmci_listener.hpp
windows.hpp
wire.hpp
xpub.hpp
xsub.hpp
ypipe.hpp
ypipe_base.hpp
ypipe_conflate.hpp
yqueue.hpp
zap_client.hpp
)
if (MINGW)
# Generate the right type when using -m32 or -m64
......
......@@ -625,7 +625,8 @@ tests_test_atomics_SOURCES = tests/test_atomics.cpp
tests_test_atomics_LDADD = src/libzmq.la
tests_test_sockopt_hwm_SOURCES = tests/test_sockopt_hwm.cpp
tests_test_sockopt_hwm_LDADD = src/libzmq.la
tests_test_sockopt_hwm_LDADD = src/libzmq.la ${UNITY_LIBS}
tests_test_sockopt_hwm_CPPFLAGS = ${UNITY_CPPFLAGS}
tests_test_setsockopt_SOURCES = tests/test_setsockopt.cpp
tests_test_setsockopt_LDADD = src/libzmq.la
......
version: build-{build}
clone_depth: 1
shallow_clone: true
skip_tags: true
......@@ -15,6 +15,15 @@ environment:
configuration: Release
WITH_LIBSODIUM: ON
ENABLE_CURVE: ON
- platform: Win32
configuration: Release
POLLER: poll
WITH_LIBSODIUM: ON
ENABLE_CURVE: ON
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
CMAKE_GENERATOR: "Visual Studio 15 2017"
MSVCVERSION: "v141"
MSVCYEAR: "vs2017"
- platform: Win32
configuration: Debug
WITH_LIBSODIUM: ON
......@@ -51,15 +60,6 @@ environment:
CMAKE_GENERATOR: "Visual Studio 15 2017"
MSVCVERSION: "v141"
MSVCYEAR: "vs2017"
- platform: Win32
configuration: Release
POLLER: poll
WITH_LIBSODIUM: ON
ENABLE_CURVE: ON
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
CMAKE_GENERATOR: "Visual Studio 15 2017"
MSVCVERSION: "v141"
MSVCYEAR: "vs2017"
matrix:
fast_finish: false
......
......@@ -44,5 +44,6 @@ if [ "$DO_CLANG_FORMAT_CHECK" -eq "1" ] ; then
exit 1
fi
else
( PKG_CONFIG_PATH=${BUILD_PREFIX}/lib/pkgconfig cmake "${CMAKE_OPTS[@]}" .. && make -j5 all VERBOSE=1 && make install && make -j5 test ) || exit 1
export CTEST_OUTPUT_ON_FAILURE=1
( PKG_CONFIG_PATH=${BUILD_PREFIX}/lib/pkgconfig cmake "${CMAKE_OPTS[@]}" .. && make -j5 all VERBOSE=1 && make install && make -j5 test ARGS="-V" ) || exit 1
fi
......@@ -37,7 +37,7 @@
#elif defined ZMQ_HAVE_ATOMIC_INTRINSICS
#define ZMQ_ATOMIC_COUNTER_INTRINSIC
#elif (defined __cplusplus && __cplusplus >= 201103L) \
|| (defined _MSC_VER && _MSC_VER >= 1700)
|| (defined _MSC_VER && _MSC_VER >= 1900)
#define ZMQ_ATOMIC_COUNTER_CXX11
#elif (defined __i386__ || defined __x86_64__) && defined __GNUC__
#define ZMQ_ATOMIC_COUNTER_X86
......
......@@ -34,8 +34,8 @@
#define ZMQ_ATOMIC_PTR_MUTEX
#elif defined ZMQ_HAVE_ATOMIC_INTRINSICS
#define ZMQ_ATOMIC_PTR_INTRINSIC
#elif ((defined __cplusplus && __cplusplus >= 201103L) \
|| (defined _MSC_VER && _MSC_VER >= 1700))
#elif (defined __cplusplus && __cplusplus >= 201103L) \
|| (defined _MSC_VER && _MSC_VER >= 1900)
#define ZMQ_ATOMIC_PTR_CXX11
#elif (defined __i386__ || defined __x86_64__) && defined __GNUC__
#define ZMQ_ATOMIC_PTR_X86
......
......@@ -35,6 +35,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <algorithm>
#include <new>
......@@ -45,8 +46,7 @@
#include "i_poll_events.hpp"
zmq::epoll_t::epoll_t (const zmq::thread_ctx_t &ctx_) :
ctx (ctx_),
stopping (false)
worker_poller_base_t (ctx_)
{
#ifdef ZMQ_USE_EPOLL_CLOEXEC
// Setting this option result in sane behaviour when exec() functions
......@@ -62,7 +62,7 @@ zmq::epoll_t::epoll_t (const zmq::thread_ctx_t &ctx_) :
zmq::epoll_t::~epoll_t ()
{
// Wait till the worker thread exits.
worker.stop ();
stop_worker ();
close (epoll_fd);
for (retired_t::iterator it = retired.begin (); it != retired.end ();
......@@ -73,6 +73,7 @@ zmq::epoll_t::~epoll_t ()
zmq::epoll_t::handle_t zmq::epoll_t::add_fd (fd_t fd_, i_poll_events *events_)
{
check_thread ();
poll_entry_t *pe = new (std::nothrow) poll_entry_t;
alloc_assert (pe);
......@@ -96,6 +97,7 @@ zmq::epoll_t::handle_t zmq::epoll_t::add_fd (fd_t fd_, i_poll_events *events_)
void zmq::epoll_t::rm_fd (handle_t handle_)
{
check_thread ();
poll_entry_t *pe = (poll_entry_t *) handle_;
int rc = epoll_ctl (epoll_fd, EPOLL_CTL_DEL, pe->fd, &pe->ev);
errno_assert (rc != -1);
......@@ -110,6 +112,7 @@ void zmq::epoll_t::rm_fd (handle_t handle_)
void zmq::epoll_t::set_pollin (handle_t handle_)
{
check_thread ();
poll_entry_t *pe = (poll_entry_t *) handle_;
pe->ev.events |= EPOLLIN;
int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
......@@ -118,6 +121,7 @@ void zmq::epoll_t::set_pollin (handle_t handle_)
void zmq::epoll_t::reset_pollin (handle_t handle_)
{
check_thread ();
poll_entry_t *pe = (poll_entry_t *) handle_;
pe->ev.events &= ~((short) EPOLLIN);
int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
......@@ -126,6 +130,7 @@ void zmq::epoll_t::reset_pollin (handle_t handle_)
void zmq::epoll_t::set_pollout (handle_t handle_)
{
check_thread ();
poll_entry_t *pe = (poll_entry_t *) handle_;
pe->ev.events |= EPOLLOUT;
int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
......@@ -134,20 +139,16 @@ void zmq::epoll_t::set_pollout (handle_t handle_)
void zmq::epoll_t::reset_pollout (handle_t handle_)
{
check_thread ();
poll_entry_t *pe = (poll_entry_t *) handle_;
pe->ev.events &= ~((short) EPOLLOUT);
int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
errno_assert (rc != -1);
}
void zmq::epoll_t::start ()
{
ctx.start_thread (worker, worker_routine, this);
}
void zmq::epoll_t::stop ()
{
stopping = true;
check_thread ();
}
int zmq::epoll_t::max_fds ()
......@@ -159,10 +160,18 @@ void zmq::epoll_t::loop ()
{
epoll_event ev_buf[max_io_events];
while (!stopping) {
while (true) {
// Execute any due timers.
int timeout = (int) execute_timers ();
if (get_load () == 0) {
if (timeout == 0)
break;
// TODO sleep for timeout
continue;
}
// Wait for events.
int n = epoll_wait (epoll_fd, &ev_buf[0], max_io_events,
timeout ? timeout : -1);
......@@ -199,9 +208,4 @@ void zmq::epoll_t::loop ()
}
}
void zmq::epoll_t::worker_routine (void *arg_)
{
((epoll_t *) arg_)->loop ();
}
#endif
......@@ -50,7 +50,7 @@ struct i_poll_events;
// This class implements socket polling mechanism using the Linux-specific
// epoll mechanism.
class epoll_t : public poller_base_t
class epoll_t : public worker_poller_base_t
{
public:
typedef void *handle_t;
......@@ -65,21 +65,14 @@ class epoll_t : public poller_base_t
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
void start ();
void stop ();
static int max_fds ();
private:
// Main worker thread routine.
static void worker_routine (void *arg_);
// Main event loop.
void loop ();
// Reference to ZMQ context.
const thread_ctx_t &ctx;
// Main epoll file descriptor
fd_t epoll_fd;
......@@ -94,9 +87,6 @@ class epoll_t : public poller_base_t
typedef std::vector<poll_entry_t *> retired_t;
retired_t retired;
// If true, thread is in the process of shutting down.
bool stopping;
// Handle of the physical thread doing the I/O work.
thread_t worker;
......
......@@ -78,8 +78,8 @@ int wsa_error_to_errno (int errcode);
if (unlikely (!(x))) { \
const char *errstr = zmq::wsa_error (); \
if (errstr != NULL) { \
fprintf (stderr, "Assertion failed: %s (%s:%d)\n", errstr, \
__FILE__, __LINE__); \
fprintf (stderr, "Assertion failed: %s [%i] (%s:%d)\n", \
errstr, WSAGetLastError (), __FILE__, __LINE__); \
fflush (stderr); \
zmq::zmq_abort (errstr); \
} \
......
......@@ -112,8 +112,7 @@ zmq::poller_t *zmq::io_thread_t::get_poller ()
void zmq::io_thread_t::process_stop ()
{
if (mailbox_handle) {
poller->rm_fd (mailbox_handle);
}
zmq_assert (mailbox_handle);
poller->rm_fd (mailbox_handle);
poller->stop ();
}
This diff is collapsed.
......@@ -57,6 +57,16 @@ int set_nosigpipe (fd_t s_);
// Binds the underlying socket to the given device, eg. VRF or interface
void bind_to_device (fd_t s_, std::string &bound_device_);
// Initialize network subsystem. May be called multiple times. Each call must be matched by a call to shutdown_network.
bool initialize_network ();
// Shutdown network subsystem. Must be called once for each call to initialize_network before terminating.
void shutdown_network ();
// Creates a pair of sockets (using signaler_port on OS using TCP sockets).
// Returns -1 if we could not make the socket pair successfully
int make_fdpair (fd_t *r_, fd_t *w_);
}
#endif
......@@ -55,8 +55,7 @@
#endif
zmq::kqueue_t::kqueue_t (const zmq::thread_ctx_t &ctx_) :
ctx (ctx_),
stopping (false)
worker_poller_base_t (ctx_)
{
// Create event queue
kqueue_fd = kqueue ();
......@@ -68,12 +67,13 @@ zmq::kqueue_t::kqueue_t (const zmq::thread_ctx_t &ctx_) :
zmq::kqueue_t::~kqueue_t ()
{
worker.stop ();
stop_worker ();
close (kqueue_fd);
}
void zmq::kqueue_t::kevent_add (fd_t fd_, short filter_, void *udata_)
{
check_thread ();
struct kevent ev;
EV_SET (&ev, fd_, filter_, EV_ADD, 0, 0, (kevent_udata_t) udata_);
......@@ -93,6 +93,7 @@ void zmq::kqueue_t::kevent_delete (fd_t fd_, short filter_)
zmq::kqueue_t::handle_t zmq::kqueue_t::add_fd (fd_t fd_,
i_poll_events *reactor_)
{
check_thread ();
poll_entry_t *pe = new (std::nothrow) poll_entry_t;
alloc_assert (pe);
......@@ -108,6 +109,7 @@ zmq::kqueue_t::handle_t zmq::kqueue_t::add_fd (fd_t fd_,
void zmq::kqueue_t::rm_fd (handle_t handle_)
{
check_thread ();
poll_entry_t *pe = (poll_entry_t *) handle_;
if (pe->flag_pollin)
kevent_delete (pe->fd, EVFILT_READ);
......@@ -121,6 +123,7 @@ void zmq::kqueue_t::rm_fd (handle_t handle_)
void zmq::kqueue_t::set_pollin (handle_t handle_)
{
check_thread ();
poll_entry_t *pe = (poll_entry_t *) handle_;
if (likely (!pe->flag_pollin)) {
pe->flag_pollin = true;
......@@ -130,6 +133,7 @@ void zmq::kqueue_t::set_pollin (handle_t handle_)
void zmq::kqueue_t::reset_pollin (handle_t handle_)
{
check_thread ();
poll_entry_t *pe = (poll_entry_t *) handle_;
if (likely (pe->flag_pollin)) {
pe->flag_pollin = false;
......@@ -139,6 +143,7 @@ void zmq::kqueue_t::reset_pollin (handle_t handle_)
void zmq::kqueue_t::set_pollout (handle_t handle_)
{
check_thread ();
poll_entry_t *pe = (poll_entry_t *) handle_;
if (likely (!pe->flag_pollout)) {
pe->flag_pollout = true;
......@@ -148,6 +153,7 @@ void zmq::kqueue_t::set_pollout (handle_t handle_)
void zmq::kqueue_t::reset_pollout (handle_t handle_)
{
check_thread ();
poll_entry_t *pe = (poll_entry_t *) handle_;
if (likely (pe->flag_pollout)) {
pe->flag_pollout = false;
......@@ -155,14 +161,8 @@ void zmq::kqueue_t::reset_pollout (handle_t handle_)
}
}
void zmq::kqueue_t::start ()
{
ctx.start_thread (worker, worker_routine, this);
}
void zmq::kqueue_t::stop ()
{
stopping = true;
}
int zmq::kqueue_t::max_fds ()
......@@ -172,10 +172,18 @@ int zmq::kqueue_t::max_fds ()
void zmq::kqueue_t::loop ()
{
while (!stopping) {
while (true) {
// Execute any due timers.
int timeout = (int) execute_timers ();
if (get_load () == 0) {
if (timeout == 0)
break;
// TODO sleep for timeout
continue;
}
// Wait for events.
struct kevent ev_buf[max_io_events];
timespec ts = {timeout / 1000, (timeout % 1000) * 1000000};
......@@ -219,9 +227,4 @@ void zmq::kqueue_t::loop ()
}
}
void zmq::kqueue_t::worker_routine (void *arg_)
{
((kqueue_t *) arg_)->loop ();
}
#endif
......@@ -49,7 +49,7 @@ struct i_poll_events;
// Implements socket polling mechanism using the BSD-specific
// kqueue interface.
class kqueue_t : public poller_base_t
class kqueue_t : public worker_poller_base_t
{
public:
typedef void *handle_t;
......@@ -64,21 +64,14 @@ class kqueue_t : public poller_base_t
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
void start ();
void stop ();
static int max_fds ();
private:
// Main worker thread routine.
static void worker_routine (void *arg_);
// Main event loop.
void loop ();
// Reference to ZMQ context.
const thread_ctx_t &ctx;
// File descriptor referring to the kernel event queue.
fd_t kqueue_fd;
......@@ -100,12 +93,6 @@ class kqueue_t : public poller_base_t
typedef std::vector<poll_entry_t *> retired_t;
retired_t retired;
// If true, thread is in the process of shutting down.
bool stopping;
// Handle of the physical thread doing the I/O work.
thread_t worker;
kqueue_t (const kqueue_t &);
const kqueue_t &operator= (const kqueue_t &);
......
......@@ -40,22 +40,94 @@ namespace zmq
{
struct i_poll_events;
// A build of libzmq must provide an implementation of the poller_t concept. By
// convention, this is done via a typedef.
//
// At the time of writing, the following implementations of the poller_t
// concept exist: zmq::devpoll_t, zmq::epoll_t, zmq::kqueue_t, zmq::poll_t,
// zmq::pollset_t, zmq::select_t
//
// An implementation of the poller_t concept must provide the following public
// methods:
// Returns load of the poller.
// int get_load() const;
//
// Add a timeout to expire in timeout_ milliseconds. After the
// expiration, timer_event on sink_ object will be called with
// argument set to id_.
// void add_timer(int timeout_, zmq::i_poll_events *sink_, int id_);
//
// Cancel the timer created by sink_ object with ID equal to id_.
// void cancel_timer(zmq::i_poll_events *sink_, int id_);
//
// Adds a fd to the poller. Initially, no events are activated. These must
// be activated by the set_* methods using the returned handle_.
// handle_t add_fd(fd_t fd_, zmq::i_poll_events *events_);
//
// Deactivates any events that may be active for the given handle_, and
// removes the fd associated with the given handle_.
// void rm_fd(handle_t handle_);
//
// The set_* and reset_* methods activate resp. deactivate polling for
// input/output readiness on the respective handle_, such that the
// in_event/out_event methods on the associated zmq::i_poll_events object
// will be called.
// Note: while handle_t and fd_t may be the same type, and may even have the
// same values for some implementation, this may not be assumed in general.
// The methods may only be called with the handle returned by add_fd.
// void set_pollin(handle_t handle_);
// void reset_pollin(handle_t handle_);
// void set_pollout(handle_t handle_);
// void reset_pollout(handle_t handle_);
//
// Starts operation of the poller. See below for details.
// void start();
//
// Request termination of the poller.
// TODO: might be removed in the future, as it has no effect.
// void stop();
//
// Returns the maximum number of fds that can be added to an instance of the
// poller at the same time, or -1 if there is no such fixed limit.
// static int max_fds();
//
// Most of the methods may only be called from a zmq::i_poll_events callback
// function when invoked by the poller (and, therefore, typically from the
// poller's worker thread), with the following exceptions:
// - get_load may be called from outside
// - add_fd and add_timer may be called from outside before start
// - start may be called from outside once
//
// After a poller is started, it waits for the registered events (input/output
// readiness, timeout) to happen, and calls the respective functions on the
// zmq::i_poll_events object. It terminates when no further registrations (fds
// or timers) exist.
//
// Before start, add_fd must have been called at least once. Behavior may be
// undefined otherwise.
//
// If the poller is implemented by a single worker thread (the
// worker_poller_base_t base class may be used to implement such a poller),
// no synchronization is required for the data structures modified by
// add_fd, rm_fd, add_timer, cancel_timer, (re)set_poll(in|out). However,
// reentrancy must be considered, e.g. when one of the functions modifies
// a container that is being iterated by the poller.
// A class that can be used as a base class for implementations of the poller
// concept.
//
// For documentation of the public methods, see the description of the poller_t
// concept.
class poller_base_t
{
public:
poller_base_t ();
virtual ~poller_base_t ();
// Returns load of the poller. Note that this function can be
// invoked from a different thread!
// Methods from the poller concept.
int get_load () const;
// Add a timeout to expire in timeout_ milliseconds. After the
// expiration timer_event on sink_ object will be called with
// argument set to id_.
void add_timer (int timeout_, zmq::i_poll_events *sink_, int id_);
// Cancel the timer created by sink_ object with ID equal to id_.
void cancel_timer (zmq::i_poll_events *sink_, int id_);
protected:
......@@ -87,19 +159,26 @@ class poller_base_t
const poller_base_t &operator= (const poller_base_t &);
};
// base class for a poller with a single worker thread.
// Base class for a poller with a single worker thread.
class worker_poller_base_t : public poller_base_t
{
public:
worker_poller_base_t (const thread_ctx_t &ctx_);
void stop_worker ();
// Starts the poller.
// Methods from the poller concept.
void start ();
protected:
// Checks whether the currently executing thread is the worker thread
// via an assertion.
// Should be called by the add_fd, removed_fd, set_*, reset_* functions
// to ensure correct usage.
void check_thread ();
// Stops the worker thread. Should be called from the destructor of the
// leaf class.
void stop_worker ();
private:
// Main worker thread routine.
static void worker_routine (void *arg_);
......
......@@ -118,7 +118,7 @@ void zmq::select_t::trigger_events (const fd_entries_t &fd_entries_,
// Size is cached to avoid iteration through recently added descriptors.
for (fd_entries_t::size_type i = 0, size = fd_entries_.size ();
i < size && event_count_ > 0; ++i) {
// fd_entries_[i] may not be stored, since calls to
// fd_entries_[i] may not be stored, since calls to
// in_event/out_event may reallocate the vector
if (is_retired_fd (fd_entries_[i]))
......
This diff is collapsed.
......@@ -66,10 +66,6 @@ class signaler_t
#endif
private:
// Creates a pair of file descriptors that will be used
// to pass the signals.
static int make_fdpair (fd_t *r_, fd_t *w_);
// Underlying write & read file descriptor
// Will be -1 if an error occurred during initialization, e.g. we
// exceeded the number of available handles
......
......@@ -108,7 +108,7 @@ class tcp_connecter_t : public own_t, public io_object_t
// Underlying socket.
fd_t s;
// Handle corresponding to the listening socket, if file descriptor is
// Handle corresponding to the listening socket, if file descriptor is
// registered with the poller, or NULL.
handle_t handle;
......
......@@ -87,6 +87,7 @@ struct iovec
#include "signaler.hpp"
#include "socket_poller.hpp"
#include "timers.hpp"
#include "ip.hpp"
#if defined ZMQ_HAVE_OPENPGM
#define __PGM_WININT_H__
......@@ -121,42 +122,11 @@ int zmq_errno (void)
void *zmq_ctx_new (void)
{
#if defined ZMQ_HAVE_OPENPGM
// Init PGM transport. Ensure threading and timer are enabled. Find PGM
// protocol ID. Note that if you want to use gettimeofday and sleep for
// openPGM timing, set environment variables PGM_TIMER to "GTOD" and
// PGM_SLEEP to "USLEEP".
pgm_error_t *pgm_error = NULL;
const bool ok = pgm_init (&pgm_error);
if (ok != TRUE) {
// Invalid parameters don't set pgm_error_t
zmq_assert (pgm_error != NULL);
if (pgm_error->domain == PGM_ERROR_DOMAIN_TIME
&& (pgm_error->code == PGM_ERROR_FAILED)) {
// Failed to access RTC or HPET device.
pgm_error_free (pgm_error);
errno = EINVAL;
return NULL;
}
// PGM_ERROR_DOMAIN_ENGINE: WSAStartup errors or missing WSARecvMsg.
zmq_assert (false);
}
#endif
#ifdef ZMQ_HAVE_WINDOWS
// Intialise Windows sockets. Note that WSAStartup can be called multiple
// times given that WSACleanup will be called for each WSAStartup.
// We do this before the ctx constructor since its embedded mailbox_t
// object needs Winsock to be up and running.
WORD version_requested = MAKEWORD (2, 2);
WSADATA wsa_data;
int rc = WSAStartup (version_requested, &wsa_data);
zmq_assert (rc == 0);
zmq_assert (LOBYTE (wsa_data.wVersion) == 2
&& HIBYTE (wsa_data.wVersion) == 2);
#endif
// object needs the network to be up and running (at least on Windows).
if (!zmq::initialize_network ()) {
return NULL;
}
// Create 0MQ context.
zmq::ctx_t *ctx = new (std::nothrow) zmq::ctx_t;
......@@ -181,17 +151,7 @@ int zmq_ctx_term (void *ctx_)
// Shut down only if termination was not interrupted by a signal.
if (!rc || en != EINTR) {
#ifdef ZMQ_HAVE_WINDOWS
// On Windows, uninitialise socket layer.
rc = WSACleanup ();
wsa_assert (rc != SOCKET_ERROR);
#endif
#if defined ZMQ_HAVE_OPENPGM
// Shut down the OpenPGM library.
if (pgm_shutdown () != TRUE)
zmq_assert (false);
#endif
zmq::shutdown_network ();
}
errno = en;
......@@ -722,7 +682,7 @@ const char *zmq_msg_gets (const zmq_msg_t *msg_, const char *property_)
}
}
// Polling.
// Polling.
#if defined ZMQ_HAVE_POLLER
inline int zmq_poller_poll (zmq_pollitem_t *items_, int nitems_, long timeout_)
......
......@@ -208,6 +208,11 @@ if(ZMQ_HAVE_CURVE)
set_tests_properties(test_security_curve PROPERTIES TIMEOUT 60)
endif()
if(WIN32 AND ${POLLER} MATCHES "poll")
set_tests_properties(test_many_sockets PROPERTIES TIMEOUT 30)
set_tests_properties(test_immediate PROPERTIES TIMEOUT 30)
endif()
#add additional required flags
#ZMQ_USE_TWEETNACL will already be defined when not using sodium
if(ZMQ_HAVE_CURVE AND NOT ZMQ_USE_TWEETNACL)
......
......@@ -29,6 +29,16 @@
#include "testutil.hpp"
#include <unity.h>
// Unity fixture hook, invoked before each RUN_TEST; no per-test state to set up.
void setUp ()
{
}
// Unity fixture hook, invoked after each RUN_TEST; no per-test cleanup needed.
void tearDown ()
{
}
const int MAX_SENDS = 10000;
void test_change_before_connected ()
......@@ -41,9 +51,9 @@ void test_change_before_connected ()
int val = 2;
rc = zmq_setsockopt (connect_socket, ZMQ_RCVHWM, &val, sizeof (val));
assert (rc == 0);
TEST_ASSERT_EQUAL_INT (0, rc);
rc = zmq_setsockopt (bind_socket, ZMQ_SNDHWM, &val, sizeof (val));
assert (rc == 0);
TEST_ASSERT_EQUAL_INT (0, rc);
zmq_connect (connect_socket, "inproc://a");
zmq_bind (bind_socket, "inproc://a");
......@@ -51,15 +61,15 @@ void test_change_before_connected ()
size_t placeholder = sizeof (val);
val = 0;
rc = zmq_getsockopt (bind_socket, ZMQ_SNDHWM, &val, &placeholder);
assert (rc == 0);
assert (val == 2);
TEST_ASSERT_EQUAL_INT (0, rc);
TEST_ASSERT_EQUAL_INT (2, val);
int send_count = 0;
while (send_count < MAX_SENDS
&& zmq_send (bind_socket, NULL, 0, ZMQ_DONTWAIT) == 0)
++send_count;
assert (send_count == 4);
TEST_ASSERT_EQUAL_INT (4, send_count);
zmq_close (bind_socket);
zmq_close (connect_socket);
......@@ -76,29 +86,29 @@ void test_change_after_connected ()
int val = 1;
rc = zmq_setsockopt (connect_socket, ZMQ_RCVHWM, &val, sizeof (val));
assert (rc == 0);
TEST_ASSERT_EQUAL_INT (0, rc);
rc = zmq_setsockopt (bind_socket, ZMQ_SNDHWM, &val, sizeof (val));
assert (rc == 0);
TEST_ASSERT_EQUAL_INT (0, rc);
zmq_connect (connect_socket, "inproc://a");
zmq_bind (bind_socket, "inproc://a");
val = 5;
rc = zmq_setsockopt (bind_socket, ZMQ_SNDHWM, &val, sizeof (val));
assert (rc == 0);
TEST_ASSERT_EQUAL_INT (0, rc);
size_t placeholder = sizeof (val);
val = 0;
rc = zmq_getsockopt (bind_socket, ZMQ_SNDHWM, &val, &placeholder);
assert (rc == 0);
assert (val == 5);
TEST_ASSERT_EQUAL_INT (0, rc);
TEST_ASSERT_EQUAL_INT (5, val);
int send_count = 0;
while (send_count < MAX_SENDS
&& zmq_send (bind_socket, NULL, 0, ZMQ_DONTWAIT) == 0)
++send_count;
assert (send_count == 6);
TEST_ASSERT_EQUAL_INT (6, send_count);
zmq_close (bind_socket);
zmq_close (connect_socket);
......@@ -120,7 +130,8 @@ int test_fill_up_to_hwm (void *socket, int sndhwm)
{
int send_count = send_until_wouldblock (socket);
fprintf (stderr, "sndhwm==%i, send_count==%i\n", sndhwm, send_count);
assert (send_count <= sndhwm + 1 && send_count > (sndhwm / 10));
TEST_ASSERT_LESS_OR_EQUAL_INT (sndhwm + 1, send_count);
TEST_ASSERT_GREATER_THAN_INT (sndhwm / 10, send_count);
return send_count;
}
......@@ -134,11 +145,11 @@ void test_decrease_when_full ()
int val = 1;
rc = zmq_setsockopt (connect_socket, ZMQ_RCVHWM, &val, sizeof (val));
assert (rc == 0);
TEST_ASSERT_EQUAL_INT (0, rc);
int sndhwm = 100;
rc = zmq_setsockopt (bind_socket, ZMQ_SNDHWM, &sndhwm, sizeof (sndhwm));
assert (rc == 0);
TEST_ASSERT_EQUAL_INT (0, rc);
zmq_bind (bind_socket, "inproc://a");
zmq_connect (connect_socket, "inproc://a");
......@@ -149,14 +160,14 @@ void test_decrease_when_full ()
// Decrease snd hwm
sndhwm = 70;
rc = zmq_setsockopt (bind_socket, ZMQ_SNDHWM, &sndhwm, sizeof (sndhwm));
assert (rc == 0);
TEST_ASSERT_EQUAL_INT (0, rc);
int sndhwm_read = 0;
size_t sndhwm_read_size = sizeof (sndhwm_read);
rc =
zmq_getsockopt (bind_socket, ZMQ_SNDHWM, &sndhwm_read, &sndhwm_read_size);
assert (rc == 0);
assert (sndhwm_read == sndhwm);
TEST_ASSERT_EQUAL_INT (0, rc);
TEST_ASSERT_EQUAL_INT (sndhwm, sndhwm_read);
msleep (SETTLE_TIME);
......@@ -167,11 +178,11 @@ void test_decrease_when_full ()
read_count < MAX_SENDS
&& zmq_recv (connect_socket, &read_data, sizeof (read_data), ZMQ_DONTWAIT)
== sizeof (read_data)) {
assert (read_count == read_data);
TEST_ASSERT_EQUAL_INT (read_data, read_count);
++read_count;
}
assert (read_count == send_count);
TEST_ASSERT_EQUAL_INT (send_count, read_count);
// Give io thread some time to catch up
msleep (SETTLE_TIME);
......@@ -187,7 +198,12 @@ void test_decrease_when_full ()
// Test driver: registers the HWM socket-option tests with Unity and runs them.
// Note: the tests must only be executed via RUN_TEST (not called directly as
// well), otherwise each test runs twice and before setup_test_environment.
int main ()
{
setup_test_environment ();
UNITY_BEGIN ();
RUN_TEST (test_change_before_connected);
RUN_TEST (test_change_after_connected);
RUN_TEST (test_decrease_when_full);
return UNITY_END ();
}
......@@ -20,9 +20,15 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "../tests/testutil.hpp"
#include <poller.hpp>
#include <i_poll_events.hpp>
#include <ip.hpp>
#include <unity.h>
#ifndef _WIN32
#define closesocket close
#endif
// Unity fixture hook, invoked before each RUN_TEST; no per-test state to set up.
void setUp ()
{
}
......@@ -36,12 +42,177 @@ void test_create ()
zmq::poller_t poller (thread_ctx);
}
// Disabled: starting a poller with no registered fds currently fails an
// assertion inside the poller implementation. Kept for documentation of the
// open design question (see TODO below) rather than deleted.
#if 0
// TODO this triggers an assertion. should it be a valid use case?
void test_start_empty ()
{
zmq::thread_ctx_t thread_ctx;
zmq::poller_t poller (thread_ctx);
poller.start ();
msleep (SETTLE_TIME);
}
#endif
// i_poll_events implementation used by the poller tests: counts received
// in/timer events and removes its fd from the poller on the first callback,
// so the poller has no remaining registrations and can terminate.
struct test_events_t : zmq::i_poll_events
{
test_events_t (zmq::fd_t fd_, zmq::poller_t &poller_) :
fd (fd_),
poller (poller_)
{
}
// Invoked by the poller when the fd becomes readable; records the event
// and deregisters the fd (callbacks may call rm_fd on their own handle).
virtual void in_event ()
{
in_events.add (1);
poller.rm_fd (handle);
handle = (zmq::poller_t::handle_t) NULL;
}
virtual void out_event ()
{
// TODO
}
// Invoked by the poller when a timer registered via add_timer expires.
virtual void timer_event (int id_)
{
LIBZMQ_UNUSED (id_);
timer_events.add (1);
poller.rm_fd (handle);
handle = (zmq::poller_t::handle_t) NULL;
}
// Stores the handle returned by poller_t::add_fd so the callbacks above
// can later remove the fd; must be called before events are delivered.
void set_handle (zmq::poller_t::handle_t handle_) { handle = handle_; }
// Event counters; atomic because the test thread reads them while the
// poller's worker thread increments them.
zmq::atomic_counter_t in_events, timer_events;
private:
zmq::fd_t fd;
zmq::poller_t &poller;
zmq::poller_t::handle_t handle;
};
// Busy-waits until the events object has recorded at least one in_event.
// NOTE(review): the timeout assertion is compiled only under ZMQ_BUILD_DRAFT
// (zmq_stopwatch_intermediate is a draft API), so a non-draft build would
// spin forever if the event never arrives — confirm this is acceptable.
void wait_in_events (test_events_t &events)
{
void *watch = zmq_stopwatch_start ();
while (events.in_events.get () < 1) {
#ifdef ZMQ_BUILD_DRAFT
TEST_ASSERT_LESS_OR_EQUAL_MESSAGE (SETTLE_TIME,
zmq_stopwatch_intermediate (watch),
"Timeout waiting for in event");
#endif
}
zmq_stopwatch_stop (watch);
}
// Busy-waits until the events object has recorded at least one timer_event.
// NOTE(review): as in wait_in_events, the timeout check exists only in draft
// builds; otherwise this loop has no upper bound.
void wait_timer_events (test_events_t &events)
{
void *watch = zmq_stopwatch_start ();
while (events.timer_events.get () < 1) {
#ifdef ZMQ_BUILD_DRAFT
TEST_ASSERT_LESS_OR_EQUAL_MESSAGE (SETTLE_TIME,
zmq_stopwatch_intermediate (watch),
"Timeout waiting for timer event");
#endif
}
zmq_stopwatch_stop (watch);
}
// Creates a connected fd pair via zmq::make_fdpair and switches both ends to
// non-blocking mode. Asserts that neither fd is retired_fd (i.e. creation
// succeeded) before returning the pair through *r (read end) and *w (write end).
void create_nonblocking_fdpair (zmq::fd_t *r, zmq::fd_t *w)
{
int rc = zmq::make_fdpair (r, w);
TEST_ASSERT_EQUAL_INT (0, rc);
TEST_ASSERT_NOT_EQUAL (zmq::retired_fd, *r);
TEST_ASSERT_NOT_EQUAL (zmq::retired_fd, *w);
zmq::unblock_socket (*r);
zmq::unblock_socket (*w);
}
// Makes the read end of the fd pair readable by writing to the write end w.
// On eventfd-based pairs a 64-bit counter increment is written; otherwise a
// small message is sent over the socket pair.
void send_signal (zmq::fd_t w)
{
#if defined ZMQ_HAVE_EVENTFD
const uint64_t inc = 1;
ssize_t sz = write (w, &inc, sizeof (inc));
// Use Unity assertions rather than plain assert: assert is a no-op when
// NDEBUG is defined, which would silently skip this check in release
// builds, and the rest of the file uses TEST_ASSERT_* consistently.
TEST_ASSERT_EQUAL_INT ((int) sizeof (inc), (int) sz);
#else
{
char msg[] = "test";
int rc = send (w, msg, sizeof (msg), 0);
TEST_ASSERT_EQUAL_INT ((int) sizeof (msg), rc);
}
#endif
}
// Closes the fd pair created by create_nonblocking_fdpair. With eventfd both
// ends are the same object, so only w is closed; otherwise both sockets are
// closed independently.
void close_fdpair (zmq::fd_t w, zmq::fd_t r)
{
int rc = closesocket (w);
TEST_ASSERT_EQUAL_INT (0, rc);
#if !defined ZMQ_HAVE_EVENTFD
rc = closesocket (r);
TEST_ASSERT_EQUAL_INT (0, rc);
#else
LIBZMQ_UNUSED (r);
#endif
}
// Registers the read end of an fd pair with a poller, starts the poller,
// makes the fd readable, and verifies that in_event is delivered. The event
// handler removes the fd, so the poller terminates with no registrations left.
void test_add_fd_and_start_and_receive_data ()
{
zmq::thread_ctx_t thread_ctx;
zmq::poller_t poller (thread_ctx);
zmq::fd_t r, w;
create_nonblocking_fdpair (&r, &w);
test_events_t events (r, poller);
// add_fd/set_pollin must happen before start (see poller concept docs).
zmq::poller_t::handle_t handle = poller.add_fd (r, &events);
events.set_handle (handle);
poller.set_pollin (handle);
poller.start ();
send_signal (w);
wait_in_events (events);
// required cleanup
close_fdpair (w, r);
}
// Registers an fd (without activating pollin) plus a 50 ms timer, starts the
// poller, and verifies that timer_event is delivered. The timer handler
// removes the fd, leaving the poller with no registrations so it terminates.
void test_add_fd_and_remove_by_timer ()
{
zmq::fd_t r, w;
create_nonblocking_fdpair (&r, &w);
zmq::thread_ctx_t thread_ctx;
zmq::poller_t poller (thread_ctx);
test_events_t events (r, poller);
zmq::poller_t::handle_t handle = poller.add_fd (r, &events);
events.set_handle (handle);
poller.add_timer (50, &events, 0);
poller.start ();
wait_timer_events (events);
// required cleanup
close_fdpair (w, r);
}
// Test driver for the poller concept tests. The network subsystem must be
// initialized before any fd pairs are created, and UNITY_BEGIN must be
// called exactly once — a second, earlier call would start a Unity session
// whose result is discarded.
int main (void)
{
zmq::initialize_network ();
setup_test_environment ();
UNITY_BEGIN ();
RUN_TEST (test_create);
RUN_TEST (test_add_fd_and_start_and_receive_data);
RUN_TEST (test_add_fd_and_remove_by_timer);
// Each initialize_network call must be matched by a shutdown_network call.
zmq::shutdown_network ();
return UNITY_END ();
}
......@@ -35,12 +35,53 @@ void test_create ()
zmq::ypipe_t<int, 1> ypipe;
}
// A freshly constructed ypipe has nothing to read: check_read must be false.
void test_check_read_empty ()
{
zmq::ypipe_t<int, 1> ypipe;
TEST_ASSERT_FALSE (ypipe.check_read ());
}
void test_read_empty ()
{
zmq::ypipe_t<int, 1> ypipe;
int read_value = -1;
TEST_ASSERT_FALSE (ypipe.read (&read_value));
TEST_ASSERT_EQUAL (-1, read_value);
}
// Verifies that a write without a subsequent flush is not visible to the
// reader: both check_read and read must fail, and the output variable must
// be left untouched.
void test_write_complete_and_check_read_and_read ()
{
const int value = 42;
zmq::ypipe_t<int, 1> ypipe;
ypipe.write (value, false);
TEST_ASSERT_FALSE (ypipe.check_read ());
int read_value = -1;
TEST_ASSERT_FALSE (ypipe.read (&read_value));
TEST_ASSERT_EQUAL_INT (-1, read_value);
}
// Verifies that after write + flush the value becomes visible to the reader:
// check_read and read must succeed, and read must yield the written value.
void test_write_complete_and_flush_and_check_read_and_read ()
{
const int value = 42;
zmq::ypipe_t<int, 1> ypipe;
ypipe.write (value, false);
ypipe.flush ();
TEST_ASSERT_TRUE (ypipe.check_read ());
int read_value = -1;
TEST_ASSERT_TRUE (ypipe.read (&read_value));
TEST_ASSERT_EQUAL_INT (value, read_value);
}
// Test driver: registers the ypipe unit tests with Unity and runs them.
int main (void)
{
setup_test_environment ();
UNITY_BEGIN ();
RUN_TEST (test_create);
RUN_TEST (test_check_read_empty);
RUN_TEST (test_read_empty);
RUN_TEST (test_write_complete_and_check_read_and_read);
RUN_TEST (test_write_complete_and_flush_and_check_read_and_read);
return UNITY_END ();
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment