/*
    Copyright (c) 2011 250bpm s.r.o.
    Copyright (c) 2011 Other contributors as noted in the AUTHORS file

    This file is part of 0MQ.

    0MQ is free software; you can redistribute it and/or modify it under
    the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    0MQ is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "ipc_connecter.hpp"

#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS

#include <new>
#include <string>

#include "stream_engine.hpp"
#include "io_thread.hpp"
#include "platform.hpp"
#include "random.hpp"
#include "err.hpp"
#include "ip.hpp"
#include "address.hpp"
#include "ipc_address.hpp"
#include "session_base.hpp"

#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>

zmq::ipc_connecter_t::ipc_connecter_t (class io_thread_t *io_thread_,
      class session_base_t *session_, const options_t &options_,
      const address_t *addr_, bool delayed_start_) :
    own_t (io_thread_, options_),
    io_object_t (io_thread_),
    addr (addr_),
    s (retired_fd),
    handle_valid (false),
    delayed_start (delayed_start_),
    timer_started (false),
    session (session_),
    current_reconnect_ivl (options.reconnect_ivl)
{
    zmq_assert (addr);
    zmq_assert (addr->protocol == "ipc");
    addr->to_string (endpoint);
}

zmq::ipc_connecter_t::~ipc_connecter_t ()
{
    zmq_assert (!timer_started);
    zmq_assert (!handle_valid);
    zmq_assert (s == retired_fd);
}

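//  Handler for the 'plug' command: either start connecting immediately or,
//  if a delayed start was requested, wait for the reconnect timer to fire.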
void zmq::ipc_connecter_t::process_plug ()
{
    if (delayed_start)
        add_reconnect_timer ();
    else
        start_connecting ();
}

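//  Handler for the 'term' command: cancel any pending reconnect timer,
//  unregister the socket from the poller and close it before terminating.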
void zmq::ipc_connecter_t::process_term (int linger_)
{
    if (timer_started) {
        cancel_timer (reconnect_timer_id);
        timer_started = false;
    }

    if (handle_valid) {
        rm_fd (handle);
        handle_valid = false;
    }

    if (s != retired_fd)
        close ();

    own_t::process_term (linger_);
}

void zmq::ipc_connecter_t::in_event ()
{
    //  We are not polling for incoming data, so we are actually called
    //  because of an error here. However, on some platforms we can get an
    //  error on the out event as well, so we simply handle both events in
    //  the same way.
    out_event ();
}

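//  Called by the I/O thread when the pending asynchronous connect either
//  completes (the socket becomes writable) or fails.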
void zmq::ipc_connecter_t::out_event ()
{
    fd_t fd = connect ();
    rm_fd (handle);
    handle_valid = false;

    //  Handle the error condition by attempting to reconnect.
    if (fd == retired_fd) {
        close ();
        add_reconnect_timer ();
        return;
    }
    //  Create the engine object for this connection.
    stream_engine_t *engine = new (std::nothrow)
        stream_engine_t (fd, options, endpoint);
    alloc_assert (engine);

    //  Attach the engine to the corresponding session object.
    send_attach (session, engine);

    //  Shut the connecter down.
    terminate ();

    session->monitor_event (ZMQ_EVENT_CONNECTED, endpoint.c_str(), fd);
}

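//  The reconnect timer expired; try to establish the connection again.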
void zmq::ipc_connecter_t::timer_event (int id_)
{
    zmq_assert (id_ == reconnect_timer_id);
    timer_started = false;
    start_connecting ();
}

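//  Open the socket and initiate the connect; depending on the outcome,
//  hand the fd over to the engine, poll for completion or schedule a
//  reconnect attempt.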
void zmq::ipc_connecter_t::start_connecting ()
{
    //  Open the connecting socket.
    int rc = open ();

    //  Connect may succeed in a synchronous manner.
    if (rc == 0) {
        handle = add_fd (s);
        handle_valid = true;
        out_event ();
    }

    //  Connection establishment may be delayed. Poll for its completion.
    else if (rc == -1 && errno == EINPROGRESS) {
        handle = add_fd (s);
        handle_valid = true;
        set_pollout (handle);
        session->monitor_event (ZMQ_EVENT_CONNECT_DELAYED, endpoint.c_str(), zmq_errno());
    }

    //  Handle any other error condition by eventual reconnect.
    else {
        if (s != retired_fd)
            close ();
        add_reconnect_timer ();
    }
}

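//  Schedule a reconnect attempt after the next backoff interval and
//  report the retry to the socket monitor.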
void zmq::ipc_connecter_t::add_reconnect_timer ()
{
    int rc_ivl = get_new_reconnect_ivl ();
    add_timer (rc_ivl, reconnect_timer_id);
    session->monitor_event (ZMQ_EVENT_CONNECT_RETRIED, endpoint.c_str(), rc_ivl);
    timer_started = true;
}

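//  Compute how long to wait before the next reconnect attempt: the current
//  interval plus a random jitter, with the current interval doubling on
//  every call (exponential backoff) up to reconnect_ivl_max if that limit
//  is set.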
int zmq::ipc_connecter_t::get_new_reconnect_ivl ()
{
    //  The new interval is the current interval + random value.
    int this_interval = current_reconnect_ivl +
        (generate_random () % options.reconnect_ivl);

    //  Only change the current reconnect interval if the maximum reconnect
    //  interval was set and it is larger than the base reconnect interval.
    if (options.reconnect_ivl_max > 0 && 
        options.reconnect_ivl_max > options.reconnect_ivl) {

        //  Calculate the next interval
        current_reconnect_ivl = current_reconnect_ivl * 2;
        if (current_reconnect_ivl >= options.reconnect_ivl_max) {
            current_reconnect_ivl = options.reconnect_ivl_max;
        }
    }
    return this_interval;
}

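//  Create a non-blocking AF_UNIX socket and start connecting to the peer.
//  Returns 0 if the connect succeeded synchronously, or -1 with errno set
//  (EINPROGRESS if the connect is still in progress).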
int zmq::ipc_connecter_t::open ()
{
    zmq_assert (s == retired_fd);

    //  Create the socket.
    s = open_socket (AF_UNIX, SOCK_STREAM, 0);
    if (s == -1)
        return -1;

    //  Set the non-blocking flag.
    unblock_socket (s);

    //  Connect to the remote peer.
    int rc = ::connect (
        s, addr->resolved.ipc_addr->addr (),
        addr->resolved.ipc_addr->addrlen ());

    //  Connect was successful immediately.
    if (rc == 0)
        return 0;
    //  Translate other error codes indicating that an asynchronous connect
    //  has been launched to a uniform EINPROGRESS.
    if (rc == -1 && errno == EINTR) {
        errno = EINPROGRESS;
        return -1;
    }

    //  Forward the error.
    return -1;
}

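//  Close the underlying socket and report the event to the socket monitor.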
int zmq::ipc_connecter_t::close ()
{
    zmq_assert (s != retired_fd);
    int rc = ::close (s);
    errno_assert (rc == 0);
    session->monitor_event (ZMQ_EVENT_CLOSED, endpoint.c_str(), s);
    s = retired_fd;
    return 0;
}

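//  Retrieve the result of the pending connect via SO_ERROR and, on
//  success, return the file descriptor, handing its ownership to the
//  caller; on failure return retired_fd.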
zmq::fd_t zmq::ipc_connecter_t::connect ()
{
    //  The following code should handle both Berkeley-derived socket
    //  implementations and Solaris.
    int err = 0;
#if defined ZMQ_HAVE_HPUX
    int len = sizeof (err);
#else
    socklen_t len = sizeof (err);
#endif
    int rc = getsockopt (s, SOL_SOCKET, SO_ERROR, (char*) &err, &len);
    if (rc == -1)
        err = errno;
    if (err != 0) {

        //  Assert if the error was caused by a 0MQ bug.
        //  Networking problems are OK. No need to assert.
        errno = err;
        errno_assert (errno == ECONNREFUSED || errno == ECONNRESET ||
            errno == ETIMEDOUT || errno == EHOSTUNREACH ||
            errno == ENETUNREACH || errno == ENETDOWN);

        return retired_fd;
    }

    fd_t result = s;
    s = retired_fd;
    return result;
}

#endif