ipc_connecter.cpp
/*
    Copyright (c) 2011 250bpm s.r.o.
    Copyright (c) 2011 Other contributors as noted in the AUTHORS file

    This file is part of 0MQ.

    0MQ is free software; you can redistribute it and/or modify it under
    the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    0MQ is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "ipc_connecter.hpp"

#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
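//  The IPC transport is implemented on top of AF_UNIX sockets and is
//  therefore not built on Windows or OpenVMS.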

#include <new>
#include <string>

#include "stream_engine.hpp"
#include "io_thread.hpp"
#include "platform.hpp"
#include "random.hpp"
#include "err.hpp"
#include "ip.hpp"
#include "address.hpp"
#include "ipc_address.hpp"
#include "session_base.hpp"

#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>

zmq::ipc_connecter_t::ipc_connecter_t (class io_thread_t *io_thread_,
      class session_base_t *session_, const options_t &options_,
      const address_t *addr_, bool wait_) :
    own_t (io_thread_, options_),
    io_object_t (io_thread_),
    addr (addr_),
    s (retired_fd),
    handle_valid (false),
    wait (wait_),
    session (session_),
    current_reconnect_ivl(options.reconnect_ivl)
{
    zmq_assert (addr);
    zmq_assert (addr->protocol == "ipc");
    addr->to_string (endpoint);
}

zmq::ipc_connecter_t::~ipc_connecter_t ()
{
    if (wait)
        cancel_timer (reconnect_timer_id);
    if (handle_valid)
        rm_fd (handle);

    if (s != retired_fd)
        close ();
}

void zmq::ipc_connecter_t::process_plug ()
{
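    //  If 'wait' is set, defer the first connection attempt until the
    //  reconnect timer fires; otherwise start connecting straight away.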
    if (wait)
        add_reconnect_timer();
    else
        start_connecting ();
}

void zmq::ipc_connecter_t::in_event ()
{
    //  We are not polling for incoming data, so we are actually called
    //  because of an error here. However, we can get an error on the out
    //  event as well on some platforms, so we'll simply handle both events
    //  in the same way.
    out_event ();
}

void zmq::ipc_connecter_t::out_event ()
{
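    //  The socket has become writable, which for a non-blocking connect
    //  means the attempt has finished; connect () below returns either the
    //  connected fd or retired_fd on failure.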
    fd_t fd = connect ();
    rm_fd (handle);
    handle_valid = false;

    //  Handle the error condition by attempting to reconnect.
    if (fd == retired_fd) {
        close ();
        wait = true;
        add_reconnect_timer();
        return;
    }

    //  Create the engine object for this connection.
    stream_engine_t *engine = new (std::nothrow) stream_engine_t (fd, options);
    alloc_assert (engine);

    //  Attach the engine to the corresponding session object.
    send_attach (session, engine);

    //  Shut the connecter down.
    terminate ();

    session->monitor_event (ZMQ_EVENT_CONNECTED, endpoint.c_str(), fd);
}

void zmq::ipc_connecter_t::timer_event (int id_)
{
    zmq_assert (id_ == reconnect_timer_id);
    wait = false;
    start_connecting ();
}

void zmq::ipc_connecter_t::start_connecting ()
{
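    //  Depending on how the connect attempt goes, we either hand the socket
    //  over to an engine straight away, poll for completion, or schedule a
    //  reconnect attempt.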
    //  Open the connecting socket.
    int rc = open ();

    //  Connect may succeed in a synchronous manner.
    if (rc == 0) {
        handle = add_fd (s);
        handle_valid = true;
        out_event ();
        return;
    }

    //  Connection establishment may be delayed. Poll for its completion.
    else if (rc == -1 && errno == EINPROGRESS) {
        handle = add_fd (s);
        handle_valid = true;
        set_pollout (handle);
        session->monitor_event (ZMQ_EVENT_CONNECT_DELAYED, endpoint.c_str(), zmq_errno());
        return;
    }

    //  Handle any other error condition by eventual reconnect.
    close ();
    wait = true;
    add_reconnect_timer();
}

void zmq::ipc_connecter_t::add_reconnect_timer()
{
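    //  Schedule the next connection attempt after a randomised (and possibly
    //  exponentially growing) interval, and report the retry on the monitor.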
    int rc_ivl = get_new_reconnect_ivl();
    add_timer (rc_ivl, reconnect_timer_id);
    session->monitor_event (ZMQ_EVENT_CONNECT_RETRIED, endpoint.c_str(), rc_ivl);
}

int zmq::ipc_connecter_t::get_new_reconnect_ivl ()
{
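    //  As a worked example (with hypothetical option values): for
    //  reconnect_ivl = 100 ms and reconnect_ivl_max = 1600 ms the base
    //  interval doubles through 100, 200, 400, 800 ms, is then capped at
    //  1600 ms, and up to 100 ms of random jitter is added to every attempt.
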
    //  The new interval is the current interval + random value.
    int this_interval = current_reconnect_ivl +
        (generate_random () % options.reconnect_ivl);

    //  Only change the current reconnect interval if the maximum reconnect
    //  interval was set and is larger than the base reconnect interval.
    if (options.reconnect_ivl_max > 0 &&
        options.reconnect_ivl_max > options.reconnect_ivl) {

        //  Calculate the next interval, capping it at the maximum.
        current_reconnect_ivl = current_reconnect_ivl * 2;
        if (current_reconnect_ivl >= options.reconnect_ivl_max) {
            current_reconnect_ivl = options.reconnect_ivl_max;
        }
    }
    return this_interval;
}

int zmq::ipc_connecter_t::open ()
{
    zmq_assert (s == retired_fd);

    //  Create the socket.
    s = open_socket (AF_UNIX, SOCK_STREAM, 0);
    if (s == -1)
        return -1;

    //  Set the non-blocking flag.
    unblock_socket (s);

    //  Connect to the remote peer.
    int rc = ::connect (
        s, addr->resolved.ipc_addr->addr (),
        addr->resolved.ipc_addr->addrlen ());

    //  Connect was successful immediately.
    if (rc == 0)
        return 0;

    //  Translate error codes that indicate an asynchronous connect has been
    //  launched into a uniform EINPROGRESS.
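    //  (POSIX specifies that a connect interrupted by a signal is not
    //  aborted and completes asynchronously.)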
    if (rc == -1 && errno == EINTR) {
        errno = EINPROGRESS;
        return -1;
    }

    //  Forward the error.
    return -1;
}

int zmq::ipc_connecter_t::close ()
{
    zmq_assert (s != retired_fd);
    int rc = ::close (s);
    if (rc != 0) {
        session->monitor_event (ZMQ_EVENT_CLOSE_FAILED, endpoint.c_str(), zmq_errno());
        return -1;
    }
    session->monitor_event (ZMQ_EVENT_CLOSED, endpoint.c_str(), s);
    s = retired_fd;
    return 0;
}

zmq::fd_t zmq::ipc_connecter_t::connect ()
{
    //  The following code should handle both Berkeley-derived socket
    //  implementations and Solaris.
    int err = 0;
#if defined ZMQ_HAVE_HPUX
    int len = sizeof (err);
#else
    socklen_t len = sizeof (err);
#endif
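    //  Reading SO_ERROR retrieves the error, if any, left behind by the
    //  asynchronous connect.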
    int rc = getsockopt (s, SOL_SOCKET, SO_ERROR, (char*) &err, &len);
    if (rc == -1)
        err = errno;
    if (err != 0) {

        //  Assert if the error was caused by a 0MQ bug.
        //  Networking problems are OK. No need to assert.
        errno = err;
        errno_assert (errno == ECONNREFUSED || errno == ECONNRESET ||
            errno == ETIMEDOUT || errno == EHOSTUNREACH ||
            errno == ENETUNREACH || errno == ENETDOWN);

        return retired_fd;
    }

    fd_t result = s;
    s = retired_fd;
    return result;
}

#endif