/*
    Copyright (c) 2007-2016 Contributors as noted in the AUTHORS file

    This file is part of libzmq, the ZeroMQ core engine in C++.

    libzmq is free software; you can redistribute it and/or modify it under
    the terms of the GNU Lesser General Public License (LGPL) as published
    by the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    As a special exception, the Contributors give you permission to link
    this library with independent modules to produce an executable,
    regardless of the license terms of these independent modules, and to
    copy and distribute the resulting executable under terms of your choice,
    provided that you also meet, for each linked independent module, the
    terms and conditions of the license of that module. An independent
    module is a module which is not derived from or based on this library.
    If you modify this library, you must extend this exception to your
    version of the library.

    libzmq is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
    License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef __ZMQ_YQUEUE_HPP_INCLUDED__
#define __ZMQ_YQUEUE_HPP_INCLUDED__

#include <stdlib.h>
#include <stddef.h>

#include "err.hpp"
#include "atomic_ptr.hpp"

namespace zmq
{
//  yqueue is an efficient queue implementation. The main goal is
//  to minimise the number of allocations/deallocations needed. Thus yqueue
//  allocates/deallocates elements in batches of N.
//
//  yqueue allows one thread to use the push/back functions and another one
//  to use the pop/front functions. However, the user must ensure that there
//  is no pop on an empty queue and that the two threads never access the
//  same element in an unsynchronised manner.
//
//  T is the type of the object in the queue.
//  N is the granularity of the queue (how many pushes have to be done till
//  an actual memory allocation is required).
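//
//  An illustrative sketch of the intended call sequence (single writer
//  thread, single reader thread); the element type and granularity below
//  are arbitrary examples, not part of the documented contract:
//
//      zmq::yqueue_t<int, 256> q;
//      q.push ();               //  Writer: append a new, uninitialised slot,
//      q.back () = 42;          //  then fill it in.
//      int value = q.front ();  //  Reader: read the oldest element,
//      q.pop ();                //  then discard it.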
#ifdef HAVE_POSIX_MEMALIGN
// ALIGN is the memory alignment size to use when posix_memalign is
// available. The default value is 64; this alignment prevents two queue
// chunks from occupying the same CPU cache line on architectures where
// cache lines are <= 64 bytes (i.e. most platforms except POWER). The
// value is detected at build time to account for platforms such as POWER
// and s390x, which use larger cache lines.
template <typename T, int N, size_t ALIGN = ZMQ_CACHELINE_SIZE> class yqueue_t
#else
template <typename T, int N> class yqueue_t
#endif
{
  public:
    //  Create the queue.
    inline yqueue_t ()
    {
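        //  The queue starts out with a single pre-allocated chunk. Note that
        //  'back' remains invalid (_back_chunk is NULL) until the first push.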
        _begin_chunk = allocate_chunk ();
        alloc_assert (_begin_chunk);
        _begin_pos = 0;
        _back_chunk = NULL;
        _back_pos = 0;
        _end_chunk = _begin_chunk;
        _end_pos = 0;
    }

    //  Destroy the queue.
    inline ~yqueue_t ()
    {
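        //  Walk the chunk list from begin to end, freeing every chunk, and
        //  finally release the spare chunk (if any) kept aside for reuse.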
        while (true) {
            if (_begin_chunk == _end_chunk) {
                free (_begin_chunk);
                break;
            }
            chunk_t *o = _begin_chunk;
            _begin_chunk = _begin_chunk->next;
            free (o);
        }

        chunk_t *sc = _spare_chunk.xchg (NULL);
        free (sc);
    }

    //  Returns reference to the front element of the queue.
    //  If the queue is empty, behaviour is undefined.
    inline T &front () { return _begin_chunk->values[_begin_pos]; }

    //  Returns reference to the back element of the queue.
    //  If the queue is empty, behaviour is undefined.
    inline T &back () { return _back_chunk->values[_back_pos]; }

    //  Adds an element to the back end of the queue.
    inline void push ()
    {
        _back_chunk = _end_chunk;
        _back_pos = _end_pos;

        if (++_end_pos != N)
            return;

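        //  The current end chunk is full. Reuse the spare chunk left behind
        //  by the reader if one is available; otherwise allocate a fresh
        //  chunk and link it to the end of the chunk list.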
        chunk_t *sc = _spare_chunk.xchg (NULL);
        if (sc) {
            _end_chunk->next = sc;
            sc->prev = _end_chunk;
        } else {
            _end_chunk->next = allocate_chunk ();
            alloc_assert (_end_chunk->next);
            _end_chunk->next->prev = _end_chunk;
        }
        _end_chunk = _end_chunk->next;
        _end_pos = 0;
    }

    //  Removes the element from the back end of the queue. In other words,
    //  it rolls back the last push to the queue. Take care: the caller is
    //  responsible for destroying the object being unpushed.
    //  The caller must also guarantee that the queue isn't empty when
    //  unpush is called. This cannot be checked automatically, as the read
    //  side of the queue may be managed by a different, completely
    //  unsynchronised thread.
    inline void unpush ()
    {
        //  First, move 'back' one position backwards.
        if (_back_pos)
            --_back_pos;
        else {
            _back_pos = N - 1;
            _back_chunk = _back_chunk->prev;
        }

        //  Now, move the 'end' position backwards. Note that the obsolete end
        //  chunk is not reused as the spare chunk. Analysis shows that doing
        //  so would require a free plus an atomic operation per deallocated
        //  chunk, instead of a simple free.
        if (_end_pos)
            --_end_pos;
        else {
            _end_pos = N - 1;
            _end_chunk = _end_chunk->prev;
            free (_end_chunk->next);
            _end_chunk->next = NULL;
        }
    }

    //  Removes an element from the front end of the queue.
    inline void pop ()
    {
        if (++_begin_pos == N) {
            chunk_t *o = _begin_chunk;
            _begin_chunk = _begin_chunk->next;
            _begin_chunk->prev = NULL;
            _begin_pos = 0;

            //  'o' has been more recently used than _spare_chunk,
            //  so for cache reasons we'll get rid of the spare and
            //  use 'o' as the spare.
            chunk_t *cs = _spare_chunk.xchg (o);
            free (cs);
        }
    }

  private:
    //  Individual memory chunk to hold N elements.
    struct chunk_t
    {
        T values[N];
        chunk_t *prev;
        chunk_t *next;
    };

    inline chunk_t *allocate_chunk ()
    {
#ifdef HAVE_POSIX_MEMALIGN
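        //  Allocate the chunk aligned to ALIGN bytes so that two chunks never
        //  share a CPU cache line (see the comment on ALIGN above).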
        void *pv;
        if (posix_memalign (&pv, ALIGN, sizeof (chunk_t)) == 0)
            return (chunk_t *) pv;
        return NULL;
#else
        return (chunk_t *) malloc (sizeof (chunk_t));
#endif
    }

    //  The back position may point to invalid memory if the queue is empty,
    //  while the begin & end positions are always valid. The begin position
    //  is accessed exclusively by the queue reader (front/pop), while the
    //  back and end positions are accessed exclusively by the queue writer
    //  (back/push).
    chunk_t *_begin_chunk;
    int _begin_pos;
    chunk_t *_back_chunk;
    int _back_pos;
    chunk_t *_end_chunk;
    int _end_pos;

    //  People are likely to produce and consume at similar rates.  In
    //  this scenario holding onto the most recently freed chunk saves
    //  us from having to call malloc/free.
    atomic_ptr_t<chunk_t> _spare_chunk;

    //  Disable copying of yqueue.
    yqueue_t (const yqueue_t &);
    const yqueue_t &operator= (const yqueue_t &);
};
}

#endif