Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in / Register
Toggle navigation
L
libzmq
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Packages
Packages
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
submodule
libzmq
Commits
e04e2cdb
Commit
e04e2cdb
authored
Mar 09, 2010
by
Martin Sustrik
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
rollback functionality added to pipe
parent
9481c69b
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
63 additions
and
1 deletion
+63
-1
pipe.cpp
src/pipe.cpp
+13
-0
pipe.hpp
src/pipe.hpp
+3
-0
ypipe.hpp
src/ypipe.hpp
+11
-0
yqueue.hpp
src/yqueue.hpp
+36
-1
No files found.
src/pipe.cpp
View file @
e04e2cdb
...
...
@@ -146,6 +146,19 @@ bool zmq::writer_t::write (zmq_msg_t *msg_)
// TODO: Adjust size of the pipe.
}
//  Drop every message that has been written to the pipe but not yet
//  flushed, closing each one to release its resources.
void zmq::writer_t::rollback ()
{
    zmq_msg_t msg;

    //  Keep popping unflushed messages until none are left.
    while (pipe->unwrite (&msg))
        zmq_msg_close (&msg);

    //  TODO: We don't have to inform the reader side of the pipe about
    //  the event. We'll simply adjust the pipe size and keep calm.
}
void
zmq
::
writer_t
::
flush
()
{
if
(
!
pipe
->
flush
())
...
...
src/pipe.hpp
View file @
e04e2cdb
...
...
@@ -100,6 +100,9 @@ namespace zmq
// message cannot be written because high watermark was reached.
bool
write
(
zmq_msg_t
*
msg_
);
// Remove any unflushed messages from the pipe.
void
rollback
();
// Flush the messages downstream.
void
flush
();
...
...
src/ypipe.hpp
View file @
e04e2cdb
...
...
@@ -78,6 +78,17 @@ namespace zmq
#pragma message restore
#endif
//  Pop an unflushed message from the pipe. Returns true if such
//  a message exists, false otherwise.
inline bool unwrite (T *value_)
{
    //  If 'w' already points at the back of the queue there is
    //  no unflushed element left to take back.
    if (&queue.back () == w)
        return false;

    //  Hand the trailing element to the caller and remove it
    //  from the queue.
    *value_ = queue.back ();
    queue.unpush ();
    return true;
}
// Flush the messages into the pipe. Returns false if the reader
// thread is sleeping. In that case, caller is obliged to wake the
// reader up before using the pipe again.
...
...
src/yqueue.hpp
View file @
e04e2cdb
...
...
@@ -102,20 +102,54 @@ namespace zmq
chunk_t
*
sc
=
spare_chunk
.
xchg
(
NULL
);
if
(
sc
)
{
end_chunk
->
next
=
sc
;
sc
->
prev
=
end_chunk
;
}
else
{
end_chunk
->
next
=
(
chunk_t
*
)
malloc
(
sizeof
(
chunk_t
));
zmq_assert
(
end_chunk
->
next
);
end_chunk
->
next
->
prev
=
end_chunk
;
}
end_chunk
=
end_chunk
->
next
;
end_pos
=
0
;
}
//  Removes element from the back end of the queue. In other words
//  it rollbacks last push to the queue. Take care: Caller is
//  responsible for destroying the object being unpushed.
//  The caller must also guarantee that the queue isn't empty when
//  unpush is called. It cannot be done automatically as the read
//  side of the queue can be managed by different, completely
//  unsynchronised thread.
inline void unpush ()
{
    //  Step 'back' one position backwards, crossing into the
    //  previous chunk when it sits at the start of the current one.
    if (back_pos == 0) {
        back_pos = N - 1;
        back_chunk = back_chunk->prev;
    }
    else
        --back_pos;

    //  Step 'end' backwards in the same manner. Note that the obsolete
    //  end chunk is not used as a spare chunk. The analysis shows that
    //  doing so would require a free and an atomic operation per chunk
    //  deallocated instead of a simple free.
    if (end_pos == 0) {
        end_pos = N - 1;
        end_chunk = end_chunk->prev;
        free (end_chunk->next);
        end_chunk->next = NULL;
    }
    else
        --end_pos;
}
// Removes an element from the front end of the queue.
inline
void
pop
()
{
if
(
++
begin_pos
==
N
)
{
chunk_t
*
o
=
begin_chunk
;
begin_chunk
=
begin_chunk
->
next
;
begin_chunk
->
prev
=
NULL
;
begin_pos
=
0
;
// 'o' has been more recently used than spare_chunk,
...
...
@@ -133,6 +167,7 @@ namespace zmq
//  Queue storage is organised as a linked list of fixed-size
//  chunks, each holding N consecutive elements of type T.
struct chunk_t
{
    T values [N];
    chunk_t *prev;
    chunk_t *next;
};
...
...
@@ -149,7 +184,7 @@ namespace zmq
// People are likely to produce and consume at similar rates. In
// this scenario holding onto the most recently freed chunk saves
// us from having to call malloc/free.
atomic_ptr_t
<
chunk_t
>
spare_chunk
;
// Disable copying of yqueue.
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment