Commit bf69c94b authored by Kenton Varda

Lockless EventLoop, but at what cost? This code got weird.

parent b2f663a0
@@ -199,6 +199,7 @@ libkj_async_la_LIBADD = libkj.la $(PTHREAD_LIBS) -lpthread
 libkj_async_la_LDFLAGS = -release $(VERSION) -no-undefined
 libkj_async_la_SOURCES= \
   src/kj/work-queue.h \
+  src/kj/work-queue.c++ \
   src/kj/async.c++ \
   src/kj/async-unix.c++ \
   src/kj/async-io.c++
...
@@ -48,7 +48,6 @@ AS_IF([test "$external_capnp" != "no"], [
 ])
 AM_CONDITIONAL([USE_EXTERNAL_CAPNP], [test "$external_capnp" != "no"])
 
-# Only used in a test.  Cleanup?
 AC_SEARCH_LIBS(sched_yield, rt)
 LIBS="$PTHREAD_LIBS $LIBS"
...
@@ -108,10 +108,16 @@ public:
   inline SignalPromiseAdapter(PromiseFulfiller<siginfo_t>& fulfiller,
                               const _::WorkQueue<SignalJob>& signalQueue,
                               int signum)
-      : job(signalQueue.add(fulfiller, signum)) {}
+      : job(signalQueue.createJob(fulfiller, signum)) {
+    job->addToQueue();
+  }
+
+  ~SignalPromiseAdapter() noexcept(false) {
+    job->cancel();
+  }
 
 private:
-  Own<const SignalJob> job;
+  Own<_::WorkQueue<SignalJob>::JobWrapper> job;
 };
 
 class UnixEventLoop::PollJob {
@@ -142,10 +148,16 @@ public:
   inline PollPromiseAdapter(PromiseFulfiller<short>& fulfiller,
                             const _::WorkQueue<PollJob>& pollQueue,
                             int fd, short eventMask)
-      : job(pollQueue.add(fulfiller, fd, eventMask)) {}
+      : job(pollQueue.createJob(fulfiller, fd, eventMask)) {
+    job->addToQueue();
+  }
+
+  ~PollPromiseAdapter() noexcept(false) {
+    job->cancel();
+  }
 
 private:
-  Own<const PollJob> job;
+  Own<_::WorkQueue<PollJob>::JobWrapper> job;
 };
 
 UnixEventLoop::UnixEventLoop(): impl(heap<Impl>(*this)) {
...
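Both adapters now follow the same three-step lifecycle: construct the job (`createJob`), publish it to the queue only once fully built (`addToQueue`), and `cancel()` it from the destructor so the queue never fires into a dead object. A self-contained sketch of that pattern over a lock-free list (plain standard C++; `Job` and `JobHandle` are invented names, not the kj API):

```c++
#include <atomic>
#include <functional>
#include <utility>

// Illustrative only: a job becomes visible to the consumer thread via a single
// atomic pointer swap, and is never visible while half-constructed.
struct Job {
  explicit Job(std::function<void()> w): work(std::move(w)) {}
  std::function<void()> work;
  std::atomic<bool> cancelled{false};
  std::atomic<Job*> next{nullptr};
};

class JobHandle {
public:
  JobHandle(std::atomic<Job*>& head, std::function<void()> work)
      : job(new Job(std::move(work))) {         // step 1: construct fully
    Job* old = head.load(std::memory_order_relaxed);
    do {                                        // step 2: publish atomically
      job->next.store(old, std::memory_order_relaxed);
    } while (!head.compare_exchange_weak(old, job, std::memory_order_release,
                                         std::memory_order_relaxed));
  }
  ~JobHandle() {
    // step 3: cancel rather than unlink; the consumer skips cancelled jobs.
    job->cancelled.store(true, std::memory_order_release);
  }
private:
  Job* job;  // intentionally leaked in this sketch; real code refcounts the node
};
```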
@@ -79,14 +79,7 @@ bool EventLoop::isCurrent() const {
   return threadLocalEventLoop == this;
 }
 
-void EventLoop::EventListHead::fire() {
-  KJ_FAIL_ASSERT("Fired event list head.");
-}
-
-EventLoop::EventLoop(): queue(*this), insertPoint(&queue) {
-  queue.next = &queue;
-  queue.prev = &queue;
-}
+EventLoop::EventLoop() {}
 
 void EventLoop::waitImpl(Own<_::PromiseNode> node, _::ExceptionOrValue& result) {
   EventLoop* oldEventLoop = threadLocalEventLoop;
@@ -97,36 +90,22 @@ void EventLoop::waitImpl(Own<_::PromiseNode> node, _::ExceptionOrValue& result)
   event.fired = node->onReady(event);
 
   while (!event.fired) {
-    queue.mutex.lock(_::Mutex::EXCLUSIVE);
-
-    // Get the first event in the queue.
-    Event* event = queue.next;
-    if (event == &queue) {
-      // No events in the queue.
-      prepareToSleep();
-      queue.mutex.unlock(_::Mutex::EXCLUSIVE);
-      sleep();
-      continue;
-    }
-
-    // Remove it from the queue.
-    queue.next = event->next;
-    event->next->prev = &queue;
-    event->next = nullptr;
-    event->prev = nullptr;
-
-    // New events should be inserted at the beginning of the queue, but in order.
-    insertPoint = queue.next;
-
-    // Lock it before we unlock the queue mutex.
-    event->mutex.lock(_::Mutex::EXCLUSIVE);
-
-    // Now we can unlock the queue.
-    queue.mutex.unlock(_::Mutex::EXCLUSIVE);
-
-    // Fire the event, making sure we unlock the mutex afterwards.
-    KJ_DEFER(event->mutex.unlock(_::Mutex::EXCLUSIVE));
-    event->fire();
+    KJ_IF_MAYBE(event, queue.peek(nullptr)) {
+      // Arrange for events armed during the event callback to be inserted at the beginning
+      // of the queue.
+      insertionPoint = nullptr;
+
+      // Fire the first event.
+      event->complete(0);
+    } else {
+      // No events in the queue.  Wait for callback.
+      prepareToSleep();
+      if (queue.peek(*this) != nullptr) {
+        // Whoa, new job was just added.
+        wake();
+      }
+      sleep();
+    }
   }
 
   node->get(result);
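The rewritten wait loop above relies on a classic lost-wakeup handshake: `prepareToSleep()` announces intent to sleep, `queue.peek(*this)` re-checks the queue (and registers the loop for new-job callbacks), and the loop wakes itself if a job slipped in between the check and the nap. A self-contained model of that handshake (std primitives, not kj's; `Sleeper` is a made-up name):

```c++
#include <atomic>
#include <condition_variable>
#include <mutex>

// Models the prepareToSleep()/wake()/sleep() protocol from waitImpl().  The
// consumer announces the sleep *before* its final emptiness check, so a
// producer that enqueues in between is guaranteed either to be seen by the
// re-check or to call wake() itself -- the wakeup can't be lost.
class Sleeper {
public:
  void prepareToSleep() { sleeping.store(true, std::memory_order_seq_cst); }

  void sleep() {  // blocks until wake() is called (possibly by ourselves)
    std::unique_lock<std::mutex> lock(mu);
    cv.wait(lock, [this] { return !sleeping.load(std::memory_order_relaxed); });
  }

  void wake() {
    {
      std::lock_guard<std::mutex> lock(mu);
      sleeping.store(false, std::memory_order_seq_cst);
    }
    cv.notify_one();
  }

  // Consumer loop, mirroring waitImpl():
  //   if (Job* job = queue.pop()) { job->run(); }
  //   else {
  //     prepareToSleep();
  //     if (!queue.empty()) wake();   // a producer raced our empty check
  //     sleep();
  //   }
  // Producers: queue.push(job); then wake() the consumer if it announced a sleep.

private:
  std::atomic<bool> sleeping{false};
  std::mutex mu;
  std::condition_variable cv;
};
```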
@@ -136,72 +115,35 @@ Promise<void> EventLoop::yieldIfSameThread() const {
   return Promise<void>(false, kj::heap<YieldPromiseNode>());
 }
 
-EventLoop::Event::~Event() noexcept(false) {
-  if (this != &loop.queue) {
-    KJ_ASSERT(next == this,
-        "Event destroyed while armed.  You must call disarm() in the subclass's destructor "
-        "in order to ensure that fire() is not running when the event is destroyed.") {
-      break;
-    }
-  }
-}
+void EventLoop::receivedNewJob() const {
+  wake();
+}
 
-void EventLoop::Event::arm(bool preemptIfSameThread) {
-  loop.queue.mutex.lock(_::Mutex::EXCLUSIVE);
-  KJ_DEFER(loop.queue.mutex.unlock(_::Mutex::EXCLUSIVE));
-
-  if (next == nullptr) {
-    bool queueIsEmpty = loop.queue.next == &loop.queue;
-
-    if (preemptIfSameThread && threadLocalEventLoop == &loop) {
-      // Insert the event into the queue.  We put it at the front rather than the back so that
-      // related events are executed together and so that increasing the granularity of events
-      // does not cause your code to "lose priority" compared to simultaneously-running code
-      // with less granularity.
-      next = loop.insertPoint;
-      prev = next->prev;
-      next->prev = this;
-      prev->next = this;
-    } else {
-      // Insert the node at the *end* of the queue.
-      prev = loop.queue.prev;
-      next = prev->next;
-      prev->next = this;
-      next->prev = this;
-
-      if (loop.insertPoint == &loop.queue) {
-        loop.insertPoint = this;
-      }
-    }
-
-    if (queueIsEmpty) {
-      // Queue was empty previously.  Make sure to wake it up if it is sleeping.
-      loop.wake();
-    }
-  }
-}
+EventLoop::Event::Event(const EventLoop& loop)
+    : loop(loop),
+      jobs { loop.queue.createJob(*this), loop.queue.createJob(*this) } {}
+
+EventLoop::Event::~Event() noexcept(false) {}
+
+void EventLoop::Event::arm(bool preemptIfSameThread) {
+  EventLoop* localLoop = threadLocalEventLoop;
+  if (preemptIfSameThread && localLoop == &loop) {
+    // Insert the event into the queue.  We put it at the front rather than the back so that
+    // related events are executed together and so that increasing the granularity of events
+    // does not cause your code to "lose priority" compared to simultaneously-running code
+    // with less granularity.
+    jobs[currentJob]->insertAfter(localLoop->insertionPoint, localLoop->queue);
+    localLoop->insertionPoint = *jobs[currentJob];
+  } else {
+    // Insert the node at the *end* of the queue.
+    jobs[currentJob]->addToQueue();
+  }
+  currentJob = !currentJob;
 }
 
 void EventLoop::Event::disarm() {
-  loop.queue.mutex.lock(_::Mutex::EXCLUSIVE);
-
-  if (next != nullptr && next != this) {
-    if (loop.insertPoint == this) {
-      loop.insertPoint = next;
-    }
-
-    next->prev = prev;
-    prev->next = next;
-    next = nullptr;
-    prev = nullptr;
-  }
-
-  next = this;
-
-  loop.queue.mutex.unlock(_::Mutex::EXCLUSIVE);
-
-  // Ensure that if fire() is currently running, it completes before disarm() returns.
-  mutex.lock(_::Mutex::EXCLUSIVE);
-  mutex.unlock(_::Mutex::EXCLUSIVE);
+  jobs[0]->cancel();
+  jobs[1]->cancel();
 }
 
 // =======================================================================================
...
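`arm()` now flips between two pre-allocated jobs (`jobs[currentJob]`, then `currentJob = !currentJob`). As the header's `TODO(cleanup)` later in this diff admits, the point of the pair is that an event's `fire()` can re-arm the same event while its original job is still in flight. The trick reduced to a stand-alone sketch (`Slot` is a stand-in, not the kj `JobWrapper`):

```c++
#include <atomic>

// Two-slot arming: a re-arm from inside fire() always lands on the *other*
// slot, never on the job that is currently being consumed by the loop.
struct Slot {  // stand-in for WorkQueue<...>::JobWrapper
  std::atomic<bool> queued{false};
  void addToQueue() { queued.store(true, std::memory_order_release); }
  void cancel() { queued.store(false, std::memory_order_release); }
};

struct Event {
  Slot jobs[2];
  unsigned currentJob = 0;

  void arm() {
    jobs[currentJob].addToQueue();
    currentJob = !currentJob;  // the next arm() -- even one issued from this
                               // event's own fire() -- uses the idle slot
  }
  void disarm() {  // mirrors EventLoop::Event::disarm(): cancel both slots
    jobs[0].cancel();
    jobs[1].cancel();
  }
};
```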
@@ -27,6 +27,7 @@
 #include "exception.h"
 #include "mutex.h"
 #include "refcount.h"
+#include "work-queue.h"
 
 namespace kj {
@@ -198,7 +199,7 @@ template <typename Func, typename T>
 using PromiseForResultNoChaining = Promise<_::DisallowChain<_::ReturnType<Func, T>>>;
 // Like PromiseForResult but chaining (continuations that return another promise) is now allowed.
 
-class EventLoop {
+class EventLoop: private _::NewJobCallback {
   // Represents a queue of events being executed in a loop.  Most code won't interact with
   // EventLoop directly, but instead use `Promise`s to interact with it indirectly.  See the
   // documentation for `Promise`.
@@ -222,6 +223,8 @@ class EventLoop {
   //   return 0;
   // }
 
+  class EventJob;
+
 public:
   EventLoop();
@@ -301,13 +304,13 @@ public:
     // conditions.
 
   public:
-    Event(const EventLoop& loop): loop(loop), next(nullptr), prev(nullptr) {}
+    Event(const EventLoop& loop);
     ~Event() noexcept(false);
     KJ_DISALLOW_COPY(Event);
 
     void arm(bool preemptIfSameThread = true);
-    // Enqueue this event so that run() will be called from the event loop soon.  Does nothing
-    // if the event is already armed.
+    // Enqueue this event so that run() will be called from the event loop soon.  It is an error
+    // to call this when the event is already armed.
     //
     // If called from the event loop's own thread (i.e. from within an event handler fired from
     // this event loop), and `preemptIfSameThread` is true, the event will be scheduled
@@ -336,13 +339,11 @@ public:
   private:
     friend class EventLoop;
     const EventLoop& loop;
-    Event* next;  // if == this, disarm() has been called.
-    Event* prev;
 
-    mutable kj::_::Mutex mutex;
-    // Hack:  The mutex on the list head is treated as protecting the next/prev links across the
-    // whole list.  The mutex on each Event other than the head is treated as protecting that
-    // event's armed/disarmed state.
+    // TODO(cleanup): This is dumb.  We allocate two jobs and alternate between them so that an
+    // event's fire() can re-arm itself without deadlocking or causing other trouble.
+    Own<_::WorkQueue<EventJob>::JobWrapper> jobs[2];
+    uint currentJob = 0;
   };
 
 protected:
@@ -365,21 +366,21 @@ protected:
   // is armed; it should return quickly if the loop isn't prepared to sleep.
 
 private:
-  class EventListHead: public Event {
+  class EventJob {
   public:
-    inline EventListHead(EventLoop& loop): Event(loop) {}
-    void fire() override;  // throws
+    EventJob(Event& event): event(event) {}
+
+    inline void complete(int dummyArg) { event.fire(); }
+    inline void cancel() {}
+
+  private:
+    Event& event;
   };
 
-  mutable EventListHead queue;
-  // Head of the event list.  queue.mutex protects all next/prev pointers across the list, as well
-  // as `insertPoint`.  Each actual event's mutex protects its own `fire()` callback.
+  _::WorkQueue<EventJob> queue;
 
-  mutable Event* insertPoint;
-  // The next event after the one that is currently firing.  New events are inserted just before
-  // this event.  When the fire callback completes, the loop continues at the beginning of the
-  // queue -- thus, it starts by running any events that were just enqueued by the previous
-  // callback.  This keeps related events together.
+  Maybe<_::WorkQueue<EventJob>::JobWrapper&> insertionPoint;
+  // Where to insert preemptively-scheduled events into the queue.
 
   template <typename T, typename Func, typename ErrorFunc>
   Own<_::PromiseNode> thereImpl(Promise<T>&& promise, Func&& func, ErrorFunc&& errorHandler) const;
@@ -393,6 +394,9 @@ private:
   // currently on the queue are fired.  Otherwise, returns an already-resolved promise.  Used to
   // implement evalLater().
 
+  void receivedNewJob() const override;
+  // Implements NewJobCallback.
+
   template <typename>
   friend class Promise;
 };
@@ -1272,9 +1276,11 @@ public:
   }
 
 private:
-  Adapter adapter;
   ExceptionOr<T> result;
   bool waiting = true;
+  Adapter adapter;
+  // `adapter` must come last so that it is destroyed first, since fulfill() could be called up
+  // until that point.
 
   void fulfill(T&& value) override {
     if (waiting) {
...
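The member reordering above relies on a language guarantee rather than any kj machinery: non-static data members are destroyed in reverse order of declaration, so declaring `adapter` last makes it the first member destroyed, after which `fulfill()` can no longer be invoked against the already-dead `result` and `waiting`. A tiny demonstration:

```c++
#include <cstdio>

// C++ destroys non-static data members in reverse declaration order.
struct Member {
  const char* name;
  explicit Member(const char* n): name(n) {}
  ~Member() { std::printf("destroying %s\n", name); }
};

struct AdapterPromise {  // hypothetical stand-in for the class in the diff
  Member result{"result"};
  Member waiting{"waiting"};
  Member adapter{"adapter"};  // declared last => destroyed first
};

int main() {
  AdapterPromise p;
  // At scope exit this prints: adapter, waiting, result -- in that order.
}
```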
@@ -149,28 +149,68 @@ void Once::runOnce(Initializer& init) {
     }
   } else {
     for (;;) {
-      if (state == INITIALIZED) {
+      if (state == INITIALIZED || state == DISABLED) {
         break;
       } else if (state == INITIALIZING) {
         // Initialization is taking place in another thread.  Indicate that we're waiting.
         if (!__atomic_compare_exchange_n(&futex, &state, INITIALIZING_WITH_WAITERS, true,
-                                         __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+                                         __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
          // State changed, retry.
          continue;
        }
+      } else {
+        KJ_DASSERT(state == INITIALIZING_WITH_WAITERS);
       }
 
       // Wait for initialization.
       syscall(SYS_futex, &futex, FUTEX_WAIT_PRIVATE, INITIALIZING_WITH_WAITERS, NULL, NULL, 0);
       state = __atomic_load_n(&futex, __ATOMIC_ACQUIRE);
     }
-
-    // The docs for __atomic_compare_exchange_n claim that the memmodel for the failure case cannot
-    // be stronger than the success case.  That's disappointing, because what we really want is
-    // for the two cmpxchg calls above to do an acquire barrier in the failure case only, while
-    // being relaxed if successful, so that once the state is INITIALIZED we know we've acquired
-    // it.  Oh well, we'll just do an acquire barrier on the way out instead.
-    KJ_ASSERT(__atomic_load_n(&futex, __ATOMIC_ACQUIRE) == INITIALIZED);
   }
 }
+
+void Once::reset() {
+  uint state = INITIALIZED;
+  if (!__atomic_compare_exchange_n(&futex, &state, UNINITIALIZED,
+                                   false, __ATOMIC_RELEASE, __ATOMIC_RELAXED)) {
+    KJ_REQUIRE(state == DISABLED, "reset() called while not initialized.");
+  }
+}
+
+void Once::disable() noexcept {
+  uint state = __atomic_load_n(&futex, __ATOMIC_ACQUIRE);
+  for (;;) {
+    switch (state) {
+      case DISABLED:
+      default:
+        return;
+
+      case UNINITIALIZED:
+      case INITIALIZED:
+        // Try to transition the state to DISABLED.
+        if (!__atomic_compare_exchange_n(&futex, &state, DISABLED, true,
+                                         __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+          // State changed, retry.
+          continue;
+        }
+        // Success.
+        return;
+
+      case INITIALIZING:
+        // Initialization is taking place in another thread.  Indicate that we're waiting.
+        if (!__atomic_compare_exchange_n(&futex, &state, INITIALIZING_WITH_WAITERS, true,
+                                         __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
+          // State changed, retry.
+          continue;
+        }
+        // no break
+
+      case INITIALIZING_WITH_WAITERS:
+        // Wait for initialization.
+        syscall(SYS_futex, &futex, FUTEX_WAIT_PRIVATE, INITIALIZING_WITH_WAITERS, NULL, NULL, 0);
+        state = __atomic_load_n(&futex, __ATOMIC_ACQUIRE);
+        break;
+    }
+  }
+}
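The memory-order change on the waiter's compare-exchange (`__ATOMIC_RELAXED` → `__ATOMIC_ACQUIRE` on both sides) is what allows deleting the old acquire-on-exit workaround above: the failure order may not be stronger than the success order, so acquire must be requested for both, and a waiter that subsequently loads INITIALIZED has synchronized with the initializer's writes. An isolated, compilable version of that call (GCC/Clang builtins; names mirror the diff):

```c++
// Demonstrates the CAS used by waiters in Once::runOnce() and Once::disable().
// Both the success and failure memory orders are ACQUIRE because the failure
// order is not permitted to be stronger than the success order.
enum State : unsigned {
  UNINITIALIZED, INITIALIZING, INITIALIZING_WITH_WAITERS, INITIALIZED, DISABLED
};

// Returns true if we successfully flagged that we're waiting; on failure,
// `state` is reloaded (with acquire semantics) so the caller can re-dispatch.
inline bool markWaiting(unsigned& futexWord, unsigned& state) {
  return __atomic_compare_exchange_n(&futexWord, &state, INITIALIZING_WITH_WAITERS,
                                     /*weak=*/true, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
```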
@@ -236,7 +276,7 @@ void Mutex::assertLockedByCaller(Exclusivity exclusivity) {
   }
 }
 
-Once::Once(): initialized(false) {
+Once::Once(bool startInitialized): state(startInitialized ? INITIALIZED : UNINITIALIZED) {
   KJ_PTHREAD_CALL(pthread_mutex_init(&mutex, nullptr));
 }
 
 Once::~Once() {
@@ -247,13 +287,28 @@ void Once::runOnce(Initializer& init) {
   KJ_PTHREAD_CALL(pthread_mutex_lock(&mutex));
   KJ_DEFER(KJ_PTHREAD_CALL(pthread_mutex_unlock(&mutex)));
 
-  if (initialized) {
+  if (state != UNINITIALIZED) {
     return;
   }
 
   init.run();
-  __atomic_store_n(&initialized, true, __ATOMIC_RELEASE);
+  __atomic_store_n(&state, INITIALIZED, __ATOMIC_RELEASE);
+}
+
+void Once::reset() {
+  State oldState = INITIALIZED;
+  if (!__atomic_compare_exchange_n(&state, &oldState, UNINITIALIZED,
+                                   false, __ATOMIC_RELEASE, __ATOMIC_RELAXED)) {
+    KJ_REQUIRE(oldState == DISABLED, "reset() called while not initialized.");
+  }
+}
+
+void Once::disable() noexcept {
+  KJ_PTHREAD_CALL(pthread_mutex_lock(&mutex));
+  KJ_DEFER(KJ_PTHREAD_CALL(pthread_mutex_unlock(&mutex)));
+  __atomic_store_n(&state, DISABLED, __ATOMIC_RELAXED);
 }
 
 #endif
...
@@ -26,7 +26,7 @@
 #include "memory.h"
 
-#if __linux__ && !defined(KJ_FUTEX)
+#if __linux__ && !defined(KJ_USE_FUTEX)
 #define KJ_USE_FUTEX 1
 #endif
@@ -87,9 +87,10 @@ class Once {
 public:
 #if KJ_USE_FUTEX
-  inline Once(): futex(UNINITIALIZED) {}
+  inline Once(bool startInitialized = false)
+      : futex(startInitialized ? INITIALIZED : UNINITIALIZED) {}
 #else
-  Once();
+  Once(bool startInitialized = false);
   ~Once();
 #endif
 
 KJ_DISALLOW_COPY(Once);
@@ -106,7 +107,26 @@ public:
 #if KJ_USE_FUTEX
     return __atomic_load_n(&futex, __ATOMIC_ACQUIRE) == INITIALIZED;
 #else
-    return __atomic_load_n(&initialized, __ATOMIC_ACQUIRE);
+    return __atomic_load_n(&state, __ATOMIC_ACQUIRE) == INITIALIZED;
 #endif
   }
 
+  void reset();
+  // Resets the state from initialized back to uninitialized.  It is an error to call this when
+  // not already initialized, or when runOnce() or isInitialized() might be called concurrently in
+  // another thread.
+
+  void disable() noexcept;
+  // Prevent future calls to runOnce() and reset() from having any effect, and make isInitialized()
+  // return false forever.  If an initializer is currently running, block until it completes.
+
+  bool isDisabled() noexcept {
+    // Returns true if `disable()` has been called.
+
+#if KJ_USE_FUTEX
+    return __atomic_load_n(&futex, __ATOMIC_ACQUIRE) == DISABLED;
+#else
+    return __atomic_load_n(&state, __ATOMIC_ACQUIRE) == DISABLED;
+#endif
+  }
@@ -114,13 +134,21 @@ private:
 #if KJ_USE_FUTEX
   uint futex;
 
-  static constexpr uint UNINITIALIZED = 0;
-  static constexpr uint INITIALIZING = 1;
-  static constexpr uint INITIALIZING_WITH_WAITERS = 2;
-  static constexpr uint INITIALIZED = 3;
+  enum State {
+    UNINITIALIZED,
+    INITIALIZING,
+    INITIALIZING_WITH_WAITERS,
+    INITIALIZED,
+    DISABLED
+  };
 
 #else
-  bool initialized;
+  enum State {
+    UNINITIALIZED,
+    INITIALIZED,
+    DISABLED
+  };
+
+  State state;
   pthread_mutex_t mutex;
 #endif
 };
...
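Taken together, the header and both implementations give `Once` a five-state machine (three states without futexes) in which DISABLED is terminal and `reset()` is legal only from INITIALIZED. A stand-alone model of those transition rules using `std::atomic` (illustrative, not the kj implementation):

```c++
#include <atomic>
#include <cassert>

// Self-contained model of the Once state machine added above, showing which
// transitions reset() and disable() permit.
enum State { UNINITIALIZED, INITIALIZING, INITIALIZING_WITH_WAITERS, INITIALIZED, DISABLED };

struct OnceModel {
  std::atomic<State> state{UNINITIALIZED};

  bool reset() {
    State expected = INITIALIZED;
    // Only INITIALIZED -> UNINITIALIZED is legal; reset() on DISABLED is a no-op.
    return state.compare_exchange_strong(expected, UNINITIALIZED,
                                         std::memory_order_release,
                                         std::memory_order_relaxed);
  }
  void disable() {
    // DISABLED is terminal: every other state eventually funnels into it.
    state.store(DISABLED, std::memory_order_release);
  }
  bool isInitialized() const {
    return state.load(std::memory_order_acquire) == INITIALIZED;
  }
};

int main() {
  OnceModel o;
  o.state.store(INITIALIZED);
  assert(o.isInitialized());
  assert(o.reset());           // INITIALIZED -> UNINITIALIZED
  o.disable();                 // terminal
  assert(!o.isInitialized());  // DISABLED reads as not initialized
}
```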
// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "work-queue.h"
#include <sched.h>
namespace kj {
namespace _ { // private
void yieldThread() {
sched_yield();
}
} // namespace _ (private)
} // namespace kj
(One further file's diff in this commit is collapsed and not shown here.)
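The work-queue implementation itself is in that collapsed diff, so only `yieldThread()` is visible above. Purely as a guess at the shape implied by the visible call sites (`addToQueue`, `cancel`, `peek`), here is a minimal lock-free multi-producer job list in standard C++, including the spin-with-yield role a `yieldThread()` helper typically plays; none of these names or details should be read as the actual work-queue.h contents:

```c++
#include <atomic>
#include <functional>
#include <sched.h>

// NOT the contents of the collapsed work-queue.h -- a hypothetical sketch only.
// Producers CAS jobs onto an atomic head; the single consumer drains the list;
// short waits back off via sched_yield() (the role yieldThread() plausibly plays).
struct Node {
  explicit Node(std::function<void()> f): fire(std::move(f)) {}
  std::function<void()> fire;
  std::atomic<bool> cancelled{false};
  Node* next = nullptr;
};

class MpscJobList {
public:
  void push(Node* n) {                       // any thread ("addToQueue")
    Node* old = head.load(std::memory_order_relaxed);
    do {
      n->next = old;
    } while (!head.compare_exchange_weak(old, n, std::memory_order_release,
                                         std::memory_order_relaxed));
  }

  void drain() {                             // consumer thread only ("peek" + fire)
    Node* n = head.exchange(nullptr, std::memory_order_acquire);
    for (; n != nullptr; n = n->next) {      // note: LIFO order in this sketch
      if (!n->cancelled.load(std::memory_order_acquire)) n->fire();
    }                                        // node reclamation omitted
  }

  void waitUntilEmpty() const {              // spin-with-yield back-off
    while (head.load(std::memory_order_acquire) != nullptr) {
      sched_yield();
    }
  }

private:
  std::atomic<Node*> head{nullptr};
};
```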
-linux-gcc-4.7 1722 ./super-test.sh tmpdir capnp-gcc-4.7 quick
-linux-gcc-4.8 1725 ./super-test.sh tmpdir capnp-gcc-4.8 quick gcc-4.8
-linux-clang 1745 ./super-test.sh tmpdir capnp-clang quick clang
+linux-gcc-4.7 1731 ./super-test.sh tmpdir capnp-gcc-4.7 quick
+linux-gcc-4.8 1734 ./super-test.sh tmpdir capnp-gcc-4.8 quick gcc-4.8
+linux-clang 1754 ./super-test.sh tmpdir capnp-clang quick clang
 mac 802 ./super-test.sh remote beat caffeinate quick
-cygwin 805 ./super-test.sh remote Kenton@flashman quick
+cygwin 810 ./super-test.sh remote Kenton@flashman quick