Commit 5a898e14 authored by gejun

r35401: Refactored interruptions on bthreads. The semantics are more similar to pthread's and EINTR is returned instead of ESTOP.
parent 412143ed
@@ -45,7 +45,7 @@ BAIDU_CASSERT(sizeof(TaskControl*) == sizeof(butil::atomic<TaskControl*>), atomi
 pthread_mutex_t g_task_control_mutex = PTHREAD_MUTEX_INITIALIZER;
 // Referenced in rpc, needs to be extern.
 // Notice that we can't declare the variable as atomic<TaskControl*> which
-// may not initialized before creating bthreads before main().
+// are not constructed before main().
 TaskControl* g_task_control = NULL;
 extern BAIDU_THREAD_LOCAL TaskGroup* tls_task_group;
@@ -106,8 +106,6 @@ start_from_non_worker(bthread_t* __restrict tid,
                               tid, attr, fn, arg);
 }
 
-int stop_butex_wait(bthread_t tid);
-
 struct TidTraits {
     static const size_t BLOCK_SIZE = 63;
     static const size_t MAX_ENTRIES = 65536;
@@ -169,23 +167,17 @@ void bthread_flush() __THROW {
     }
 }
 
+int bthread_interrupt(bthread_t tid) __THROW {
+    return bthread::TaskGroup::interrupt(tid, bthread::get_task_control());
+}
+
 int bthread_stop(bthread_t tid) __THROW {
-    if (bthread::stop_butex_wait(tid) < 0) {
-        return errno;
-    }
-    bthread::TaskGroup* g = bthread::tls_task_group;
-    if (!g) {
-        bthread::TaskControl* c = bthread::get_or_new_task_control();
-        if (!c) {
-            return ENOMEM;
-        }
-        g = c->choose_one_group();
-    }
-    return g->stop_usleep(tid);
+    bthread::TaskGroup::set_stopped(tid);
+    return bthread_interrupt(tid);
 }
 
 int bthread_stopped(bthread_t tid) __THROW {
-    return bthread::TaskGroup::stopped(tid);
+    return (int)bthread::TaskGroup::is_stopped(tid);
 }
 
 bthread_t bthread_self(void) __THROW {
@@ -319,7 +311,6 @@ int bthread_usleep(uint64_t microseconds) __THROW {
     if (NULL != g && !g->is_current_pthread_task()) {
         return bthread::TaskGroup::usleep(&g, microseconds);
     }
-    // TODO: return ESTOP for pthread_task
     return ::usleep(microseconds);
 }
...
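The usleep path above is where the new semantics first become visible to users: a bthread blocked in bthread_usleep() now returns -1 with errno set to EINTR after bthread_interrupt() (or ESTOP after bthread_stop()). Below is a minimal sketch of that behavior, assuming only the public API declared in bthread/bthread.h and the include path used by the project; the helper names are ours, not part of this commit.

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <bthread/bthread.h>

    // Sleeps until interrupted; illustration only, not part of the commit.
    static void* sleeper(void*) {
        while (bthread_usleep(1000000L) == 0) {
            // keep sleeping until a wake-up interrupts us
        }
        // errno is EINTR after bthread_interrupt(), ESTOP after bthread_stop().
        printf("woken up, errno=%d\n", errno);
        return NULL;
    }

    int main() {
        bthread_t tid;
        if (bthread_start_background(&tid, NULL, sleeper, NULL) != 0) {
            return 1;
        }
        usleep(100 * 1000);        // give the sleeper a chance to block
        bthread_interrupt(tid);    // bthread_usleep() returns -1/EINTR
        return bthread_join(tid, NULL);
    }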
@@ -33,16 +33,16 @@
 __BEGIN_DECLS
 
-// Create bthread `fn(arg)' with attributes `attr' and put the identifier into
+// Create bthread `fn(args)' with attributes `attr' and put the identifier into
 // `tid'. Switch to the new thread and schedule old thread to run. Use this
 // function when the new thread is more urgent.
 // Returns 0 on success, errno otherwise.
 extern int bthread_start_urgent(bthread_t* __restrict tid,
                                 const bthread_attr_t* __restrict attr,
                                 void * (*fn)(void*),
-                                void* __restrict arg) __THROW;
+                                void* __restrict args) __THROW;
 
-// Create bthread `fn(arg)' with attributes `attr' and put the identifier into
+// Create bthread `fn(args)' with attributes `attr' and put the identifier into
 // `tid'. This function behaves closer to pthread_create: after scheduling the
 // new thread to run, it returns. In another word, the new thread may take
 // longer time than bthread_start_urgent() to run.
@@ -50,16 +50,38 @@ extern int bthread_start_urgent(bthread_t* __restrict tid,
 extern int bthread_start_background(bthread_t* __restrict tid,
                                     const bthread_attr_t* __restrict attr,
                                     void * (*fn)(void*),
-                                    void* __restrict arg) __THROW;
+                                    void* __restrict args) __THROW;
 
+// Wake up operations blocking the thread. Different functions may behave
+// differently:
+//   bthread_usleep(): returns -1 and sets errno to ESTOP if bthread_stop()
+//                     is called, or to EINTR otherwise.
+//   butex_wait(): returns -1 and sets errno to EINTR
+//   bthread_mutex_*lock: unaffected (still blocking)
+//   bthread_cond_*wait: wakes up and returns 0.
+//   bthread_*join: unaffected.
+// Common usage of interruption is to make a thread quit ASAP.
+//    [Thread1]                      [Thread2]
+//    set stopping flag
+//    bthread_interrupt(Thread2)
+//                                   wake up
+//                                   see the flag and quit
+//                                   may block again if the flag is unchanged
+// bthread_interrupt() guarantees that Thread2 is woken up reliably no matter
+// how the 2 threads are interleaved.
+// Returns 0 on success, errno otherwise.
+extern int bthread_interrupt(bthread_t tid) __THROW;
+
-// Ask the bthread `tid' to stop. Operations which would suspend the thread
-// except bthread_join will not block, instead they return ESTOP.
-// This is a cooperative stopping mechanism.
+// Make bthread_stopped() on the bthread return true and interrupt the bthread.
+// Note that current bthread_stop() solely sets the built-in "stop flag" and
+// calls bthread_interrupt(), which is different from earlier versions of
+// bthread, and is replaceable by user-defined stop flags plus calls to
+// bthread_interrupt().
 // Returns 0 on success, errno otherwise.
 extern int bthread_stop(bthread_t tid) __THROW;
 
-// Returns 1 iff bthread_stop() was called on the thread or the thread does
-// not exist, 0 otherwise.
+// Returns 1 iff bthread_stop(tid) was called or the thread does not exist,
+// 0 otherwise.
 extern int bthread_stopped(bthread_t tid) __THROW;
 
 // Returns identifier of caller if caller is a bthread, 0 otherwise(Id of a
@@ -75,10 +97,12 @@ extern int bthread_equal(bthread_t t1, bthread_t t2) __THROW;
 extern void bthread_exit(void* retval) __attribute__((__noreturn__));
 
 // Make calling thread wait for termination of bthread `bt'. Return immediately
-// if `bt' is already terminated. The exit status of the bthread shall be
-// stored in *bthread_return (if it's not NULL), however at present it's
-// always set to NULL. There's no "detachment" in bthreads, all bthreads are
-// "detached" as default and still joinable.
+// if `bt' is already terminated.
+// Notes:
+//  - All bthreads are "detached" but still joinable.
+//  - *bthread_return is always set to null. If you need to return a value
+//    from a bthread, pass it via the `args' given when creating the bthread.
+//  - bthread_join() is not affected by bthread_interrupt.
 // Returns 0 on success, errno otherwise.
 extern int bthread_join(bthread_t bt, void** bthread_return) __THROW;
@@ -125,6 +149,7 @@ extern int bthread_setconcurrency(int num) __THROW;
 extern int bthread_yield(void) __THROW;
 
 // Suspend current thread for at least `microseconds'
+// Interruptible by bthread_interrupt().
 extern int bthread_usleep(uint64_t microseconds) __THROW;
 
 // ---------------------------------------------
...
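As the rewritten comment above bthread_stop() notes, the built-in stop flag is no longer special: the same effect can be obtained with a user-defined flag plus bthread_interrupt(). A hedged sketch of that pattern follows; the flag, the helper names and the butil/atomicops.h include are our assumptions, not part of the header.

    #include <errno.h>
    #include <bthread/bthread.h>
    #include <butil/atomicops.h>                // butil::atomic, assumed available

    static butil::atomic<bool> g_quit(false);   // user-defined stop flag

    static void* worker(void*) {
        while (!g_quit.load(butil::memory_order_relaxed)) {
            // A blocked bthread_usleep() returns -1/EINTR when interrupted,
            // so the flag is re-checked promptly instead of after 100ms.
            if (bthread_usleep(100 * 1000L) < 0 && errno == EINTR) {
                continue;
            }
            // ... periodic work ...
        }
        return NULL;
    }

    // Roughly what bthread_stop(tid) does, but with our own flag:
    static void ask_to_quit(bthread_t tid) {
        g_quit.store(true, butil::memory_order_relaxed);  // set the flag first
        bthread_interrupt(tid);                           // then wake the worker
    }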
This diff is collapsed.
@@ -64,14 +64,9 @@ int butex_requeue(void* butex1, void* butex2);
 // abstime is not NULL.
 // About |abstime|:
 //   Different from FUTEX_WAIT, butex_wait uses absolute time.
+// Returns 0 on success, -1 otherwise and errno is set.
 int butex_wait(void* butex, int expected_value, const timespec* abstime);
 
-// Same with butex_wait except that this function cannot be woken up by
-// bthread_stop(), although this function still returns -1(ESTOP) after
-// wake-up.
-int butex_wait_uninterruptible(void* butex, int expected_value,
-                               const timespec* abstime);
-
 }  // namespace bthread
 
 #endif  // BAIDU_BTHREAD_BUTEX_H
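With butex_wait_uninterruptible() removed, every in-tree caller now follows the same retry pattern that appears throughout this commit: treat EWOULDBLOCK (the value already changed) and EINTR (interrupted) as spurious wake-ups and re-check the predicate, and propagate any other errno. A condensed sketch of that pattern against the internal API above; the SimpleEvent wrapper is ours and only illustrative.

    #include <errno.h>
    #include "bthread/butex.h"
    #include "butil/atomicops.h"

    // A one-shot event built on the internal butex API (illustration only).
    struct SimpleEvent {
        int* butex;  // 0 = not signaled, 1 = signaled

        SimpleEvent() {
            butex = bthread::butex_create_checked<int>();
            *butex = 0;
        }
        ~SimpleEvent() { bthread::butex_destroy(butex); }

        void signal() {
            ((butil::atomic<int>*)butex)->store(1, butil::memory_order_release);
            bthread::butex_wake_all(butex);
        }

        // Returns 0 once signaled, or an errno on real failures.
        int wait() {
            for (;;) {
                const int seen =
                    ((butil::atomic<int>*)butex)->load(butil::memory_order_acquire);
                if (seen != 0) {
                    return 0;
                }
                // EWOULDBLOCK: value changed before blocking; EINTR: interrupted.
                // Both just mean "re-check the predicate".
                if (bthread::butex_wait(butex, seen, NULL) < 0 &&
                    errno != EWOULDBLOCK && errno != EINTR) {
                    return errno;
                }
            }
        }
    };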
@@ -95,7 +95,18 @@ int bthread_cond_wait(bthread_cond_t* __restrict c,
     bthread_mutex_unlock(m);
     int rc1 = 0;
     if (bthread::butex_wait(ic->seq, expected_seq, NULL) < 0 &&
-        errno != EWOULDBLOCK) {
+        errno != EWOULDBLOCK && errno != EINTR/*note*/) {
+        // note: EINTR should not be returned by cond_*wait according to docs
+        // on pthread, however a spurious wake-up is OK, which is what we do
+        // here so that users can check flags ASAP in the loop that usually
+        // accompanies the cond_wait. For example:
+        //   mutex.lock();
+        //   while (!stop && other-predicates) {
+        //       cond_wait(&mutex);
+        //   }
+        //   mutex.unlock();
+        // After interruption, above code should wake up from the cond_wait
+        // soon and check the `stop' flag and other predicates.
         rc1 = errno;
     }
     const int rc2 = bthread_mutex_lock_contended(m);
@@ -118,7 +129,8 @@ int bthread_cond_timedwait(bthread_cond_t* __restrict c,
     bthread_mutex_unlock(m);
     int rc1 = 0;
     if (bthread::butex_wait(ic->seq, expected_seq, abstime) < 0 &&
-        errno != EWOULDBLOCK) {
+        errno != EWOULDBLOCK && errno != EINTR/*note*/) {
+        // note: see the comments in bthread_cond_wait on EINTR.
         rc1 = errno;
     }
     const int rc2 = bthread_mutex_lock_contended(m);
...
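Below is a concrete version of the loop sketched in the comment above, using the public bthread mutex/condition API; the flag, the counter and the helper are ours and not part of the commit.

    #include <bthread/bthread.h>   // bthread_mutex_*/bthread_cond_* (assumed path)

    static bthread_mutex_t g_mutex;   // bthread_mutex_init(&g_mutex, NULL) at startup
    static bthread_cond_t  g_cond;    // bthread_cond_init(&g_cond, NULL) at startup
    static bool g_stop = false;       // protected by g_mutex
    static int  g_ready = 0;          // the real predicate, protected by g_mutex

    // Waits until there is work or we are asked to stop. Since cond_wait may
    // wake spuriously (including right after an interruption), the predicates
    // are always re-tested in the loop.
    static bool wait_for_work() {
        bthread_mutex_lock(&g_mutex);
        while (!g_stop && g_ready == 0) {
            bthread_cond_wait(&g_cond, &g_mutex);  // returns 0 even when interrupted
        }
        const bool has_work = (g_ready > 0);
        if (has_work) {
            --g_ready;
        }
        bthread_mutex_unlock(&g_mutex);
        return has_work;
    }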
@@ -50,15 +50,18 @@ void CountdownEvent::signal(int sig) {
     butex_wake_all(saved_butex);
 }
 
-void CountdownEvent::wait() {
+int CountdownEvent::wait() {
     _wait_was_invoked = true;
     for (;;) {
         const int seen_counter =
             ((butil::atomic<int>*)_butex)->load(butil::memory_order_acquire);
         if (seen_counter <= 0) {
-            return;
+            return 0;
+        }
+        if (butex_wait(_butex, seen_counter, NULL) < 0 &&
+            errno != EWOULDBLOCK && errno != EINTR) {
+            return errno;
         }
-        butex_wait(_butex, seen_counter, NULL);
     }
 }
@@ -93,8 +96,8 @@ int CountdownEvent::timed_wait(const timespec& duetime) {
         if (seen_counter <= 0) {
             return 0;
         }
-        const int rc = butex_wait(_butex, seen_counter, &duetime);
-        if (rc < 0 && errno != EWOULDBLOCK) {
+        if (butex_wait(_butex, seen_counter, &duetime) < 0 &&
+            errno != EWOULDBLOCK && errno != EINTR) {
             return errno;
         }
     }
...
@@ -38,11 +38,14 @@ public:
     // Decrease the counter by |sig|
     void signal(int sig = 1);
 
-    // Block current thread until the counter reaches 0
-    void wait();
+    // Block current thread until the counter reaches 0.
+    // Returns 0 on success, error code otherwise.
+    // This method never returns EINTR.
+    int wait();
 
     // Block the current thread until the counter reaches 0 or duetime has expired
-    // Returns 0 on success, ETIMEDOUT otherwise.
+    // Returns 0 on success, error code otherwise. ETIMEDOUT is for timeout.
+    // This method never returns EINTR.
     int timed_wait(const timespec& duetime);
 
 private:
...
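Since wait() now returns an int, callers can separate success from genuine failures without ever seeing EINTR. A hedged usage sketch follows; the constructor taking an initial count and the include path are assumptions based on later versions of the class.

    #include <bthread/bthread.h>
    #include <bthread/countdown_event.h>   // assumed path

    static void* do_shard(void* arg) {
        bthread::CountdownEvent* done = (bthread::CountdownEvent*)arg;
        // ... process one shard of the work ...
        done->signal();   // decrease the counter by 1
        return NULL;
    }

    static int run_shards_in_parallel() {
        const int nshard = 4;
        bthread::CountdownEvent done(nshard);   // assumed: initial count via ctor
        bthread_t tids[nshard];
        for (int i = 0; i < nshard; ++i) {
            bthread_start_background(&tids[i], NULL, do_shard, &done);
        }
        // Blocks until every shard signaled; never returns EINTR (see above).
        return done.wait();   // 0 on success, errno otherwise
    }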
@@ -21,7 +21,7 @@
 // Define errno in bthread/errno.h
 extern const int ESTOP = -20;
 
-BAIDU_REGISTER_ERRNO(ESTOP, "the thread is stopping")
+BAIDU_REGISTER_ERRNO(ESTOP, "The structure is stopping")
 
 extern "C" {
...
@@ -206,17 +206,19 @@ void ExecutionQueueBase::_on_recycle() {
 int ExecutionQueueBase::join(uint64_t id) {
     const slot_id_t slot = slot_of_id(id);
     ExecutionQueueBase* const m = butil::address_resource(slot);
-    if (BAIDU_LIKELY(m != NULL)) {
-        int expected = _version_of_id(id);
-        // 1: acquire fence to make the join thread sees the newest changes
-        // when it sees the unmatch of _join_butex and id
-        while (expected ==
-               m->_join_butex->load(butil::memory_order_acquire/*1*/)) {
-            butex_wait(m->_join_butex, expected, NULL);
-        }
-        return 0;
+    if (m == NULL) {
+        // The queue is not created yet, this join is definitely wrong.
+        return EINVAL;
     }
-    return EINVAL;
+    int expected = _version_of_id(id);
+    // acquire fence makes this thread see changes before changing _join_butex.
+    while (expected == m->_join_butex->load(butil::memory_order_acquire)) {
+        if (butex_wait(m->_join_butex, expected, NULL) < 0 &&
+            errno != EWOULDBLOCK && errno != EINTR) {
+            return errno;
+        }
+    }
+    return 0;
 }
 
 int ExecutionQueueBase::stop() {
...
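For completeness, this is roughly how ExecutionQueueBase::join() is reached from user code, via the templated wrapper; the exact wrapper API (execution_queue_start/execute/stop/join and TaskIterator) is assumed from the public header bthread/execution_queue.h and is not shown in this diff.

    #include <bthread/execution_queue.h>   // assumed path of the wrapper

    // Consumes queued integers in a dedicated bthread.
    static int consume(void* /*meta*/, bthread::TaskIterator<int>& iter) {
        if (iter.is_queue_stopped()) {
            return 0;   // final call after execution_queue_stop()
        }
        for (; iter; ++iter) {
            // ... handle *iter ...
        }
        return 0;
    }

    static int demo_queue() {
        bthread::ExecutionQueueId<int> qid;
        bthread::ExecutionQueueOptions opts;
        if (bthread::execution_queue_start(&qid, &opts, consume, NULL) != 0) {
            return -1;
        }
        bthread::execution_queue_execute(qid, 42);
        bthread::execution_queue_stop(qid);
        // With this commit, join() forwards unexpected errno from butex_wait()
        // and returns EINVAL only if the queue was never created.
        return bthread::execution_queue_join(qid);
    }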
@@ -231,12 +231,11 @@ public:
             return -1;
         }
 #endif
-        const int rc = butex_wait(butex, expected_val, abstime);
-        if (rc < 0 && errno == EWOULDBLOCK) {
-            // EpollThread did wake up, there's data.
-            return 0;
+        if (butex_wait(butex, expected_val, abstime) < 0 &&
+            errno != EWOULDBLOCK && errno != EINTR) {
+            return -1;
         }
-        return rc;
+        return 0;
     }
 
     int fd_close(int fd) {
...
@@ -436,11 +436,10 @@ int bthread_id_lock_and_reset_range_verbose(
             uint32_t expected_ver = *butex;
             meta->mutex.unlock();
             ever_contended = true;
-            if (bthread::butex_wait(butex, expected_ver, NULL) < 0) {
-                if (errno != EWOULDBLOCK && errno != ESTOP) {
-                    return errno;
-                }
-            }
+            if (bthread::butex_wait(butex, expected_ver, NULL) < 0 &&
+                errno != EWOULDBLOCK && errno != EINTR) {
+                return errno;
+            }
             meta->mutex.lock();
         } else {  // bthread_id_about_to_destroy was called.
             meta->mutex.unlock();
@@ -511,30 +510,25 @@ int bthread_id_join(bthread_id_t id) __THROW {
     const bthread::IdResourceId slot = bthread::get_slot(id);
     bthread::Id* const meta = address_resource(slot);
     if (!meta) {
+        // The id is not created yet, this join is definitely wrong.
         return EINVAL;
     }
     const uint32_t id_ver = bthread::get_version(id);
     uint32_t* join_butex = meta->join_butex;
-    bool stopped = false;
     while (1) {
         meta->mutex.lock();
         const bool has_ver = meta->has_version(id_ver);
         const uint32_t expected_ver = *join_butex;
         meta->mutex.unlock();
-        if (has_ver) {
-            if (bthread::butex_wait(join_butex, expected_ver, NULL) < 0) {
-                if (errno != EWOULDBLOCK && errno != ESTOP) {
-                    return errno;
-                }
-                if (errno == ESTOP) {
-                    stopped = true;
-                }
-            }
-        } else {
+        if (!has_ver) {
             break;
         }
+        if (bthread::butex_wait(join_butex, expected_ver, NULL) < 0 &&
+            errno != EWOULDBLOCK && errno != EINTR) {
+            return errno;
+        }
     }
-    return stopped ? ESTOP : 0;
+    return 0;
 }
 
 int bthread_id_trylock(bthread_id_t id, void** pdata) __THROW {
...
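The user-visible effect on bthread_id: a joiner of a destroyed id now gets 0 even if it was stopped or interrupted while waiting, and only genuine errors surface. A small hedged sketch; the functions are taken from bthread/id.h as exercised by the tests further down, while the flow itself is ours.

    #include <bthread/id.h>   // assumed path

    static int demo_id_roundtrip() {
        bthread_id_t id;
        if (bthread_id_create(&id, NULL /*data*/, NULL /*on_error*/) != 0) {
            return -1;
        }
        void* data = NULL;
        if (bthread_id_lock(id, &data) != 0) {
            return -1;
        }
        // ... use the id while it is locked ...
        bthread_id_unlock_and_destroy(id);   // wakes all joiners
        // Returns 0 once the id is destroyed; being stopped/interrupted while
        // joining no longer turns this into ESTOP.
        return bthread_id_join(id);
    }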
@@ -622,8 +622,10 @@ BAIDU_CASSERT(sizeof(unsigned) == sizeof(MutexInternal),
 inline int mutex_lock_contended(bthread_mutex_t* m) {
     butil::atomic<unsigned>* whole = (butil::atomic<unsigned>*)m->butex;
     while (whole->exchange(BTHREAD_MUTEX_CONTENDED) & BTHREAD_MUTEX_LOCKED) {
-        if (bthread::butex_wait(whole, BTHREAD_MUTEX_CONTENDED, NULL) < 0
-            && errno != EWOULDBLOCK) {
+        if (bthread::butex_wait(whole, BTHREAD_MUTEX_CONTENDED, NULL) < 0 &&
+            errno != EWOULDBLOCK && errno != EINTR/*note*/) {
+            // note: a mutex lock should ignore interruptions in general since
+            // user code is unlikely to check the return value.
             return errno;
         }
     }
@@ -634,8 +636,10 @@ inline int mutex_timedlock_contended(
     bthread_mutex_t* m, const struct timespec* __restrict abstime) {
     butil::atomic<unsigned>* whole = (butil::atomic<unsigned>*)m->butex;
     while (whole->exchange(BTHREAD_MUTEX_CONTENDED) & BTHREAD_MUTEX_LOCKED) {
-        if (bthread::butex_wait(whole, BTHREAD_MUTEX_CONTENDED, abstime) < 0
-            && errno != EWOULDBLOCK) {
+        if (bthread::butex_wait(whole, BTHREAD_MUTEX_CONTENDED, abstime) < 0 &&
+            errno != EWOULDBLOCK && errno != EINTR/*note*/) {
+            // note: a mutex lock should ignore interruptions in general since
+            // user code is unlikely to check the return value.
             return errno;
         }
     }
...
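The note added above reflects how the lock is typically used: callers rarely check the return value, so a surfaced EINTR would be silently dropped anyway. A short illustration of that calling style (the mutex and counter are ours, not from the commit):

    #include <bthread/bthread.h>

    static bthread_mutex_t g_m;   // bthread_mutex_init(&g_m, NULL) at startup
    static int g_counter = 0;

    static void bump() {
        // Typical caller: the return value of lock() is ignored, which is why
        // mutex_lock_contended() above swallows EINTR instead of surfacing it.
        bthread_mutex_lock(&g_m);
        ++g_counter;
        bthread_mutex_unlock(&g_m);
    }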
This diff is collapsed.
@@ -117,8 +117,9 @@ public:
     // Returns 0 on success, -1 otherwise and errno is set.
     static int get_attr(bthread_t tid, bthread_attr_t* attr);
 
-    // Returns non-zero the `tid' is stopped, 0 otherwise.
-    static int stopped(bthread_t tid);
+    // Get/set TaskMeta.stop of the tid.
+    static void set_stopped(bthread_t tid);
+    static bool is_stopped(bthread_t tid);
 
     // The bthread running run_main_task();
     bthread_t main_tid() const { return _main_tid; }
@@ -163,9 +164,9 @@ public:
     // Call this instead of delete.
     void destroy_self();
 
-    // Wake up `tid' if it's sleeping.
-    // Returns 0 on success, error code otherwise.
-    int stop_usleep(bthread_t tid);
+    // Wake up blocking ops in the thread.
+    // Returns 0 on success, errno otherwise.
+    static int interrupt(bthread_t tid, TaskControl* c);
 
     // Get the meta associate with the task.
     static TaskMeta* address_meta(bthread_t tid);
...
@@ -50,8 +50,13 @@ struct TaskMeta {
     butil::atomic<ButexWaiter*> current_waiter;
     uint64_t current_sleep;
 
+    // A builtin flag to mark if the thread is stopping.
     bool stop;
-    bool interruptible;
+
+    // The thread is interrupted and should wake up from some blocking ops.
+    bool interrupted;
+
+    // Scheduling of the thread can be delayed.
     bool about_to_quit;
 
     // [Not Reset] guarantee visibility of version_butex.
...
@@ -223,7 +223,7 @@ TEST(ButexTest, stop_after_running) {
         const bthread_attr_t attr =
             (i == 0 ? BTHREAD_ATTR_PTHREAD : BTHREAD_ATTR_NORMAL);
         bthread_t th;
-        ButexWaitArg arg = { butex, *butex, WAIT_MSEC, ESTOP };
+        ButexWaitArg arg = { butex, *butex, WAIT_MSEC, EINTR };
 
         tm.start();
         ASSERT_EQ(0, bthread_start_urgent(&th, &attr, wait_butex, &arg));
@@ -250,7 +250,7 @@ TEST(ButexTest, stop_before_running) {
         const bthread_attr_t attr =
            (i == 0 ? BTHREAD_ATTR_PTHREAD : BTHREAD_ATTR_NORMAL) | BTHREAD_NOSIGNAL;
         bthread_t th;
-        ButexWaitArg arg = { butex, *butex, WAIT_MSEC, ESTOP };
+        ButexWaitArg arg = { butex, *butex, WAIT_MSEC, EINTR };
 
         tm.start();
         ASSERT_EQ(0, bthread_start_background(&th, &attr, wait_butex, &arg));
@@ -268,7 +268,7 @@ TEST(ButexTest, stop_before_running) {
 }
 
 void* join_the_waiter(void* arg) {
-    EXPECT_EQ(ESTOP, bthread_join((bthread_t)arg, NULL));
+    EXPECT_EQ(0, bthread_join((bthread_t)arg, NULL));
     return NULL;
 }
 
@@ -277,7 +277,7 @@ TEST(ButexTest, join_cant_be_wakeup) {
     int* butex = bthread::butex_create_checked<int>();
     *butex = 7;
     butil::Timer tm;
-    ButexWaitArg arg = { butex, *butex, 1000, ESTOP };
+    ButexWaitArg arg = { butex, *butex, 1000, EINTR };
 
     for (int i = 0; i < 2; ++i) {
         const bthread_attr_t attr =
...
@@ -292,7 +292,7 @@ struct StoppedWaiterArgs {
 void* stopped_waiter(void* void_arg) {
     StoppedWaiterArgs* args = (StoppedWaiterArgs*)void_arg;
     args->thread_started = true;
-    EXPECT_EQ(ESTOP, bthread_id_join(args->id));
+    EXPECT_EQ(0, bthread_id_join(args->id));
     EXPECT_EQ(get_version(args->id) + 4, bthread::id_value(args->id));
     return NULL;
 }
 
@@ -312,11 +312,6 @@ TEST(BthreadIdTest, stop_a_wait_after_fight_before_signal) {
         args[i].thread_started = false;
         ASSERT_EQ(0, bthread_start_urgent(&th[i], NULL, stopped_waiter, &args[i]));
     }
-    for (size_t i = 0; i < ARRAY_SIZE(th); ++i) {
-        if (!args[i].thread_started) {
-            bthread_usleep(1000);
-        }
-    }
 
     // stop does not wake up bthread_id_join
     for (size_t i = 0; i < ARRAY_SIZE(th); ++i) {
         bthread_stop(th[i]);
...