Commit 83d6540d authored by gejun

Patch svn r35154

parent 9554a130
@@ -10,7 +10,9 @@
 #include <stdlib.h>                         // posix_memalign
 #include "base/macros.h"                    // BAIDU_CASSERT
 #include "base/memory/singleton_on_pthread_once.h"
-#include "bvar/reducer.h"                   // bvar::Adder
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h" // RunningOnValgrind
+#include "base/third_party/valgrind/valgrind.h" // VALGRIND_STACK_REGISTER
+#include "bvar/passive_status.h"
 #include "bthread/types.h"                  // BTHREAD_STACKTYPE_*
 #include "bthread/stack.h"
@@ -29,78 +31,103 @@ BAIDU_CASSERT(BTHREAD_STACKTYPE_NORMAL == STACK_TYPE_NORMAL, must_match);
 BAIDU_CASSERT(BTHREAD_STACKTYPE_LARGE == STACK_TYPE_LARGE, must_match);
 BAIDU_CASSERT(STACK_TYPE_MAIN == 0, must_be_0);
 
-extern const int PAGESIZE = getpagesize();
-extern const int PAGESIZE_M1 = PAGESIZE - 1;
-const int MIN_STACKSIZE = PAGESIZE * 2;
-const int MIN_GUARDSIZE = PAGESIZE;
-
-struct StackCount : public bvar::Adder<int64_t> {
-    StackCount() : bvar::Adder<int64_t>("bthread_stack_count") {}
-};
-inline bvar::Adder<int64_t>& stack_count() {
-    return *base::get_leaky_singleton<StackCount>();
+static base::static_atomic<int64_t> s_stack_count = BASE_STATIC_ATOMIC_INIT(0);
+static int64_t get_stack_count(void*) {
+    return s_stack_count.load(base::memory_order_relaxed);
 }
+static bvar::PassiveStatus<int64_t> bvar_stack_count(
+    "bthread_stack_count", get_stack_count, NULL);
 
-void* allocate_stack(int* inout_stacksize, int* inout_guardsize) {
-    // Align stack and guard size.
-    int stacksize =
-        (std::max(*inout_stacksize, MIN_STACKSIZE) + PAGESIZE_M1) &
-        ~PAGESIZE_M1;
-    int guardsize =
-        (std::max(*inout_guardsize, MIN_GUARDSIZE) + PAGESIZE_M1) &
+int allocate_stack_storage(StackStorage* s, int stacksize_in, int guardsize_in) {
+    const static int PAGESIZE = getpagesize();
+    const int PAGESIZE_M1 = PAGESIZE - 1;
+    const int MIN_STACKSIZE = PAGESIZE * 2;
+    const int MIN_GUARDSIZE = PAGESIZE;
+
+    // Align stacksize
+    const int stacksize =
+        (std::max(stacksize_in, MIN_STACKSIZE) + PAGESIZE_M1) &
         ~PAGESIZE_M1;
-    if (FLAGS_guard_page_size <= 0) {
+
+    if (guardsize_in <= 0) {
         void* mem = malloc(stacksize);
         if (NULL == mem) {
-            return NULL;
+            PLOG_EVERY_SECOND(ERROR) << "Fail to malloc (size="
+                                     << stacksize << ")";
+            return -1;
         }
-        stack_count() << 1;
-        *inout_stacksize = stacksize;
-        *inout_guardsize = 0;
-        return (char*)mem + stacksize;
+        s_stack_count.fetch_add(1, base::memory_order_relaxed);
+        s->bottom = (char*)mem + stacksize;
+        s->stacksize = stacksize;
+        s->guardsize = 0;
+        if (RunningOnValgrind()) {
+            s->valgrind_stack_id = VALGRIND_STACK_REGISTER(
+                s->bottom, (char*)s->bottom - stacksize);
+        } else {
+            s->valgrind_stack_id = 0;
+        }
+        return 0;
     } else {
+        // Align guardsize
+        const int guardsize =
+            (std::max(guardsize_in, MIN_GUARDSIZE) + PAGESIZE_M1) &
+            ~PAGESIZE_M1;
         const int memsize = stacksize + guardsize;
         void* const mem = mmap(NULL, memsize, (PROT_READ | PROT_WRITE),
                                (MAP_PRIVATE | MAP_ANONYMOUS), -1, 0);
         if (MAP_FAILED == mem) {
             PLOG_EVERY_SECOND(ERROR)
-                << "Fail to mmap, which is likely to be limited by the value"
-                " in /proc/sys/vm/max_map_count";
+                << "Fail to mmap size=" << memsize << " stack_count="
+                << s_stack_count.load(base::memory_order_relaxed)
+                << ", possibly limited by /proc/sys/vm/max_map_count";
             // may fail due to limit of max_map_count (65536 in default)
-            return NULL;
+            return -1;
         }
-        char* aligned_mem = (char*)(((intptr_t)mem + PAGESIZE_M1) & ~PAGESIZE_M1);
-        const int offset = aligned_mem - (char*)mem;
+        void* aligned_mem = (void*)(((intptr_t)mem + PAGESIZE_M1) & ~PAGESIZE_M1);
+        if (aligned_mem != mem) {
+            LOG_ONCE(ERROR) << "addr=" << mem << " returned by mmap is not "
+                "aligned by pagesize=" << PAGESIZE;
+        }
+        const int offset = (char*)aligned_mem - (char*)mem;
         if (guardsize <= offset ||
             mprotect(aligned_mem, guardsize - offset, PROT_NONE) != 0) {
             munmap(mem, memsize);
-            return NULL;
+            PLOG_EVERY_SECOND(ERROR)
+                << "Fail to mprotect " << (void*)aligned_mem << " length="
+                << guardsize - offset;
+            return -1;
         }
-        stack_count() << 1;
-        *inout_stacksize = stacksize;
-        *inout_guardsize = guardsize;
-        return (char*)mem + memsize;
+        s_stack_count.fetch_add(1, base::memory_order_relaxed);
+        s->bottom = (char*)mem + memsize;
+        s->stacksize = stacksize;
+        s->guardsize = guardsize;
+        if (RunningOnValgrind()) {
+            s->valgrind_stack_id = VALGRIND_STACK_REGISTER(
+                s->bottom, (char*)s->bottom - stacksize);
+        } else {
+            s->valgrind_stack_id = 0;
+        }
+        return 0;
     }
 }
 
-void deallocate_stack(void* mem, int stacksize, int guardsize) {
-    const int memsize = stacksize + guardsize;
-    if (FLAGS_guard_page_size <= 0) {
-        if ((char*)mem > (char*)NULL + memsize) {
-            stack_count() << -1;
-            free((char*)mem - memsize);
-        }
+void deallocate_stack_storage(StackStorage* s) {
+    if (RunningOnValgrind()) {
+        VALGRIND_STACK_DEREGISTER(s->valgrind_stack_id);
+    }
+    const int memsize = s->stacksize + s->guardsize;
+    if ((char*)s->bottom <= (char*)NULL + memsize) {
+        return;
+    }
+    s_stack_count.fetch_sub(1, base::memory_order_relaxed);
+    if (s->guardsize <= 0) {
+        free((char*)s->bottom - memsize);
     } else {
-        if ((char*)mem > (char*)NULL + memsize) {
-            stack_count() << -1;
-            munmap((char*)mem - memsize, memsize);
-        }
+        munmap((char*)s->bottom - memsize, memsize);
     }
 }
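The guard-page arithmetic above is easier to see in isolation. Below is a standalone sketch of the same layout, not taken from the patch and with made-up sizes: round the requested sizes up to whole pages, reserve stack plus guard with mmap, and mprotect the lowest pages so a stack overflow faults instead of silently corrupting a neighboring allocation.

    // Standalone illustration of the guard-page layout (not brpc code).
    #include <sys/mman.h>
    #include <unistd.h>
    #include <algorithm>
    #include <cstdio>

    int main() {
        const int PAGESIZE = getpagesize();
        const int PAGESIZE_M1 = PAGESIZE - 1;
        const int stacksize_in = 65000;   // arbitrary requested stack size
        const int guardsize_in = 4000;    // arbitrary requested guard size

        // Round both sizes up to a multiple of the page size.
        const int stacksize =
            (std::max(stacksize_in, PAGESIZE * 2) + PAGESIZE_M1) & ~PAGESIZE_M1;
        const int guardsize =
            (std::max(guardsize_in, PAGESIZE) + PAGESIZE_M1) & ~PAGESIZE_M1;
        const int memsize = stacksize + guardsize;

        void* mem = mmap(NULL, memsize, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        // Guard pages sit at the low end; the stack grows down toward them.
        if (mprotect(mem, guardsize, PROT_NONE) != 0) {
            perror("mprotect");
            munmap(mem, memsize);
            return 1;
        }
        void* bottom = (char*)mem + memsize;   // highest address of the region
        printf("stacksize=%d guardsize=%d bottom=%p\n", stacksize, guardsize, bottom);
        munmap(mem, memsize);
        return 0;
    }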
......
@@ -12,19 +12,31 @@
 #include "bthread/types.h"
 #include "bthread/context.h"        // bthread_fcontext_t
 #include "base/object_pool.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h" // RunningOnValgrind
+#include "base/third_party/valgrind/valgrind.h" // VALGRIND_STACK_REGISTER
 
 namespace bthread {
 
-struct StackContainer {
-    bthread_fcontext_t context;
+struct StackStorage {
     int stacksize;
     int guardsize;
-    void* stack;
-    int stacktype;
+    // Assume stack grows upwards.
+    // http://www.boost.org/doc/libs/1_55_0/libs/context/doc/html/context/stack.html
+    void* bottom;
     unsigned valgrind_stack_id;
+
+    // Clears all members.
+    void zeroize() {
+        stacksize = 0;
+        guardsize = 0;
+        bottom = NULL;
+        valgrind_stack_id = 0;
+    }
 };
 
+// Allocate a piece of stack.
+int allocate_stack_storage(StackStorage* s, int stacksize, int guardsize);
+// Deallocate a piece of stack. Parameters MUST be returned or set by the
+// corresponding allocate_stack_storage() otherwise behavior is undefined.
+void deallocate_stack_storage(StackStorage* s);
+
 enum StackType {
     STACK_TYPE_MAIN = 0,
@@ -34,12 +46,20 @@ enum StackType {
     STACK_TYPE_LARGE = BTHREAD_STACKTYPE_LARGE
 };
 
-inline StackContainer* get_stack(StackType type, void (*entry)(intptr_t));
-inline void return_stack(StackContainer* sc);
+struct ContextualStack {
+    bthread_fcontext_t context;
+    StackType stacktype;
+    StackStorage storage;
+};
 
-// Allocate/deallocate stacks with guard pages.
-void* allocate_stack(int* stacksize, int* guardsize);
-void deallocate_stack(void* mem, int stacksize, int guardsize);
+// Get a stack in the `type' and run `entry' at the first time that the
+// stack is jumped.
+ContextualStack* get_stack(StackType type, void (*entry)(intptr_t));
+// Recycle a stack. NULL does nothing.
+void return_stack(ContextualStack*);
+// Jump from stack `from' to stack `to'. `from' must be the stack of callsite
+// (to save contexts before jumping)
+void jump_stack(ContextualStack* from, ContextualStack* to);
 
 } // namespace bthread
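For reference, a minimal hypothetical caller of the two functions declared above; the 256KB stack and 4KB guard are arbitrary example values and are rounded up to whole pages internally:

    // Hypothetical usage of allocate_stack_storage()/deallocate_stack_storage().
    #include "bthread/stack.h"

    int make_and_drop_stack() {
        bthread::StackStorage storage;
        storage.zeroize();
        if (bthread::allocate_stack_storage(&storage, 256 * 1024, 4096) != 0) {
            return -1;   // allocation failed
        }
        // storage.bottom is the highest address of the usable stack;
        // storage.stacksize/guardsize hold the page-aligned sizes actually used.
        bthread::deallocate_stack_storage(&storage);
        storage.zeroize();
        return 0;
    }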
......
@@ -17,172 +17,164 @@ struct MainStackClass {};
 
 struct SmallStackClass {
     static int* stack_size_flag;
-    const static int stacktype = STACK_TYPE_SMALL;
+    // Older gcc does not allow static const enum, use int instead.
+    static const int stacktype = (int)STACK_TYPE_SMALL;
 };
 
 struct NormalStackClass {
     static int* stack_size_flag;
-    const static int stacktype = STACK_TYPE_NORMAL;
+    static const int stacktype = (int)STACK_TYPE_NORMAL;
 };
 
 struct LargeStackClass {
     static int* stack_size_flag;
-    const static int stacktype = STACK_TYPE_LARGE;
+    static const int stacktype = (int)STACK_TYPE_LARGE;
 };
 
-template <typename StackClass> struct StackContainerFactory {
-    struct Wrapper : public StackContainer {
+template <typename StackClass> struct StackFactory {
+    struct Wrapper : public ContextualStack {
         explicit Wrapper(void (*entry)(intptr_t)) {
-            stacksize = *StackClass::stack_size_flag;
-            guardsize = FLAGS_guard_page_size;
-            stack = allocate_stack(&stacksize, &guardsize);
-            valgrind_stack_id = 0;
-            if (BAIDU_UNLIKELY(NULL == stack)) {
+            if (allocate_stack_storage(&storage, *StackClass::stack_size_flag,
+                                       FLAGS_guard_page_size) != 0) {
+                storage.zeroize();
                 context = NULL;
                 return;
             }
-            // TODO: Growth direction of stack is arch-dependent(not handled by
-            // fcontext). We temporarily assume stack grows upwards.
-            // http://www.boost.org/doc/libs/1_55_0/libs/context/doc/html/context/stack.html
-            if (RunningOnValgrind()) {
-                valgrind_stack_id = VALGRIND_STACK_REGISTER(
-                    stack, (char*)stack - stacksize);
-            }
-            context = bthread_make_fcontext(stack, stacksize, entry);
-            stacktype = StackClass::stacktype;
+            context = bthread_make_fcontext(storage.bottom, storage.stacksize, entry);
+            stacktype = (StackType)StackClass::stacktype;
         }
         ~Wrapper() {
-            if (stack) {
-                if (RunningOnValgrind()) {
-                    VALGRIND_STACK_DEREGISTER(valgrind_stack_id);
-                    valgrind_stack_id = 0;
-                }
-                deallocate_stack(stack, stacksize, guardsize);
-                stack = NULL;
+            if (context) {
+                context = NULL;
+                deallocate_stack_storage(&storage);
+                storage.zeroize();
             }
-            context = NULL;
         }
     };
 
-    static StackContainer* get_stack(void (*entry)(intptr_t)) {
+    static ContextualStack* get_stack(void (*entry)(intptr_t)) {
         return base::get_object<Wrapper>(entry);
     }
 
-    static void return_stack(StackContainer* sc) {
+    static void return_stack(ContextualStack* sc) {
         base::return_object(static_cast<Wrapper*>(sc));
     }
 };
 
-template <> struct StackContainerFactory<MainStackClass> {
-    static StackContainer* get_stack(void (*)(intptr_t)) {
-        StackContainer* sc = new (std::nothrow) StackContainer;
-        if (NULL == sc) {
+template <> struct StackFactory<MainStackClass> {
+    static ContextualStack* get_stack(void (*)(intptr_t)) {
+        ContextualStack* s = new (std::nothrow) ContextualStack;
+        if (NULL == s) {
             return NULL;
         }
-        sc->stacksize = 0;
-        sc->guardsize = 0;
-        sc->stacktype = STACK_TYPE_MAIN;
-        return sc;
+        s->context = NULL;
+        s->stacktype = STACK_TYPE_MAIN;
+        s->storage.zeroize();
+        return s;
     }
 
-    static void return_stack(StackContainer* sc) {
-        delete sc;
+    static void return_stack(ContextualStack* s) {
+        delete s;
     }
 };
 
-StackContainer* get_stack(StackType type, void (*entry)(intptr_t)) {
+inline ContextualStack* get_stack(StackType type, void (*entry)(intptr_t)) {
     switch (type) {
     case STACK_TYPE_PTHREAD:
         return NULL;
     case STACK_TYPE_SMALL:
-        return StackContainerFactory<SmallStackClass>::get_stack(entry);
+        return StackFactory<SmallStackClass>::get_stack(entry);
     case STACK_TYPE_NORMAL:
-        return StackContainerFactory<NormalStackClass>::get_stack(entry);
+        return StackFactory<NormalStackClass>::get_stack(entry);
     case STACK_TYPE_LARGE:
-        return StackContainerFactory<LargeStackClass>::get_stack(entry);
+        return StackFactory<LargeStackClass>::get_stack(entry);
     case STACK_TYPE_MAIN:
-        return StackContainerFactory<MainStackClass>::get_stack(entry);
+        return StackFactory<MainStackClass>::get_stack(entry);
     }
     return NULL;
 }
 
-void return_stack(StackContainer* sc) {
-    if (NULL == sc) {
+inline void return_stack(ContextualStack* s) {
+    if (NULL == s) {
         return;
     }
-    switch (sc->stacktype) {
+    switch (s->stacktype) {
     case STACK_TYPE_PTHREAD:
        assert(false);
        return;
    case STACK_TYPE_SMALL:
-        return StackContainerFactory<SmallStackClass>::return_stack(sc);
+        return StackFactory<SmallStackClass>::return_stack(s);
    case STACK_TYPE_NORMAL:
-        return StackContainerFactory<NormalStackClass>::return_stack(sc);
+        return StackFactory<NormalStackClass>::return_stack(s);
    case STACK_TYPE_LARGE:
-        return StackContainerFactory<LargeStackClass>::return_stack(sc);
+        return StackFactory<LargeStackClass>::return_stack(s);
    case STACK_TYPE_MAIN:
-        return StackContainerFactory<MainStackClass>::return_stack(sc);
+        return StackFactory<MainStackClass>::return_stack(s);
    }
 }
 
+inline void jump_stack(ContextualStack* from, ContextualStack* to) {
+    bthread_jump_fcontext(&from->context, to->context, 0/*not skip remained*/);
+}
+
 }  // namespace bthread
 
 namespace base {
 
 template <> struct ObjectPoolBlockMaxItem<
-    bthread::StackContainerFactory<bthread::LargeStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::LargeStackClass>::Wrapper> {
     static const size_t value = 64;
 };
 template <> struct ObjectPoolBlockMaxItem<
-    bthread::StackContainerFactory<bthread::NormalStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::NormalStackClass>::Wrapper> {
     static const size_t value = 64;
 };
 template <> struct ObjectPoolBlockMaxItem<
-    bthread::StackContainerFactory<bthread::SmallStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::SmallStackClass>::Wrapper> {
     static const size_t value = 64;
 };
 
 template <> struct ObjectPoolFreeChunkMaxItem<
-    bthread::StackContainerFactory<bthread::SmallStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::SmallStackClass>::Wrapper> {
     inline static size_t value() {
         return (FLAGS_tc_stack_small <= 0 ? 0 : FLAGS_tc_stack_small);
     }
 };
 template <> struct ObjectPoolFreeChunkMaxItem<
-    bthread::StackContainerFactory<bthread::NormalStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::NormalStackClass>::Wrapper> {
     inline static size_t value() {
         return (FLAGS_tc_stack_normal <= 0 ? 0 : FLAGS_tc_stack_normal);
     }
 };
 template <> struct ObjectPoolFreeChunkMaxItem<
-    bthread::StackContainerFactory<bthread::LargeStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::LargeStackClass>::Wrapper> {
     inline static size_t value() { return 1UL; }
 };
 
 template <> struct ObjectPoolValidator<
-    bthread::StackContainerFactory<bthread::LargeStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::LargeStackClass>::Wrapper> {
     inline static bool validate(
-        const bthread::StackContainerFactory<bthread::LargeStackClass>::Wrapper* w) {
-        return w->stack != NULL;
+        const bthread::StackFactory<bthread::LargeStackClass>::Wrapper* w) {
+        return w->context != NULL;
     }
 };
 template <> struct ObjectPoolValidator<
-    bthread::StackContainerFactory<bthread::NormalStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::NormalStackClass>::Wrapper> {
     inline static bool validate(
-        const bthread::StackContainerFactory<bthread::NormalStackClass>::Wrapper* w) {
-        return w->stack != NULL;
+        const bthread::StackFactory<bthread::NormalStackClass>::Wrapper* w) {
+        return w->context != NULL;
     }
 };
 template <> struct ObjectPoolValidator<
-    bthread::StackContainerFactory<bthread::SmallStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::SmallStackClass>::Wrapper> {
     inline static bool validate(
-        const bthread::StackContainerFactory<bthread::SmallStackClass>::Wrapper* w) {
-        return w->stack != NULL;
+        const bthread::StackFactory<bthread::SmallStackClass>::Wrapper* w) {
+        return w->context != NULL;
    }
 };
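Roughly how these pieces fit together, as a sketch rather than actual TaskGroup code: the scheduler keeps one ContextualStack per running bthread plus a main stack and switches between them with jump_stack(). The sketch assumes bthread/stack.h pulls in the inline definitions above, and uses file-scope pointers where the real scheduler keeps thread-local state.

    // Sketch of the get_stack/jump_stack/return_stack lifecycle (illustrative only).
    #include "bthread/stack.h"

    static bthread::ContextualStack* g_main = NULL;
    static bthread::ContextualStack* g_work = NULL;

    // Runs on the freshly made stack the first time it is jumped to.
    static void entry(intptr_t) {
        // ... do the bthread's work on g_work's stack ...
        bthread::jump_stack(g_work, g_main);   // save g_work's context, resume caller
    }

    static void run_once() {
        g_main = bthread::get_stack(bthread::STACK_TYPE_MAIN, NULL);
        g_work = bthread::get_stack(bthread::STACK_TYPE_NORMAL, entry);
        if (g_work != NULL) {
            bthread::jump_stack(g_main, g_work);  // save our context, run entry()
            bthread::return_stack(g_work);        // recycle into the object pool
        }
        bthread::return_stack(g_main);            // MAIN stacks are simply deleted
    }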
......
@@ -168,7 +168,7 @@ void TaskGroup::run_main_task() {
     while (wait_task(&tid)) {
         TaskGroup::sched_to(&dummy, tid);
         DCHECK_EQ(this, dummy);
-        DCHECK_EQ(_cur_meta->stack_container, _main_stack_container);
+        DCHECK_EQ(_cur_meta->stack, _main_stack);
         if (_cur_meta->tid != _main_tid) {
             TaskGroup::task_runner(1/*skip remained*/);
         }
@@ -200,7 +200,7 @@ TaskGroup::TaskGroup(TaskControl* c)
     , _last_context_remained(NULL)
     , _last_context_remained_arg(NULL)
     , _pl(NULL)
-    , _main_stack_container(NULL)
+    , _main_stack(NULL)
     , _main_tid(0)
     , _remote_num_nosignal(0)
     , _remote_nsignaled(0)
@@ -214,7 +214,7 @@ TaskGroup::TaskGroup(TaskControl* c)
 TaskGroup::~TaskGroup() {
     if (_main_tid) {
         TaskMeta* m = address_meta(_main_tid);
-        CHECK(_main_stack_container == m->stack_container);
+        CHECK(_main_stack == m->stack);
         return_stack(m->release_stack());
         return_resource(get_slot(_main_tid));
         _main_tid = 0;
@@ -230,8 +230,8 @@ int TaskGroup::init(size_t runqueue_capacity) {
         LOG(FATAL) << "Fail to init _remote_rq";
         return -1;
     }
-    StackContainer* sc = get_stack(STACK_TYPE_MAIN, NULL);
-    if (NULL == sc) {
+    ContextualStack* stk = get_stack(STACK_TYPE_MAIN, NULL);
+    if (NULL == stk) {
         LOG(FATAL) << "Fail to get main stack container";
         return -1;
     }
@@ -258,11 +258,11 @@ int TaskGroup::init(size_t runqueue_capacity) {
     m->stat = EMPTY_STAT;
     m->attr = BTHREAD_ATTR_TASKGROUP;
     m->tid = make_tid(*m->version_butex, slot);
-    m->set_stack(sc);
+    m->set_stack(stk);
 
     _cur_meta = m;
     _main_tid = m->tid;
-    _main_stack_container = sc;
+    _main_stack = stk;
     _last_run_ns = base::cpuwide_time_ns();
     return 0;
 }
@@ -366,7 +366,7 @@ void TaskGroup::_release_last_context(void* arg) {
     if (m->stack_type() != STACK_TYPE_PTHREAD) {
         return_stack(m->release_stack()/*may be NULL*/);
     } else {
-        // it's _main_stack_container, don't return.
+        // it's _main_stack, don't return.
         m->set_stack(NULL);
     }
     return_resource(get_slot(m->tid));
@@ -393,7 +393,7 @@ int TaskGroup::start_foreground(TaskGroup** pg,
     m->about_to_quit = false;
     m->fn = fn;
     m->arg = arg;
-    CHECK(m->stack_container == NULL);
+    CHECK(m->stack == NULL);
     m->attr = using_attr;
     m->local_storage = LOCAL_STORAGE_INIT;
     m->cpuwide_start_ns = start_ns;
@@ -441,7 +441,7 @@ int TaskGroup::start_background(bthread_t* __restrict th,
     m->about_to_quit = false;
     m->fn = fn;
     m->arg = arg;
-    CHECK(m->stack_container == NULL);
+    CHECK(m->stack == NULL);
     m->attr = using_attr;
     m->local_storage = LOCAL_STORAGE_INIT;
     m->cpuwide_start_ns = start_ns;
@@ -547,22 +547,22 @@ void TaskGroup::ending_sched(TaskGroup** pg) {
     TaskMeta* const cur_meta = g->_cur_meta;
     TaskMeta* next_meta = address_meta(next_tid);
-    if (next_meta->stack_container == NULL) {
+    if (next_meta->stack == NULL) {
         if (next_meta->stack_type() == cur_meta->stack_type()) {
             // also works with pthread_task scheduling to pthread_task, the
-            // transfered stack_container is just _main_stack_container.
+            // transfered stack is just _main_stack.
             next_meta->set_stack(cur_meta->release_stack());
         } else {
-            StackContainer* sc = get_stack(next_meta->stack_type(), task_runner);
-            if (sc != NULL) {
-                next_meta->set_stack(sc);
+            ContextualStack* stk = get_stack(next_meta->stack_type(), task_runner);
+            if (stk) {
+                next_meta->set_stack(stk);
             } else {
                 // stack_type is BTHREAD_STACKTYPE_PTHREAD or out of memory,
                 // In latter case, attr is forced to be BTHREAD_STACKTYPE_PTHREAD.
                 // This basically means that if we can't allocate stack, run
                 // the task in pthread directly.
                 next_meta->attr.stack_type = BTHREAD_STACKTYPE_PTHREAD;
-                next_meta->set_stack(g->_main_stack_container);
+                next_meta->set_stack(g->_main_stack);
             }
         }
     }
@@ -616,19 +616,17 @@ void TaskGroup::sched_to(TaskGroup** pg, TaskMeta* next_meta) {
     }
 
     g->_cur_meta = next_meta;
     tls_bls = next_meta->local_storage;
-    if (cur_meta->stack_container != NULL) {
-        if (next_meta->stack_container != cur_meta->stack_container) {
-            bthread_jump_fcontext(&cur_meta->stack_container->context,
-                                  next_meta->stack_container->context,
-                                  0/*not skip remained*/);
+    if (cur_meta->stack != NULL) {
+        if (next_meta->stack != cur_meta->stack) {
+            jump_stack(cur_meta->stack, next_meta->stack);
             // probably went to another group, need to assign g again.
             g = tls_task_group;
         }
 #ifndef NDEBUG
         else {
             // else pthread_task is switching to another pthread_task, sc
-            // can only equal when they're both _main_stack_container
-            CHECK(cur_meta->stack_container == g->_main_stack_container);
+            // can only equal when they're both _main_stack
+            CHECK(cur_meta->stack == g->_main_stack);
         }
 #endif
     }
......
@@ -126,7 +126,7 @@
     bool is_current_main_task() const { return current_tid() == _main_tid; }
     // True iff current task is in pthread-mode.
     bool is_current_pthread_task() const
-    { return _cur_meta->stack_container == _main_stack_container; }
+    { return _cur_meta->stack == _main_stack; }
 
     // Active time in nanoseconds spent by this TaskGroup.
     int64_t cumulated_cputime_ns() const { return _cumulated_cputime_ns; }
@@ -225,7 +225,7 @@ friend class TaskControl;
 #endif
     size_t _steal_seed;
     size_t _steal_offset;
-    StackContainer* _main_stack_container;
+    ContextualStack* _main_stack;
     bthread_t _main_tid;
     WorkStealingQueue<bthread_t> _rq;
     RemoteTaskQueue _remote_rq;
......
@@ -45,17 +45,17 @@ inline void TaskGroup::exchange(TaskGroup** pg, bthread_t next_tid) {
 
 inline void TaskGroup::sched_to(TaskGroup** pg, bthread_t next_tid) {
     TaskMeta* next_meta = address_meta(next_tid);
-    if (next_meta->stack_container == NULL) {
-        StackContainer* sc = get_stack(next_meta->stack_type(), task_runner);
-        if (sc != NULL) {
-            next_meta->set_stack(sc);
+    if (next_meta->stack == NULL) {
+        ContextualStack* stk = get_stack(next_meta->stack_type(), task_runner);
+        if (stk) {
+            next_meta->set_stack(stk);
         } else {
             // stack_type is BTHREAD_STACKTYPE_PTHREAD or out of memory,
             // In latter case, attr is forced to be BTHREAD_STACKTYPE_PTHREAD.
             // This basically means that if we can't allocate stack, run
             // the task in pthread directly.
             next_meta->attr.stack_type = BTHREAD_STACKTYPE_PTHREAD;
-            next_meta->set_stack((*pg)->_main_stack_container);
+            next_meta->set_stack((*pg)->_main_stack);
         }
     }
     // Update now_ns only when wait_task did yield.
......
@@ -11,7 +11,7 @@
 #include "bthread/butex.h"           // butex_construct/destruct
 #include "base/atomicops.h"          // base::atomic
 #include "bthread/types.h"           // bthread_attr_t
-#include "bthread/stack.h"           // Context, StackContainer
+#include "bthread/stack.h"           // ContextualStack
 
 namespace bthread {
@@ -56,7 +56,8 @@ struct TaskMeta {
     void* (*fn)(void*);
     void* arg;
 
-    StackContainer* stack_container;
+    // Stack of this task.
+    ContextualStack* stack;
 
     // Attributes creating this task
     bthread_attr_t attr;
@@ -74,7 +75,7 @@ public:
     TaskMeta()
         : current_waiter(NULL)
         , current_sleep(0)
-        , stack_container(NULL) {
+        , stack(NULL) {
         pthread_spin_init(&version_lock, 0);
         version_butex = butex_create_checked<uint32_t>();
         *version_butex = 1;
@@ -86,13 +87,13 @@ public:
         pthread_spin_destroy(&version_lock);
     }
 
-    void set_stack(StackContainer* sc) {
-        stack_container = sc;
+    void set_stack(ContextualStack* s) {
+        stack = s;
     }
 
-    StackContainer* release_stack() {
-        StackContainer* tmp = stack_container;
-        stack_container = NULL;
+    ContextualStack* release_stack() {
+        ContextualStack* tmp = stack;
+        stack = NULL;
         return tmp;
     }
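set_stack()/release_stack() let ending_sched() move a stack from the finishing bthread to the next one when both use the same stack type, instead of a round-trip through the pool. A condensed, hypothetical form of that handoff (assuming TaskMeta comes from bthread/task_meta.h):

    // Condensed form of the stack handoff done in TaskGroup::ending_sched() (illustrative).
    #include "bthread/task_meta.h"

    static void hand_over_stack(bthread::TaskMeta* cur, bthread::TaskMeta* next) {
        if (next->stack == NULL &&
            next->stack_type() == cur->stack_type()) {
            // Ownership moves to `next'; cur->stack becomes NULL.
            next->set_stack(cur->release_stack());
        }
    }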
......
@@ -80,7 +80,7 @@ TEST(CondTest, sanity) {
     bthread_t sth;
     ASSERT_EQ(0, bthread_start_urgent(&sth, NULL, signaler, &a));
-    bthread_usleep(SIGNAL_INTERVAL_US * 300);
+    bthread_usleep(SIGNAL_INTERVAL_US * 200);
 
     pthread_mutex_lock(&wake_mutex);
     const size_t nbeforestop = wake_time.size();
@@ -336,11 +336,11 @@ void* disturb_thread(void* arg) {
     return NULL;
 }
 
-TEST(CondTest, mix_usage) {
+TEST(CondTest, mixed_usage) {
     BroadcastArg ba;
     ba.nwaiter = 0;
     ba.cur_waiter = 0;
-    ba.rounds = 100000;
+    ba.rounds = 30000;
     const int NTHREADS = 10;
     ba.nwaiter = NTHREADS * 2;
@@ -424,7 +424,7 @@ void* wait_cond_thread(void* arg) {
     return NULL;
 }
 
-TEST(CondTest, too_many_bthread) {
+TEST(CondTest, too_many_bthreads) {
     std::vector<bthread_t> th;
     th.resize(32768);
     BthreadCond c;
@@ -435,7 +435,7 @@ TEST(CondTest, too_many_bthread) {
         bthread_start_background(&th[i], NULL, usleep_thread, NULL);
     }
     c.Signal();
-    usleep(1 * 1000 * 1000L);
+    usleep(3 * 1000 * 1000L);
     g_stop = true;
     bthread_join(tid, NULL);
     ASSERT_TRUE(started_wait);
......
@@ -57,4 +57,3 @@ if [ -z "$DO_RUN" ]; then
     echo "*** This is a dry-run. To really apply, run: DO_RUN=1 tools/patch_from_svn $1"
 fi
 patch -p0 -u $EXTRA_ARGS < $MODIFIED_PATCHFILE
-rm $MODIFIED_PATCHFILE