Commit 83d6540d authored Aug 24, 2017 by gejun
Patch svn r35154
parent 9554a130
Showing 9 changed files with 209 additions and 172 deletions
src/bthread/stack.cpp            +76   -49
src/bthread/stack.h              +33   -13
src/bthread/stack_inl.h          +58   -66
src/bthread/task_group.cpp       +21   -23
src/bthread/task_group.h          +2    -2
src/bthread/task_group_inl.h      +5    -5
src/bthread/task_meta.h           +9    -8
test/bthread_cond_unittest.cpp    +5    -5
tools/patch_from_svn              +0    -1
src/bthread/stack.cpp

@@ -10,7 +10,9 @@
 #include <stdlib.h>                         // posix_memalign
 #include "base/macros.h"                    // BAIDU_CASSERT
 #include "base/memory/singleton_on_pthread_once.h"
-#include "bvar/reducer.h"                   // bvar::Adder
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h" // RunningOnValgrind
+#include "base/third_party/valgrind/valgrind.h" // VALGRIND_STACK_REGISTER
+#include "bvar/passive_status.h"
 #include "bthread/types.h"                  // BTHREAD_STACKTYPE_*
 #include "bthread/stack.h"
@@ -29,78 +31,103 @@ BAIDU_CASSERT(BTHREAD_STACKTYPE_NORMAL == STACK_TYPE_NORMAL, must_match);
 BAIDU_CASSERT(BTHREAD_STACKTYPE_LARGE == STACK_TYPE_LARGE, must_match);
 BAIDU_CASSERT(STACK_TYPE_MAIN == 0, must_be_0);
 
-extern const int PAGESIZE = getpagesize();
-extern const int PAGESIZE_M1 = PAGESIZE - 1;
-const int MIN_STACKSIZE = PAGESIZE * 2;
-const int MIN_GUARDSIZE = PAGESIZE;
-
-struct StackCount : public bvar::Adder<int64_t> {
-    StackCount() : bvar::Adder<int64_t>("bthread_stack_count") {}
-};
-inline bvar::Adder<int64_t>& stack_count() {
-    return *base::get_leaky_singleton<StackCount>();
+static base::static_atomic<int64_t> s_stack_count = BASE_STATIC_ATOMIC_INIT(0);
+static int64_t get_stack_count(void*) {
+    return s_stack_count.load(base::memory_order_relaxed);
 }
+static bvar::PassiveStatus<int64_t> bvar_stack_count(
+    "bthread_stack_count", get_stack_count, NULL);
 
-void* allocate_stack(int* inout_stacksize, int* inout_guardsize) {
-    // Align stack and guard size.
-    int stacksize =
-        (std::max(*inout_stacksize, MIN_STACKSIZE) + PAGESIZE_M1) & ~PAGESIZE_M1;
-    int guardsize =
-        (std::max(*inout_guardsize, MIN_GUARDSIZE) + PAGESIZE_M1) & ~PAGESIZE_M1;
+int allocate_stack_storage(StackStorage* s, int stacksize_in, int guardsize_in) {
+    const static int PAGESIZE = getpagesize();
+    const int PAGESIZE_M1 = PAGESIZE - 1;
+    const int MIN_STACKSIZE = PAGESIZE * 2;
+    const int MIN_GUARDSIZE = PAGESIZE;
+
+    // Align stacksize
+    const int stacksize =
+        (std::max(stacksize_in, MIN_STACKSIZE) + PAGESIZE_M1) & ~PAGESIZE_M1;
 
-    if (FLAGS_guard_page_size <= 0) {
+    if (guardsize_in <= 0) {
         void* mem = malloc(stacksize);
         if (NULL == mem) {
-            return NULL;
+            PLOG_EVERY_SECOND(ERROR) << "Fail to malloc (size=" << stacksize << ")";
+            return -1;
         }
-        stack_count() << 1;
-        *inout_stacksize = stacksize;
-        *inout_guardsize = 0;
-        return (char*)mem + stacksize;
+        s_stack_count.fetch_add(1, base::memory_order_relaxed);
+        s->bottom = (char*)mem + stacksize;
+        s->stacksize = stacksize;
+        s->guardsize = 0;
+        if (RunningOnValgrind()) {
+            s->valgrind_stack_id = VALGRIND_STACK_REGISTER(
+                s->bottom, (char*)s->bottom - stacksize);
+        } else {
+            s->valgrind_stack_id = 0;
+        }
+        return 0;
     } else {
+        // Align guardsize
+        const int guardsize =
+            (std::max(guardsize_in, MIN_GUARDSIZE) + PAGESIZE_M1) & ~PAGESIZE_M1;
+
         const int memsize = stacksize + guardsize;
         void* const mem = mmap(NULL, memsize, (PROT_READ | PROT_WRITE),
                                (MAP_PRIVATE | MAP_ANONYMOUS), -1, 0);
         if (MAP_FAILED == mem) {
             PLOG_EVERY_SECOND(ERROR)
-                << "Fail to mmap, which is likely to be limited by the value"
-                   " in /proc/sys/vm/max_map_count";
+                << "Fail to mmap size=" << memsize << " stack_count="
+                << s_stack_count.load(base::memory_order_relaxed)
+                << ", possibly limited by /proc/sys/vm/max_map_count";
             // may fail due to limit of max_map_count (65536 in default)
-            return NULL;
+            return -1;
         }
-        char* aligned_mem = (char*)(((intptr_t)mem + PAGESIZE_M1) & ~PAGESIZE_M1);
-        const int offset = aligned_mem - (char*)mem;
+        void* aligned_mem = (void*)(((intptr_t)mem + PAGESIZE_M1) & ~PAGESIZE_M1);
+        if (aligned_mem != mem) {
+            LOG_ONCE(ERROR) << "addr=" << mem << " returned by mmap is not "
+                "aligned by pagesize=" << PAGESIZE;
+        }
+        const int offset = (char*)aligned_mem - (char*)mem;
         if (guardsize <= offset ||
             mprotect(aligned_mem, guardsize - offset, PROT_NONE) != 0) {
             munmap(mem, memsize);
-            return NULL;
+            PLOG_EVERY_SECOND(ERROR) << "Fail to mprotect " << (void*)aligned_mem
                                     << " length=" << guardsize - offset;
+            return -1;
         }
-        stack_count() << 1;
-        *inout_stacksize = stacksize;
-        *inout_guardsize = guardsize;
-        return (char*)mem + memsize;
+        s_stack_count.fetch_add(1, base::memory_order_relaxed);
+        s->bottom = (char*)mem + memsize;
+        s->stacksize = stacksize;
+        s->guardsize = guardsize;
+        if (RunningOnValgrind()) {
+            s->valgrind_stack_id = VALGRIND_STACK_REGISTER(
+                s->bottom, (char*)s->bottom - stacksize);
+        } else {
+            s->valgrind_stack_id = 0;
+        }
+        return 0;
     }
 }
 
-void deallocate_stack(void* mem, int stacksize, int guardsize) {
-    const int memsize = stacksize + guardsize;
-    if (FLAGS_guard_page_size <= 0) {
-        if ((char*)mem > (char*)NULL + memsize) {
-            stack_count() << -1;
-            free((char*)mem - memsize);
-        }
+void deallocate_stack_storage(StackStorage* s) {
+    if (RunningOnValgrind()) {
+        VALGRIND_STACK_DEREGISTER(s->valgrind_stack_id);
+    }
+    const int memsize = s->stacksize + s->guardsize;
+    if ((char*)s->bottom <= (char*)NULL + memsize) {
+        return;
+    }
+    s_stack_count.fetch_sub(1, base::memory_order_relaxed);
+    if (s->guardsize <= 0) {
+        free((char*)s->bottom - memsize);
     } else {
-        if ((char*)mem > (char*)NULL + memsize) {
-            stack_count() << -1;
-            munmap((char*)mem - memsize, memsize);
-        }
+        munmap((char*)s->bottom - memsize, memsize);
    }
 }
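The rounding in allocate_stack_storage() aligns both sizes to whole pages before the malloc/mmap branch. Below is a minimal standalone sketch (not part of the patch; align_stacksize is an illustrative name) of that arithmetic:

    #include <algorithm>
    #include <cstdio>
    #include <unistd.h>

    // Round a requested size up to a whole number of pages, with a lower
    // bound of two pages, mirroring the stacksize alignment in the hunk above.
    int align_stacksize(int size) {
        const int PAGESIZE = getpagesize();
        const int PAGESIZE_M1 = PAGESIZE - 1;
        const int MIN_STACKSIZE = PAGESIZE * 2;
        return (std::max(size, MIN_STACKSIZE) + PAGESIZE_M1) & ~PAGESIZE_M1;
    }

    int main() {
        // With 4096-byte pages: 1 -> 8192 (the minimum), 8193 -> 12288.
        printf("%d %d\n", align_stacksize(1), align_stacksize(8193));
        return 0;
    }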
src/bthread/stack.h

@@ -12,19 +12,31 @@
 #include "bthread/types.h"
 #include "bthread/context.h"        // bthread_fcontext_t
 #include "base/object_pool.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h" // RunningOnValgrind
-#include "base/third_party/valgrind/valgrind.h" // VALGRIND_STACK_REGISTER
 
 namespace bthread {
 
-struct StackContainer {
-    bthread_fcontext_t context;
-    int stacksize;
-    int guardsize;
-    void* stack;
-    int stacktype;
+struct StackStorage {
+    int stacksize;
+    int guardsize;
+    // Assume stack grows upwards.
+    // http://www.boost.org/doc/libs/1_55_0/libs/context/doc/html/context/stack.html
+    void* bottom;
+    unsigned valgrind_stack_id;
+
+    // Clears all members.
+    void zeroize() {
+        stacksize = 0;
+        guardsize = 0;
+        bottom = NULL;
+        valgrind_stack_id = 0;
+    }
 };
+
+// Allocate a piece of stack.
+int allocate_stack_storage(StackStorage* s, int stacksize, int guardsize);
+// Deallocate a piece of stack. Parameters MUST be returned or set by the
+// corresponding allocate_stack_storage() otherwise behavior is undefined.
+void deallocate_stack_storage(StackStorage* s);
 
 enum StackType {
     STACK_TYPE_MAIN = 0,
@@ -34,12 +46,20 @@ enum StackType {
     STACK_TYPE_LARGE = BTHREAD_STACKTYPE_LARGE
 };
 
-inline StackContainer* get_stack(StackType type, void (*entry)(intptr_t));
-inline void return_stack(StackContainer* sc);
+struct ContextualStack {
+    bthread_fcontext_t context;
+    StackType stacktype;
+    StackStorage storage;
+};
 
-// Allocate/deallocate stacks with guard pages.
-void* allocate_stack(int* stacksize, int* guardsize);
-void deallocate_stack(void* mem, int stacksize, int guardsize);
+// Get a stack in the `type' and run `entry' at the first time that the
+// stack is jumped.
+ContextualStack* get_stack(StackType type, void (*entry)(intptr_t));
+// Recycle a stack. NULL does nothing.
+void return_stack(ContextualStack*);
+// Jump from stack `from' to stack `to'. `from' must be the stack of callsite
+// (to save contexts before jumping)
+void jump_stack(ContextualStack* from, ContextualStack* to);
 
 }  // namespace bthread
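The header now splits stack ownership into two levels: StackStorage holds the raw memory (bottom, sizes, valgrind id) while ContextualStack pairs that storage with an fcontext and a type. A minimal sketch (not from the patch; demo_storage and the sizes are illustrative) of the storage-level contract:

    #include "bthread/stack.h"

    bool demo_storage() {
        bthread::StackStorage storage;
        storage.zeroize();
        // Ask for a 32KB stack with a 4KB guard; both are rounded up to
        // page boundaries inside allocate_stack_storage(), which returns
        // 0 on success and -1 on failure.
        if (bthread::allocate_stack_storage(&storage, 32768, 4096) != 0) {
            return false;  // malloc/mmap/mprotect failed
        }
        // storage.bottom is the high end of the usable area; the stack is
        // assumed to grow upwards per the header comment.
        bthread::deallocate_stack_storage(&storage);
        storage.zeroize();
        return true;
    }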
src/bthread/stack_inl.h

@@ -17,172 +17,164 @@ struct MainStackClass {};
 struct SmallStackClass {
     static int* stack_size_flag;
-    const static int stacktype = STACK_TYPE_SMALL;
+    // Older gcc does not allow static const enum, use int instead.
+    static const int stacktype = (int)STACK_TYPE_SMALL;
 };
 
 struct NormalStackClass {
     static int* stack_size_flag;
-    const static int stacktype = STACK_TYPE_NORMAL;
+    static const int stacktype = (int)STACK_TYPE_NORMAL;
 };
 
 struct LargeStackClass {
     static int* stack_size_flag;
-    const static int stacktype = STACK_TYPE_LARGE;
+    static const int stacktype = (int)STACK_TYPE_LARGE;
 };
 
-template <typename StackClass> struct StackContainerFactory {
-    struct Wrapper : public StackContainer {
+template <typename StackClass> struct StackFactory {
+    struct Wrapper : public ContextualStack {
         explicit Wrapper(void (*entry)(intptr_t)) {
-            stacksize = *StackClass::stack_size_flag;
-            guardsize = FLAGS_guard_page_size;
-            stack = allocate_stack(&stacksize, &guardsize);
-            valgrind_stack_id = 0;
-            if (BAIDU_UNLIKELY(NULL == stack)) {
+            if (allocate_stack_storage(&storage, *StackClass::stack_size_flag,
+                                       FLAGS_guard_page_size) != 0) {
+                storage.zeroize();
                 context = NULL;
                 return;
             }
-            // TODO: Growth direction of stack is arch-dependent(not handled by
-            // fcontext). We temporarily assume stack grows upwards.
-            // http://www.boost.org/doc/libs/1_55_0/libs/context/doc/html/context/stack.html
-            if (RunningOnValgrind()) {
-                valgrind_stack_id = VALGRIND_STACK_REGISTER(
-                    stack, (char*)stack - stacksize);
-            }
-            context = bthread_make_fcontext(stack, stacksize, entry);
-            stacktype = StackClass::stacktype;
+            context = bthread_make_fcontext(storage.bottom, storage.stacksize, entry);
+            stacktype = (StackType)StackClass::stacktype;
         }
         ~Wrapper() {
-            if (stack) {
-                if (RunningOnValgrind()) {
-                    VALGRIND_STACK_DEREGISTER(valgrind_stack_id);
-                    valgrind_stack_id = 0;
-                }
-                deallocate_stack(stack, stacksize, guardsize);
-                stack = NULL;
+            if (context) {
+                context = NULL;
+                deallocate_stack_storage(&storage);
+                storage.zeroize();
             }
-            context = NULL;
         }
     };
 
-    static StackContainer* get_stack(void (*entry)(intptr_t)) {
+    static ContextualStack* get_stack(void (*entry)(intptr_t)) {
         return base::get_object<Wrapper>(entry);
     }
 
-    static void return_stack(StackContainer* sc) {
+    static void return_stack(ContextualStack* sc) {
         base::return_object(static_cast<Wrapper*>(sc));
     }
 };
 
-template <> struct StackContainerFactory<MainStackClass> {
-    static StackContainer* get_stack(void (*)(intptr_t)) {
-        StackContainer* sc = new (std::nothrow) StackContainer;
-        if (NULL == sc) {
+template <> struct StackFactory<MainStackClass> {
+    static ContextualStack* get_stack(void (*)(intptr_t)) {
+        ContextualStack* s = new (std::nothrow) ContextualStack;
+        if (NULL == s) {
             return NULL;
         }
-        sc->stacksize = 0;
-        sc->guardsize = 0;
-        sc->stacktype = STACK_TYPE_MAIN;
-        return sc;
+        s->context = NULL;
+        s->stacktype = STACK_TYPE_MAIN;
+        s->storage.zeroize();
+        return s;
     }
-    static void return_stack(StackContainer* sc) {
-        delete sc;
+    static void return_stack(ContextualStack* s) {
+        delete s;
     }
 };
 
-StackContainer* get_stack(StackType type, void (*entry)(intptr_t)) {
+inline ContextualStack* get_stack(StackType type, void (*entry)(intptr_t)) {
     switch (type) {
     case STACK_TYPE_PTHREAD:
         return NULL;
     case STACK_TYPE_SMALL:
-        return StackContainerFactory<SmallStackClass>::get_stack(entry);
+        return StackFactory<SmallStackClass>::get_stack(entry);
     case STACK_TYPE_NORMAL:
-        return StackContainerFactory<NormalStackClass>::get_stack(entry);
+        return StackFactory<NormalStackClass>::get_stack(entry);
     case STACK_TYPE_LARGE:
-        return StackContainerFactory<LargeStackClass>::get_stack(entry);
+        return StackFactory<LargeStackClass>::get_stack(entry);
     case STACK_TYPE_MAIN:
-        return StackContainerFactory<MainStackClass>::get_stack(entry);
+        return StackFactory<MainStackClass>::get_stack(entry);
     }
     return NULL;
 }
 
-void return_stack(StackContainer* sc) {
-    if (NULL == sc) {
+inline void return_stack(ContextualStack* s) {
+    if (NULL == s) {
         return;
     }
-    switch (sc->stacktype) {
+    switch (s->stacktype) {
     case STACK_TYPE_PTHREAD:
         assert(false);
         return;
     case STACK_TYPE_SMALL:
-        return StackContainerFactory<SmallStackClass>::return_stack(sc);
+        return StackFactory<SmallStackClass>::return_stack(s);
     case STACK_TYPE_NORMAL:
-        return StackContainerFactory<NormalStackClass>::return_stack(sc);
+        return StackFactory<NormalStackClass>::return_stack(s);
     case STACK_TYPE_LARGE:
-        return StackContainerFactory<LargeStackClass>::return_stack(sc);
+        return StackFactory<LargeStackClass>::return_stack(s);
     case STACK_TYPE_MAIN:
-        return StackContainerFactory<MainStackClass>::return_stack(sc);
+        return StackFactory<MainStackClass>::return_stack(s);
     }
 }
 
+inline void jump_stack(ContextualStack* from, ContextualStack* to) {
+    bthread_jump_fcontext(&from->context, to->context, 0/*not skip remained*/);
+}
+
 }  // namespace bthread
 
 namespace base {
 
 template <> struct ObjectPoolBlockMaxItem<
-    bthread::StackContainerFactory<bthread::LargeStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::LargeStackClass>::Wrapper> {
     static const size_t value = 64;
 };
 template <> struct ObjectPoolBlockMaxItem<
-    bthread::StackContainerFactory<bthread::NormalStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::NormalStackClass>::Wrapper> {
     static const size_t value = 64;
 };
 template <> struct ObjectPoolBlockMaxItem<
-    bthread::StackContainerFactory<bthread::SmallStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::SmallStackClass>::Wrapper> {
     static const size_t value = 64;
 };
 
 template <> struct ObjectPoolFreeChunkMaxItem<
-    bthread::StackContainerFactory<bthread::SmallStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::SmallStackClass>::Wrapper> {
     inline static size_t value() {
         return (FLAGS_tc_stack_small <= 0 ? 0 : FLAGS_tc_stack_small);
     }
 };
 template <> struct ObjectPoolFreeChunkMaxItem<
-    bthread::StackContainerFactory<bthread::NormalStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::NormalStackClass>::Wrapper> {
     inline static size_t value() {
         return (FLAGS_tc_stack_normal <= 0 ? 0 : FLAGS_tc_stack_normal);
     }
 };
 template <> struct ObjectPoolFreeChunkMaxItem<
-    bthread::StackContainerFactory<bthread::LargeStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::LargeStackClass>::Wrapper> {
     inline static size_t value() { return 1UL; }
 };
 
 template <> struct ObjectPoolValidator<
-    bthread::StackContainerFactory<bthread::LargeStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::LargeStackClass>::Wrapper> {
     inline static bool validate(
-        const bthread::StackContainerFactory<bthread::LargeStackClass>::Wrapper* w) {
-        return w->stack != NULL;
+        const bthread::StackFactory<bthread::LargeStackClass>::Wrapper* w) {
+        return w->context != NULL;
     }
 };
 template <> struct ObjectPoolValidator<
-    bthread::StackContainerFactory<bthread::NormalStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::NormalStackClass>::Wrapper> {
     inline static bool validate(
-        const bthread::StackContainerFactory<bthread::NormalStackClass>::Wrapper* w) {
-        return w->stack != NULL;
+        const bthread::StackFactory<bthread::NormalStackClass>::Wrapper* w) {
+        return w->context != NULL;
     }
 };
 template <> struct ObjectPoolValidator<
-    bthread::StackContainerFactory<bthread::SmallStackClass>::Wrapper> {
+    bthread::StackFactory<bthread::SmallStackClass>::Wrapper> {
     inline static bool validate(
-        const bthread::StackContainerFactory<bthread::SmallStackClass>::Wrapper* w) {
-        return w->stack != NULL;
+        const bthread::StackFactory<bthread::SmallStackClass>::Wrapper* w) {
+        return w->context != NULL;
     }
 };
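Taken together, get_stack()/jump_stack()/return_stack() define the lifecycle of a ContextualStack. A hedged sketch of that lifecycle (not in the patch; demo and entry are placeholder names, and a real entry such as TaskGroup::task_runner never returns normally):

    #include "bthread/stack.h"

    static void entry(intptr_t /*arg*/) {
        // Runs on the new stack the first time it is jumped to.
    }

    void demo(bthread::ContextualStack* cur) {
        bthread::ContextualStack* stk =
            bthread::get_stack(bthread::STACK_TYPE_NORMAL, entry);
        if (stk == NULL) {
            return;  // out of memory: callers fall back to pthread mode
        }
        // Save the caller's context into `cur' and resume on `stk'.
        // Control returns here only after something jumps back to `cur'.
        bthread::jump_stack(cur, stk);
        // Recycle the stack into the object pool once it is no longer running.
        bthread::return_stack(stk);
    }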
src/bthread/task_group.cpp

@@ -168,7 +168,7 @@ void TaskGroup::run_main_task() {
     while (wait_task(&tid)) {
         TaskGroup::sched_to(&dummy, tid);
         DCHECK_EQ(this, dummy);
-        DCHECK_EQ(_cur_meta->stack_container, _main_stack_container);
+        DCHECK_EQ(_cur_meta->stack, _main_stack);
         if (_cur_meta->tid != _main_tid) {
             TaskGroup::task_runner(1/*skip remained*/);
         }
@@ -200,7 +200,7 @@ TaskGroup::TaskGroup(TaskControl* c)
     , _last_context_remained(NULL)
     , _last_context_remained_arg(NULL)
     , _pl(NULL)
-    , _main_stack_container(NULL)
+    , _main_stack(NULL)
     , _main_tid(0)
     , _remote_num_nosignal(0)
     , _remote_nsignaled(0)
@@ -214,7 +214,7 @@ TaskGroup::TaskGroup(TaskControl* c)
 TaskGroup::~TaskGroup() {
     if (_main_tid) {
         TaskMeta* m = address_meta(_main_tid);
-        CHECK(_main_stack_container == m->stack_container);
+        CHECK(_main_stack == m->stack);
         return_stack(m->release_stack());
         return_resource(get_slot(_main_tid));
         _main_tid = 0;
@@ -230,8 +230,8 @@ int TaskGroup::init(size_t runqueue_capacity) {
         LOG(FATAL) << "Fail to init _remote_rq";
         return -1;
     }
-    StackContainer* sc = get_stack(STACK_TYPE_MAIN, NULL);
-    if (NULL == sc) {
+    ContextualStack* stk = get_stack(STACK_TYPE_MAIN, NULL);
+    if (NULL == stk) {
         LOG(FATAL) << "Fail to get main stack container";
         return -1;
     }
@@ -258,11 +258,11 @@ int TaskGroup::init(size_t runqueue_capacity) {
     m->stat = EMPTY_STAT;
     m->attr = BTHREAD_ATTR_TASKGROUP;
     m->tid = make_tid(*m->version_butex, slot);
-    m->set_stack(sc);
+    m->set_stack(stk);
     _cur_meta = m;
     _main_tid = m->tid;
-    _main_stack_container = sc;
+    _main_stack = stk;
     _last_run_ns = base::cpuwide_time_ns();
     return 0;
 }
@@ -366,7 +366,7 @@ void TaskGroup::_release_last_context(void* arg) {
     if (m->stack_type() != STACK_TYPE_PTHREAD) {
         return_stack(m->release_stack()/*may be NULL*/);
     } else {
-        // it's _main_stack_container, don't return.
+        // it's _main_stack, don't return.
         m->set_stack(NULL);
     }
     return_resource(get_slot(m->tid));
@@ -393,7 +393,7 @@ int TaskGroup::start_foreground(TaskGroup** pg,
     m->about_to_quit = false;
     m->fn = fn;
     m->arg = arg;
-    CHECK(m->stack_container == NULL);
+    CHECK(m->stack == NULL);
     m->attr = using_attr;
     m->local_storage = LOCAL_STORAGE_INIT;
     m->cpuwide_start_ns = start_ns;
@@ -441,7 +441,7 @@ int TaskGroup::start_background(bthread_t* __restrict th,
     m->about_to_quit = false;
     m->fn = fn;
     m->arg = arg;
-    CHECK(m->stack_container == NULL);
+    CHECK(m->stack == NULL);
     m->attr = using_attr;
     m->local_storage = LOCAL_STORAGE_INIT;
     m->cpuwide_start_ns = start_ns;
@@ -547,22 +547,22 @@ void TaskGroup::ending_sched(TaskGroup** pg) {
     TaskMeta* const cur_meta = g->_cur_meta;
     TaskMeta* next_meta = address_meta(next_tid);
-    if (next_meta->stack_container == NULL) {
+    if (next_meta->stack == NULL) {
         if (next_meta->stack_type() == cur_meta->stack_type()) {
             // also works with pthread_task scheduling to pthread_task, the
-            // transfered stack_container is just _main_stack_container.
+            // transfered stack is just _main_stack.
             next_meta->set_stack(cur_meta->release_stack());
         } else {
-            StackContainer* sc = get_stack(next_meta->stack_type(), task_runner);
-            if (sc != NULL) {
-                next_meta->set_stack(sc);
+            ContextualStack* stk = get_stack(next_meta->stack_type(), task_runner);
+            if (stk) {
+                next_meta->set_stack(stk);
             } else {
                 // stack_type is BTHREAD_STACKTYPE_PTHREAD or out of memory,
                 // In latter case, attr is forced to be BTHREAD_STACKTYPE_PTHREAD.
                 // This basically means that if we can't allocate stack, run
                 // the task in pthread directly.
                 next_meta->attr.stack_type = BTHREAD_STACKTYPE_PTHREAD;
-                next_meta->set_stack(g->_main_stack_container);
+                next_meta->set_stack(g->_main_stack);
             }
         }
     }
@@ -616,19 +616,17 @@ void TaskGroup::sched_to(TaskGroup** pg, TaskMeta* next_meta) {
     }
     g->_cur_meta = next_meta;
     tls_bls = next_meta->local_storage;
-    if (cur_meta->stack_container != NULL) {
-        if (next_meta->stack_container != cur_meta->stack_container) {
-            bthread_jump_fcontext(&cur_meta->stack_container->context,
-                                  next_meta->stack_container->context,
-                                  0/*not skip remained*/);
+    if (cur_meta->stack != NULL) {
+        if (next_meta->stack != cur_meta->stack) {
+            jump_stack(cur_meta->stack, next_meta->stack);
             // probably went to another group, need to assign g again.
             g = tls_task_group;
         }
 #ifndef NDEBUG
         else {
             // else pthread_task is switching to another pthread_task, sc
-            // can only equal when they're both _main_stack_container
-            CHECK(cur_meta->stack_container == g->_main_stack_container);
+            // can only equal when they're both _main_stack
+            CHECK(cur_meta->stack == g->_main_stack);
         }
 #endif
     }
src/bthread/task_group.h

@@ -126,7 +126,7 @@ public:
     bool is_current_main_task() const { return current_tid() == _main_tid; }
     // True iff current task is in pthread-mode.
     bool is_current_pthread_task() const
-    { return _cur_meta->stack_container == _main_stack_container; }
+    { return _cur_meta->stack == _main_stack; }
     // Active time in nanoseconds spent by this TaskGroup.
     int64_t cumulated_cputime_ns() const { return _cumulated_cputime_ns; }
@@ -225,7 +225,7 @@ friend class TaskControl;
 #endif
     size_t _steal_seed;
     size_t _steal_offset;
-    StackContainer* _main_stack_container;
+    ContextualStack* _main_stack;
     bthread_t _main_tid;
     WorkStealingQueue<bthread_t> _rq;
     RemoteTaskQueue _remote_rq;
src/bthread/task_group_inl.h

@@ -45,17 +45,17 @@ inline void TaskGroup::exchange(TaskGroup** pg, bthread_t next_tid) {
 inline void TaskGroup::sched_to(TaskGroup** pg, bthread_t next_tid) {
     TaskMeta* next_meta = address_meta(next_tid);
-    if (next_meta->stack_container == NULL) {
-        StackContainer* sc = get_stack(next_meta->stack_type(), task_runner);
-        if (sc != NULL) {
-            next_meta->set_stack(sc);
+    if (next_meta->stack == NULL) {
+        ContextualStack* stk = get_stack(next_meta->stack_type(), task_runner);
+        if (stk) {
+            next_meta->set_stack(stk);
         } else {
             // stack_type is BTHREAD_STACKTYPE_PTHREAD or out of memory,
             // In latter case, attr is forced to be BTHREAD_STACKTYPE_PTHREAD.
             // This basically means that if we can't allocate stack, run
             // the task in pthread directly.
             next_meta->attr.stack_type = BTHREAD_STACKTYPE_PTHREAD;
-            next_meta->set_stack((*pg)->_main_stack_container);
+            next_meta->set_stack((*pg)->_main_stack);
         }
     }
     // Update now_ns only when wait_task did yield.
src/bthread/task_meta.h

@@ -11,7 +11,7 @@
 #include "bthread/butex.h"      // butex_construct/destruct
 #include "base/atomicops.h"     // base::atomic
 #include "bthread/types.h"      // bthread_attr_t
-#include "bthread/stack.h"      // Context, StackContainer
+#include "bthread/stack.h"      // ContextualStack
 
 namespace bthread {
@@ -56,7 +56,8 @@ struct TaskMeta {
     void* (*fn)(void*);
     void* arg;
 
-    StackContainer* stack_container;
+    // Stack of this task.
+    ContextualStack* stack;
 
     // Attributes creating this task
     bthread_attr_t attr;
@@ -74,7 +75,7 @@ public:
     TaskMeta()
         : current_waiter(NULL)
         , current_sleep(0)
-        , stack_container(NULL) {
+        , stack(NULL) {
         pthread_spin_init(&version_lock, 0);
         version_butex = butex_create_checked<uint32_t>();
         *version_butex = 1;
@@ -86,13 +87,13 @@ public:
         pthread_spin_destroy(&version_lock);
     }
 
-    void set_stack(StackContainer* sc) {
-        stack_container = sc;
+    void set_stack(ContextualStack* s) {
+        stack = s;
     }
 
-    StackContainer* release_stack() {
-        StackContainer* tmp = stack_container;
-        stack_container = NULL;
+    ContextualStack* release_stack() {
+        ContextualStack* tmp = stack;
+        stack = NULL;
         return tmp;
     }
test/bthread_cond_unittest.cpp

@@ -80,7 +80,7 @@ TEST(CondTest, sanity) {
     bthread_t sth;
     ASSERT_EQ(0, bthread_start_urgent(&sth, NULL, signaler, &a));
-    bthread_usleep(SIGNAL_INTERVAL_US * 300);
+    bthread_usleep(SIGNAL_INTERVAL_US * 200);
     pthread_mutex_lock(&wake_mutex);
     const size_t nbeforestop = wake_time.size();
@@ -336,11 +336,11 @@ void* disturb_thread(void* arg) {
     return NULL;
 }
 
-TEST(CondTest, mix_usage) {
+TEST(CondTest, mixed_usage) {
     BroadcastArg ba;
     ba.nwaiter = 0;
     ba.cur_waiter = 0;
-    ba.rounds = 100000;
+    ba.rounds = 30000;
     const int NTHREADS = 10;
     ba.nwaiter = NTHREADS * 2;
@@ -424,7 +424,7 @@ void* wait_cond_thread(void* arg) {
     return NULL;
 }
 
-TEST(CondTest, too_many_bthread) {
+TEST(CondTest, too_many_bthreads) {
     std::vector<bthread_t> th;
     th.resize(32768);
     BthreadCond c;
@@ -435,7 +435,7 @@ TEST(CondTest, too_many_bthread) {
         bthread_start_background(&th[i], NULL, usleep_thread, NULL);
     }
     c.Signal();
-    usleep(1 * 1000 * 1000L);
+    usleep(3 * 1000 * 1000L);
     g_stop = true;
     bthread_join(tid, NULL);
     ASSERT_TRUE(started_wait);
tools/patch_from_svn

@@ -57,4 +57,3 @@ if [ -z "$DO_RUN" ]; then
     echo "*** This is a dry-run. To really apply, run: DO_RUN=1 tools/patch_from_svn $1"
 fi
 patch -p0 -u $EXTRA_ARGS < $MODIFIED_PATCHFILE
-rm $MODIFIED_PATCHFILE