Commit 34594d97 authored Jul 09, 2018 by TousakaRin

fix comments

parent 268fa1dc

Showing 16 changed files with 152 additions and 181 deletions (+152 -181)
src/brpc/adaptive_connection_type.cpp              +1   -1
src/brpc/concurrency_limiter.h                     +2   -0
src/brpc/details/method_status.cpp                 +3   -1
src/brpc/details/method_status.h                   +16  -20
src/brpc/details/server_private_accessor.h         +0   -14
src/brpc/policy/baidu_rpc_protocol.cpp             +3   -11
src/brpc/policy/constant_concurrency_limiter.cpp   +4   -0
src/brpc/policy/constant_concurrency_limiter.h     +3   -2
src/brpc/policy/gradient_concurrency_limiter.cpp   +92  -71
src/brpc/policy/gradient_concurrency_limiter.h     +11  -5
src/brpc/policy/http_rpc_protocol.cpp              +3   -23
src/brpc/policy/hulu_pbrpc_protocol.cpp            +3   -8
src/brpc/policy/mongo_protocol.cpp                 +3   -7
src/brpc/policy/nshead_protocol.cpp                +2   -6
src/brpc/policy/sofa_pbrpc_protocol.cpp            +3   -8
src/brpc/policy/thrift_protocol.cpp                +3   -4
src/brpc/adaptive_connection_type.cpp
@@ -39,7 +39,7 @@ ConnectionType StringToConnectionType(const butil::StringPiece& type,
     }
     LOG_IF(ERROR, print_log_on_unknown && !type.empty())
         << "Unknown connection_type `" << type
-        << "`, supported types: single pooled short";
+        << "', supported types: single pooled short";
     return CONNECTION_TYPE_UNKNOWN;
 }
src/brpc/concurrency_limiter.h
@@ -40,6 +40,8 @@ public:
     virtual int MaxConcurrency() const = 0;
     virtual int& MaxConcurrency() = 0;
+    virtual int CurrentMaxConcurrency() const = 0;
 };

 inline Extension<const ConcurrencyLimiter>* ConcurrencyLimiterExtension() {
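The hunk above adds a read-only CurrentMaxConcurrency() accessor alongside the MaxConcurrency() overloads. For orientation, here is a minimal sketch of a limiter written against that three-accessor shape; FixedLimiter and everything inside it are illustrative only (not part of this commit or of brpc), and the base-class inheritance is commented out so the snippet compiles on its own.

// Hypothetical fixed-threshold limiter mirroring the accessors in the hunk above (sketch, not brpc code).
#include <atomic>
#include <cstdint>

class FixedLimiter /* : public brpc::ConcurrencyLimiter */ {
public:
    explicit FixedLimiter(int max) : _max_concurrency(max), _current(0) {}

    // Called before a request is processed; false means "refuse with ELIMIT".
    bool OnRequested() {
        return _current.fetch_add(1, std::memory_order_relaxed) + 1 <= _max_concurrency;
    }

    // Called after the response is sent; callers report even refused requests in this toy.
    void OnResponded(int /*error_code*/, int64_t /*latency_us*/) {
        _current.fetch_sub(1, std::memory_order_relaxed);
    }

    int MaxConcurrency() const { return _max_concurrency; }          // user-visible setting
    int& MaxConcurrency() { return _max_concurrency; }               // settable reference
    int CurrentMaxConcurrency() const { return _max_concurrency; }   // limit enforced right now

private:
    int _max_concurrency;
    std::atomic<int> _current;
};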
src/brpc/details/method_status.cpp
@@ -16,8 +16,9 @@
 #include <limits>
 #include "butil/macros.h"
-#include "brpc/details/method_status.h"
 #include "brpc/controller.h"
+#include "brpc/details/server_private_accessor.h"
+#include "brpc/details/method_status.h"

 namespace brpc {

@@ -141,6 +142,7 @@ ScopedMethodStatus::~ScopedMethodStatus() {
         _status->OnResponded(
             _c->ErrorCode(), butil::cpuwide_time_us() - _start_parse_us);
         _status = NULL;
     }
+    ServerPrivateAccessor(_server).RemoveConcurrency(_c);
 }

 } // namespace brpc
src/brpc/details/method_status.h
@@ -26,6 +26,7 @@
 namespace brpc {

 class Controller;
+class Server;

 // Record accessing stats of a method.
 class MethodStatus : public Describable {
 public:

@@ -52,6 +53,10 @@ public:
     // Describe internal vars, used by /status
     void Describe(std::ostream& os, const DescribeOptions&) const;

+    int current_max_concurrency() const { return _cl->CurrentMaxConcurrency(); }
+
+    int max_concurrency() const
+    { return const_cast<const ConcurrencyLimiter*>(_cl)->MaxConcurrency(); }

@@ -68,7 +73,6 @@ public:
 private:
 friend class ScopedMethodStatus;
     DISALLOW_COPY_AND_ASSIGN(MethodStatus);
-    void OnError();
     ConcurrencyLimiter* _cl;
     bvar::Adder<int64_t> _nerror;

@@ -79,53 +83,45 @@ friend class ScopedMethodStatus;
     butil::atomic<int> BAIDU_CACHELINE_ALIGNMENT _nprocessing;
 };

 // If release() is not called before destruction of this object,
 // an error will be counted.
 class ScopedMethodStatus {
 public:
-    ScopedMethodStatus(MethodStatus* status, Controller* c,
+    ScopedMethodStatus(MethodStatus* status, const Server* server, Controller* c,
                        int64_t start_parse_us)
         : _status(status)
+        , _server(server)
         , _c(c)
         , _start_parse_us(start_parse_us) {}
     ~ScopedMethodStatus();
     MethodStatus* release() {
         MethodStatus* tmp = _status;
         _status = NULL;
         return tmp;
     }
     operator MethodStatus*() const { return _status; }
 private:
     DISALLOW_COPY_AND_ASSIGN(ScopedMethodStatus);
     MethodStatus* _status;
+    const Server* _server;
     Controller* _c;
     uint64_t _start_parse_us;
 };

 inline bool MethodStatus::OnRequested() {
     _nprocessing.fetch_add(1, butil::memory_order_relaxed);
-    bool should_refuse = !_cl->OnRequested();
-    if (should_refuse) {
-        _nrefused_bvar << 1;
+    if (_cl->OnRequested()) {
+        return true;
     }
-    return !should_refuse;
+    _nrefused_bvar << 1;
+    return false;
 }

 inline void MethodStatus::OnResponded(int error_code, int64_t latency) {
+    _nprocessing.fetch_sub(1, butil::memory_order_relaxed);
     if (0 == error_code) {
         _latency_rec << latency;
-        _nprocessing.fetch_sub(1, butil::memory_order_relaxed);
     } else {
-        OnError();
+        _nerror << 1;
     }
+    _cl->OnResponded(error_code, latency);
 }

-inline void MethodStatus::OnError() {
-    _nerror << 1;
-    _nprocessing.fetch_sub(1, butil::memory_order_relaxed);
-}

 } // namespace brpc
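A small self-contained model of the release()-or-report contract that ScopedMethodStatus keeps after this change. FakeStatus and ScopedStatusGuard are stand-ins invented for illustration; the real destructor additionally calls ServerPrivateAccessor(_server).RemoveConcurrency(_c), which appears to be why ScopedRemoveConcurrency is removed from server_private_accessor.h below.

// Illustrative stand-in, not brpc code.
#include <cstdint>
#include <cstdio>

struct FakeStatus {
    void OnResponded(int error_code, int64_t latency_us) {
        std::printf("OnResponded(error=%d, latency=%lldus)\n",
                    error_code, (long long)latency_us);
    }
};

class ScopedStatusGuard {
public:
    explicit ScopedStatusGuard(FakeStatus* status) : _status(status) {}
    ~ScopedStatusGuard() {
        // "If release() is not called before destruction of this object,
        //  an error will be counted." (comment carried over from the hunk above)
        if (_status) { _status->OnResponded(/*error_code=*/-1, /*latency_us=*/0); }
    }
    FakeStatus* release() {
        FakeStatus* tmp = _status;
        _status = nullptr;
        return tmp;
    }
private:
    FakeStatus* _status;
};

int main() {
    FakeStatus st;
    {
        ScopedStatusGuard guard(&st);
        guard.release()->OnResponded(0, 1234);  // normal path: report explicitly
    }  // destructor does nothing because release() was called
    return 0;
}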
src/brpc/details/server_private_accessor.h
@@ -118,20 +118,6 @@ private:
     const Server* _server;
 };

-class ScopedRemoveConcurrency {
-public:
-    ScopedRemoveConcurrency(const Server* server, const Controller* c)
-        : _server(server), _cntl(c) {}
-    ~ScopedRemoveConcurrency() {
-        ServerPrivateAccessor(_server).RemoveConcurrency(_cntl);
-    }
-private:
-    DISALLOW_COPY_AND_ASSIGN(ScopedRemoveConcurrency);
-    const Server* _server;
-    const Controller* _cntl;
-};
-
 } // namespace brpc
src/brpc/policy/baidu_rpc_protocol.cpp
@@ -146,11 +146,11 @@ void SendRpcResponse(int64_t correlation_id,
         span->set_start_send_us(butil::cpuwide_time_us());
     }
     Socket* sock = accessor.get_sending_socket();
-    ScopedMethodStatus method_status(method_status_raw, cntl, start_parse_us);
     std::unique_ptr<Controller, LogErrorTextAndDelete> recycle_cntl(cntl);
+    ScopedMethodStatus method_status(method_status_raw, server, cntl, received_us);
     std::unique_ptr<const google::protobuf::Message> recycle_req(req);
     std::unique_ptr<const google::protobuf::Message> recycle_res(res);
-    ScopedRemoveConcurrency remove_concurrency_dummy(server, cntl);
     StreamId response_stream_id = accessor.response_stream();

@@ -264,10 +264,6 @@ void SendRpcResponse(int64_t correlation_id,
         // TODO: this is not sent
         span->set_sent_us(butil::cpuwide_time_us());
     }
-    if (method_status) {
-        method_status.release()->OnResponded(
-            cntl->ErrorCode(), butil::cpuwide_time_us() - received_us);
-    }
 }

 struct CallMethodInBackupThreadArgs {

@@ -443,7 +439,7 @@ void ProcessRpcRequest(InputMessageBase* msg_base) {
             if (!method_status->OnRequested()) {
                 cntl->SetFailed(ELIMIT, "Reached %s's max_concurrency=%d",
                                 mp->method->full_name().c_str(),
-                                const_cast<const MethodStatus*>(method_status)->max_concurrency());
+                                method_status->current_max_concurrency());
                 break;
             }
         }

@@ -515,11 +511,7 @@ void ProcessRpcRequest(InputMessageBase* msg_base) {
         // `socket' will be held until response has been sent
         SendRpcResponse(meta.correlation_id(), cntl.release(),
                         req.release(), res.release(), server,
<<<<<<< HEAD
                         method_status, msg->received_us());
=======
                         method_status, start_parse_us);
>>>>>>> auto max_concurrency limiter
     }

 bool VerifyRpcRequest(const InputMessageBase* msg_base) {
src/brpc/policy/constant_concurrency_limiter.cpp
@@ -32,6 +32,10 @@ void ConstantConcurrencyLimiter::OnResponded(int error_code, int64_t latency) {
     _current_concurrency.fetch_sub(1, butil::memory_order_relaxed);
 }

+int ConstantConcurrencyLimiter::CurrentMaxConcurrency() const {
+    return _max_concurrency;
+}
+
 int ConstantConcurrencyLimiter::MaxConcurrency() const {
     return _max_concurrency;
 }
src/brpc/policy/constant_concurrency_limiter.h
@@ -32,8 +32,9 @@ public:
     bool OnRequested() override;
     void OnResponded(int error_code, int64_t latency_us) override;
-    virtual int MaxConcurrency() const override;
-    virtual int& MaxConcurrency() override;
+    int CurrentMaxConcurrency() const override;
+    int MaxConcurrency() const override;
+    int& MaxConcurrency() override;
     int Expose(const butil::StringPiece& prefix) override;
     ConstantConcurrencyLimiter* New() const override;
src/brpc/policy/gradient_concurrency_limiter.cpp
@@ -28,13 +28,13 @@ DECLARE_int32(task_group_runqueue_capacity);
 namespace brpc {
 namespace policy {

-DEFINE_int32(gradient_cl_sampling_interval_us, 1000,
+DEFINE_int32(gradient_cl_sampling_interval_us, 100,
              "Interval for sampling request in gradient concurrency limiter");
 DEFINE_int32(gradient_cl_sample_window_size_ms, 1000,
              "Sample window size for update max concurrency in grandient "
              "concurrency limiter");
 DEFINE_int32(gradient_cl_min_sample_count, 100,
-             "Minium sample count for update max concurrency");
+             "Minimum sample count for update max concurrency");
 DEFINE_int32(gradient_cl_adjust_smooth, 50,
              "Smooth coefficient for adjust the max concurrency, the value is 0-99,"
              "the larger the value, the smaller the amount of each change");

@@ -44,41 +44,35 @@ DEFINE_bool(gradient_cl_enable_error_punish, true,
             "Whether to consider failed requests when calculating maximum concurrency");
 DEFINE_int32(gradient_cl_max_error_punish_ms, 3000,
              "The maximum time wasted for a single failed request");
-DEFINE_double(gradient_cl_fail_punish_aggressive, 1.0,
+DEFINE_double(gradient_cl_fail_punish_ratio, 1.0,
               "Use the failed requests to punish normal requests. The larger the "
               "configuration item, the more aggressive the penalty strategy.");
-DEFINE_int32(gradient_cl_window_count, 20,
+DEFINE_int32(gradient_cl_window_count, 30,
              "Sample windows count for compute history min average latency");
 DEFINE_int32(gradient_cl_task_per_req, 3,
              "How many tasks will be generated for each request, calculate the maximum "
              "concurrency of a system by calculating the maximum possible concurrent "
              "tasks.When the maximum concurrency is automatically adjusted by "
              "gradient_cl, the estimated maximum concurrency will not exceed the value. "
              "When this configuration is less than or equal to 0, it does not take "
              "effect.");
+DEFINE_int32(gradient_cl_reserved_concurrency, 0,
+             "The maximum concurrency reserved when the service is not overloaded."
+             "When the traffic increases, the larger the configuration item, the "
+             "faster the maximum concurrency grows until the server is fully loaded."
+             "When the value is less than or equal to 0, square root of current "
+             "concurrency is used.");
+DEFINE_double(gradient_cl_min_reduce_ratio, 0.5,
+              "The minimum reduce ratio of maximum concurrency per calculation."
+              " The value should be 0-1");

 static int32_t cast_max_concurrency(void* arg) {
     return *(int32_t*)arg;
 }

 GradientConcurrencyLimiter::GradientConcurrencyLimiter()
-    : _ws_index(0)
+    : _ws_queue(FLAGS_gradient_cl_window_count)
+    , _ws_index(0)
     , _unused_max_concurrency(0)
     , _max_concurrency_bvar(cast_max_concurrency, &_max_concurrency)
     , _last_sampling_time_us(0)
+    , _total_succ_req(0)
     , _max_concurrency(FLAGS_gradient_cl_initial_max_concurrency)
     , _current_concurrency(0) {
     if (FLAGS_gradient_cl_task_per_req > 0) {
         int32_t config_max_concurrency = bthread::FLAGS_bthread_concurrency *
             FLAGS_task_group_runqueue_capacity / FLAGS_gradient_cl_task_per_req;
         if (config_max_concurrency < _max_concurrency.load()) {
             _max_concurrency.store(config_max_concurrency);
             LOG(WARNING) << "The value of gradient_cl_initial_max_concurrency is "
                          << "to large and is adjusted to " << config_max_concurrency;
         }
     }
 }
 }

 void GradientConcurrencyLimiter::Describe(
     std::ostream& os, const DescribeOptions& options) {

@@ -92,6 +86,10 @@ void GradientConcurrencyLimiter::Describe(
     os << '}';
 }

+int GradientConcurrencyLimiter::CurrentMaxConcurrency() const {
+    return _max_concurrency.load(butil::memory_order_relaxed);
+}
+
 int GradientConcurrencyLimiter::MaxConcurrency() const {
     return _max_concurrency.load(butil::memory_order_relaxed);
 }

@@ -131,6 +129,12 @@ bool GradientConcurrencyLimiter::OnRequested() {

 void GradientConcurrencyLimiter::OnResponded(int error_code, int64_t latency_us) {
     _current_concurrency.fetch_sub(1, butil::memory_order_relaxed);
+    if (0 == error_code) {
+        _total_succ_req.fetch_add(1, butil::memory_order_relaxed);
+    } else if (ELIMIT == error_code) {
+        return;
+    }
     int64_t now_time_us = butil::gettimeofday_us();
     int64_t last_sampling_time_us =
         _last_sampling_time_us.load(butil::memory_order_relaxed);

@@ -155,7 +159,6 @@ void GradientConcurrencyLimiter::AddSample(int error_code, int64_t latency_us,
    }
    if (error_code != 0 && error_code != ELIMIT &&
        FLAGS_gradient_cl_enable_error_punish) {
        ++_sw.failed_count;
        latency_us =

@@ -167,21 +170,22 @@ void GradientConcurrencyLimiter::AddSample(int error_code, int64_t latency_us,
        _sw.total_succ_us += latency_us;
    }
    if (sampling_time_us - _sw.start_time_us >=
        FLAGS_gradient_cl_sample_window_size_ms * 1000 &&
        (_sw.succ_count + _sw.failed_count) >
    if (sampling_time_us - _sw.start_time_us <
        FLAGS_gradient_cl_sample_window_size_ms * 1000) {
        return;
    } else if (_sw.succ_count + _sw.failed_count <
               FLAGS_gradient_cl_min_sample_count) {
        if (_sw.succ_count > 0) {
        LOG_EVERY_N(INFO, 100) << "Insufficient sample size";
        }
    } else if (_sw.succ_count > 0) {
        UpdateConcurrency();
        ResetSampleWindow(sampling_time_us);
    } else {
        LOG_EVERY_N(ERROR, 100) << "All request failed";
        LOG(ERROR) << "All request failed, resize max_concurrency";
        int32_t current_concurrency =
            _current_concurrency.load(butil::memory_order_relaxed);
        _current_concurrency.store(current_concurrency / 2,
                                   butil::memory_order_relaxed);
        }
    }
    } else if (sampling_time_us - _sw.start_time_us >=
               FLAGS_gradient_cl_sample_window_size_ms * 1000) {
        LOG_EVERY_N(INFO, 100) << "Insufficient sample size";
    }
}

void GradientConcurrencyLimiter::ResetSampleWindow(int64_t sampling_time_us) {

@@ -195,59 +199,51 @@ void GradientConcurrencyLimiter::ResetSampleWindow(int64_t sampling_time_us) {

 void GradientConcurrencyLimiter::UpdateConcurrency() {
     int32_t current_concurrency = _current_concurrency.load();
     int max_concurrency = _max_concurrency.load();
+    int32_t total_succ_req = _total_succ_req.exchange(0, butil::memory_order_relaxed);
     int64_t failed_punish = _sw.total_failed_us *
-                            FLAGS_gradient_cl_fail_punish_aggressive;
+                            FLAGS_gradient_cl_fail_punish_ratio;
     int64_t avg_latency = (failed_punish + _sw.total_succ_us) / _sw.succ_count;
     avg_latency = std::max(static_cast<int64_t>(1), avg_latency);
-    WindowSnap snap(avg_latency, current_concurrency);
-    if (static_cast<int>(_ws_queue.size()) < FLAGS_gradient_cl_window_count) {
-        _ws_queue.push_back(snap);
-    } else {
-        _ws_queue[_ws_index % _ws_queue.size()] = snap;
-    }
+    WindowSnap snap(avg_latency, current_concurrency, total_succ_req);
+    _ws_queue.elim_push(snap);
     ++_ws_index;

-    int64_t min_avg_latency_us = _ws_queue.front().avg_latency_us;
-    int32_t safe_concurrency = _ws_queue.front().actuall_concurrency;
-    for (const auto& ws : _ws_queue) {
-        if (min_avg_latency_us > ws.avg_latency_us) {
-            min_avg_latency_us = ws.avg_latency_us;
-            safe_concurrency = ws.actuall_concurrency;
-        } else if (min_avg_latency_us == ws.avg_latency_us) {
+    int64_t min_avg_latency_us = _ws_queue.bottom()->avg_latency_us;
+    int32_t safe_concurrency = _ws_queue.bottom()->actual_concurrency;
+    for (size_t i = 0; i < _ws_queue.size(); ++i) {
+        const WindowSnap& snap = *(_ws_queue.bottom(i));
+        if (min_avg_latency_us > snap.avg_latency_us) {
+            min_avg_latency_us = snap.avg_latency_us;
+            safe_concurrency = snap.actual_concurrency;
+        } else if (min_avg_latency_us == snap.avg_latency_us) {
             safe_concurrency = std::max(safe_concurrency,
-                                        ws.actuall_concurrency);
+                                        snap.actual_concurrency);
         }
     }

-    int smooth = 50;
-    if (FLAGS_gradient_cl_adjust_smooth >= 0 &&
-        FLAGS_gradient_cl_adjust_smooth < 99) {
-        smooth = FLAGS_gradient_cl_adjust_smooth;
-    } else {
+    int smooth = FLAGS_gradient_cl_adjust_smooth;
+    if (smooth <= 0 || smooth > 99) {
         LOG_EVERY_N(WARNING, 100)
-            << "GFLAG `gradient_cl_adjust_smooth` should be 0-99,"
+            << "GFLAG `gradient_cl_adjust_smooth' should be 0-99,"
             << "current: " << FLAGS_gradient_cl_adjust_smooth
             << ", will compute with the defalut smooth value(50)";
+        smooth = 50;
     }
+    int reserved_concurrency = FLAGS_gradient_cl_reserved_concurrency;
+    if (reserved_concurrency <= 0) {
+        reserved_concurrency = std::ceil(std::sqrt(max_concurrency));
+    }
-    int queue_size = std::sqrt(max_concurrency);
     double fix_gradient = std::min(1.0, double(min_avg_latency_us) / avg_latency);
     int32_t next_concurrency = std::ceil(
-        max_concurrency * fix_gradient + queue_size);
+        max_concurrency * fix_gradient + reserved_concurrency);
     next_concurrency = std::ceil(
         (max_concurrency * smooth + next_concurrency * (100 - smooth)) / 100);
-    next_concurrency = std::max(next_concurrency, max_concurrency / 2);
-    next_concurrency = std::max(next_concurrency, safe_concurrency / 2);
     if (FLAGS_gradient_cl_task_per_req > 0) {
         int32_t config_max_concurrency = bthread::FLAGS_bthread_concurrency *
             FLAGS_task_group_runqueue_capacity / FLAGS_gradient_cl_task_per_req;
         next_concurrency = std::min(config_max_concurrency, next_concurrency);
     }
-    if (current_concurrency + queue_size < max_concurrency &&
+    if (current_concurrency + reserved_concurrency < max_concurrency &&
         max_concurrency < next_concurrency) {
         LOG(INFO) << "No need to expand the maximum concurrency"

@@ -258,19 +254,44 @@ void GradientConcurrencyLimiter::UpdateConcurrency() {
                   << ", next_max_concurrency:" << next_concurrency;
         return;
     }
+    if (fix_gradient < 1.0 && max_concurrency < next_concurrency) {
+        for (size_t i = 0; i < _ws_queue.size(); ++i) {
+            const WindowSnap& snap = *(_ws_queue.bottom(i));
+            if (current_concurrency > snap.actual_concurrency &&
+                total_succ_req < snap.total_succ_req) {
+                int32_t fixed_next_concurrency = std::ceil(
+                    snap.actual_concurrency * snap.avg_latency_us / avg_latency);
+                next_concurrency = std::min(next_concurrency, fixed_next_concurrency);
+            }
+        }
+    }
+    double min_reduce_ratio = FLAGS_gradient_cl_min_reduce_ratio;
+    if (min_reduce_ratio <= 0.0 || min_reduce_ratio >= 1.0) {
+        LOG(INFO) << "GFLAG `gradient_cl_min_reduce_ratio' should "
+                  << "be 0-1, current:" << FLAGS_gradient_cl_min_reduce_ratio
+                  << " , will compute with the default value(0.5)";
+        min_reduce_ratio = 50;
+    }
+    next_concurrency = std::max(next_concurrency,
+                                int32_t(max_concurrency * min_reduce_ratio));
+    next_concurrency = std::max(next_concurrency,
+                                int32_t(safe_concurrency * min_reduce_ratio));
     LOG(INFO) << "Update max_concurrency by gradient limiter:"
-              << " pre_max_concurrency=" << max_concurrency
+              << " pre_max_concurrency:" << max_concurrency
               << ", min_avg_latency:" << min_avg_latency_us << "us"
               << ", sampling_avg_latency:" << avg_latency << "us"
               << ", failed_punish:" << failed_punish << "us"
               << ", succ total:" << _sw.total_succ_us << "us"
               << ", currency_concurrency=" << current_concurrency
               << ", fix_gradient=" << fix_gradient
               << ", succ sample count" << _sw.succ_count
               << ", failed sample count" << _sw.failed_count
               << ", safe_concurrency" << safe_concurrency
-              << ", next_max_concurrency=" << next_concurrency;
+              << ", current_concurrency:" << current_concurrency
+              << ", next_max_concurrency:" << next_concurrency;
     _max_concurrency.store(next_concurrency, butil::memory_order_relaxed);
 }
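UpdateConcurrency() above drives the limit from the ratio between the lowest average latency remembered in _ws_queue and the latency of the window that just closed, then smooths the step with gradient_cl_adjust_smooth and pads it with the reserved concurrency. Below is a loose stand-alone sketch of that arithmetic, not a line-for-line copy of the function: integer truncation and the clamping against gradient_cl_min_reduce_ratio, safe_concurrency and gradient_cl_task_per_req are left out, and the sample numbers are invented.

// Stand-alone sketch of the gradient update, for illustration only.
#include <algorithm>
#include <cmath>
#include <cstdio>

int NextMaxConcurrency(int max_concurrency,
                       double min_avg_latency_us,  // best window kept in _ws_queue
                       double avg_latency_us,      // window that just closed
                       int reserved_concurrency,   // FLAGS_gradient_cl_reserved_concurrency
                       int smooth) {               // FLAGS_gradient_cl_adjust_smooth, 0-99
    if (reserved_concurrency <= 0) {
        reserved_concurrency = (int)std::ceil(std::sqrt((double)max_concurrency));
    }
    const double fix_gradient = std::min(1.0, min_avg_latency_us / avg_latency_us);
    int next = (int)std::ceil(max_concurrency * fix_gradient + reserved_concurrency);
    // Smooth towards the previous limit: larger `smooth` means smaller steps.
    next = (int)std::ceil((max_concurrency * smooth + next * (100 - smooth)) / 100.0);
    return next;
}

int main() {
    // Latency doubled (1ms -> 2ms) with a limit of 200: the raw gradient step
    // would land near 115; smoothing with smooth=50 keeps it at 158.
    std::printf("%d\n", NextMaxConcurrency(200, 1000.0, 2000.0, 0, 50));  // prints 158
    return 0;
}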
src/brpc/policy/gradient_concurrency_limiter.h
@@ -17,8 +17,9 @@
 #ifndef BRPC_POLICY_GRANDIENT_CONCURRENCY_LIMITER_H
 #define BRPC_POLICY_GRANDIENT_CONCURRENCY_LIMITER_H

-#include "brpc/concurrency_limiter.h"
 #include "bvar/bvar.h"
+#include "butil/containers/bounded_queue.h"
+#include "brpc/concurrency_limiter.h"

 namespace brpc {
 namespace policy {

@@ -29,6 +30,7 @@ public:
     ~GradientConcurrencyLimiter() {}
     bool OnRequested() override;
     void OnResponded(int error_code, int64_t latency_us) override;
+    int CurrentMaxConcurrency() const override;
     int MaxConcurrency() const override;

     // For compatibility with the MaxConcurrencyOf() interface. When using

@@ -39,6 +41,7 @@ public:
     // effect.
     int& MaxConcurrency() override;

     int Expose(const butil::StringPiece& prefix) override;
     GradientConcurrencyLimiter* New() const override;
     void Destroy() override;

@@ -60,11 +63,13 @@ private:
     };

     struct WindowSnap {
-        WindowSnap(int64_t latency_us, int32_t concurrency)
+        WindowSnap(int64_t latency_us, int32_t concurrency, int32_t succ_req)
             : avg_latency_us(latency_us)
-            , actuall_concurrency(concurrency) {}
+            , actual_concurrency(concurrency)
+            , total_succ_req(succ_req) {}
         int64_t avg_latency_us;
-        int32_t actuall_concurrency;
+        int32_t actual_concurrency;
+        int32_t total_succ_req;
     };

     void AddSample(int error_code, int64_t latency_us, int64_t sampling_time_us);

@@ -74,12 +79,13 @@ private:
     void ResetSampleWindow(int64_t sampling_time_us);

     SampleWindow _sw;
-    std::vector<WindowSnap> _ws_queue;
+    butil::BoundedQueue<WindowSnap> _ws_queue;
     uint32_t _ws_index;
     int32_t _unused_max_concurrency;
     butil::Mutex _sw_mutex;
     bvar::PassiveStatus<int32_t> _max_concurrency_bvar;
     butil::atomic<int64_t> BAIDU_CACHELINE_ALIGNMENT _last_sampling_time_us;
+    butil::atomic<int32_t> BAIDU_CACHELINE_ALIGNMENT _total_succ_req;
     butil::atomic<int32_t> BAIDU_CACHELINE_ALIGNMENT _max_concurrency;
     butil::atomic<int32_t> BAIDU_CACHELINE_ALIGNMENT _current_concurrency;
 };
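The header change above swaps the std::vector of WindowSnap plus manual index wrap-around for butil::BoundedQueue, whose elim_push() the .cpp hunk uses to append a new snapshot into a fixed-capacity window (evicting the oldest snapshot once gradient_cl_window_count entries are stored, which is the behaviour mimicked below). A rough stand-in using std::deque so the snippet compiles without butil; WindowSnap is copied from the hunk, while SnapRing and its methods are invented for illustration and do not reproduce butil::BoundedQueue's exact API.

// Illustrative stand-in for the bounded snapshot window; not brpc code.
#include <cstdint>
#include <deque>

struct WindowSnap {
    WindowSnap(int64_t latency_us, int32_t concurrency, int32_t succ_req)
        : avg_latency_us(latency_us)
        , actual_concurrency(concurrency)
        , total_succ_req(succ_req) {}
    int64_t avg_latency_us;
    int32_t actual_concurrency;
    int32_t total_succ_req;
};

class SnapRing {
public:
    explicit SnapRing(size_t capacity) : _cap(capacity) {}
    // Keep at most `capacity` snapshots: evict the oldest before pushing.
    void push(const WindowSnap& snap) {
        if (_q.size() == _cap) {
            _q.pop_front();
        }
        _q.push_back(snap);
    }
    size_t size() const { return _q.size(); }
    const WindowSnap& at(size_t i) const { return _q[i]; }
private:
    size_t _cap;
    std::deque<WindowSnap> _q;
};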
src/brpc/policy/http_rpc_protocol.cpp
@@ -557,12 +557,12 @@ static void SendHttpResponse(Controller *cntl,
     if (span) {
         span->set_start_send_us(butil::cpuwide_time_us());
     }
-    ScopedMethodStatus method_status(method_status_raw, cntl, start_parse_us);
+    ScopedMethodStatus method_status(method_status_raw, server, cntl, received_us);
     std::unique_ptr<Controller, LogErrorTextAndDelete> recycle_cntl(cntl);
     std::unique_ptr<const google::protobuf::Message> recycle_req(req);
     std::unique_ptr<const google::protobuf::Message> recycle_res(res);
     Socket* socket = accessor.get_sending_socket();
-    ScopedRemoveConcurrency remove_concurrency_dummy(server, cntl);
     if (cntl->IsCloseConnection()) {
         socket->SetFailed();

@@ -727,10 +727,6 @@ static void SendHttpResponse(Controller *cntl,
         // TODO: this is not sent
         span->set_sent_us(butil::cpuwide_time_us());
     }
-    if (method_status) {
-        method_status.release()->OnResponded(
-            cntl->ErrorCode(), butil::cpuwide_time_us() - received_us);
-    }
 }

 inline void SendHttpResponse(Controller *cntl, const Server* svr,

@@ -1172,19 +1168,10 @@ void ProcessHttpRequest(InputMessageBase *msg) {
     MethodStatus* method_status = sp->status;
     if (method_status) {
         if (!method_status->OnRequested()) {
<<<<<<< HEAD
             cntl->SetFailed(ELIMIT, "Reached %s's max_concurrency=%d",
                             sp->method->full_name().c_str(),
                             method_status->max_concurrency());
             return SendHttpResponse(cntl.release(), server, method_status,
                                     msg->received_us());
=======
             cntl->SetFailed(ELIMIT, "Reached %s's max_concurrency=%d",
                             sp->method->full_name().c_str(),
                             const_cast<const MethodStatus*>(method_status)->max_concurrency());
             return SendHttpResponse(cntl.release(), server, method_status);
>>>>>>> auto max_concurrency limiter
         }
     }

@@ -1200,16 +1187,9 @@ void ProcessHttpRequest(InputMessageBase *msg) {
         return SendHttpResponse(cntl.release(), server, method_status,
                                 msg->received_us());
     }
     if (!server_accessor.AddConcurrency(cntl.get())) {
<<<<<<< HEAD
         cntl->SetFailed(ELIMIT, "Reached server's max_concurrency=%d",
-                        server->options().max_concurrency);
+                        static_cast<int>(server->options().max_concurrency));
         return SendHttpResponse(cntl.release(), server, method_status,
                                 msg->received_us());
=======
         cntl->SetFailed(ELIMIT, "Reached server's max_concurrency=%d",
                         static_cast<int>((server->options().max_concurrency)));
         return SendHttpResponse(cntl.release(), server, method_status);
>>>>>>> auto max_concurrency limiter
     }
     if (FLAGS_usercode_in_pthread && TooManyUserCode()) {
         cntl->SetFailed(ELIMIT, "Too many user code to run when"
src/brpc/policy/hulu_pbrpc_protocol.cpp
@@ -231,12 +231,12 @@ static void SendHuluResponse(int64_t correlation_id,
     if (span) {
         span->set_start_send_us(butil::cpuwide_time_us());
     }
-    ScopedMethodStatus method_status(method_status_raw, cntl, start_parse_us);
+    ScopedMethodStatus method_status(method_status_raw, server, cntl, received_us);
     Socket* sock = accessor.get_sending_socket();
     std::unique_ptr<HuluController, LogErrorTextAndDelete> recycle_cntl(cntl);
     std::unique_ptr<const google::protobuf::Message> recycle_req(req);
     std::unique_ptr<const google::protobuf::Message> recycle_res(res);
-    ScopedRemoveConcurrency remove_concurrency_dummy(server, cntl);
     if (cntl->IsCloseConnection()) {
         sock->SetFailed();

@@ -318,10 +318,6 @@ static void SendHuluResponse(int64_t correlation_id,
         // TODO: this is not sent
         span->set_sent_us(butil::cpuwide_time_us());
     }
-    if (method_status) {
-        method_status.release()->OnResponded(
-            cntl->ErrorCode(), butil::cpuwide_time_us() - received_us);
-    }
 }

 // Defined in baidu_rpc_protocol.cpp

@@ -460,8 +456,7 @@ void ProcessHuluRequest(InputMessageBase* msg_base) {
             if (!method_status->OnRequested()) {
                 cntl->SetFailed(ELIMIT, "Reached %s's max_concurrency=%d",
                                 sp->method->full_name().c_str(),
-                                const_cast<const MethodStatus*>(method_status)->max_concurrency());
+                                method_status->current_max_concurrency());
                 break;
             }
         }
src/brpc/policy/mongo_protocol.cpp
@@ -60,7 +60,8 @@ SendMongoResponse::~SendMongoResponse() {
 void SendMongoResponse::Run() {
     std::unique_ptr<SendMongoResponse> delete_self(this);
-    ScopedMethodStatus method_status(status, &cntl, butil::cpuwide_time_us());
+    ScopedMethodStatus method_status(status, server, &cntl, received_us);
     Socket* socket = ControllerPrivateAccessor(&cntl).get_sending_socket();
     if (cntl.IsCloseConnection()) {

@@ -102,10 +103,6 @@ void SendMongoResponse::Run() {
             return;
         }
     }
-    if (method_status) {
-        method_status.release()->OnResponded(
-            cntl.ErrorCode(), butil::cpuwide_time_us() - received_us);
-    }
 }

 ParseResult ParseMongoMessage(butil::IOBuf* source,

@@ -249,8 +246,7 @@ void ProcessMongoRequest(InputMessageBase* msg_base) {
             mongo_done->cntl.SetFailed(ELIMIT, "Reached %s's max_concurrency=%d",
                                        mp->method->full_name().c_str(),
-                                       const_cast<const MethodStatus*>(method_status)->max_concurrency());
+                                       method_status->current_max_concurrency());
             break;
         }
     }
src/brpc/policy/nshead_protocol.cpp
@@ -64,7 +64,6 @@ public:
 void NsheadClosure::Run() {
     // Recycle itself after `Run'
     std::unique_ptr<NsheadClosure, DeleteNsheadClosure> recycle_ctx(this);
-    ScopedRemoveConcurrency remove_concurrency_dummy(_server, &_controller);
     ControllerPrivateAccessor accessor(&_controller);
     Span* span = accessor.span();

@@ -73,7 +72,8 @@ void NsheadClosure::Run() {
     }
     Socket* sock = accessor.get_sending_socket();
     ScopedMethodStatus method_status(_server->options().nshead_service->_status,
-                                     &_controller, butil::cpuwide_time_us());
+                                     _server, &_controller, _received_us);
     if (!method_status) {
         // Judge errors belongings.
         // may not be accurate, but it does not matter too much.

@@ -124,10 +124,6 @@ void NsheadClosure::Run() {
         // TODO: this is not sent
         span->set_sent_us(butil::cpuwide_time_us());
     }
-    if (method_status) {
-        method_status.release()->OnResponded(
-            _controller.ErrorCode(), butil::cpuwide_time_us() - _received_us);
-    }
 }

 void NsheadClosure::SetMethodName(const std::string& full_method_name) {
src/brpc/policy/sofa_pbrpc_protocol.cpp
@@ -215,12 +215,12 @@ static void SendSofaResponse(int64_t correlation_id,
     if (span) {
         span->set_start_send_us(butil::cpuwide_time_us());
     }
-    ScopedMethodStatus method_status(method_status_raw, cntl, start_parse_us);
+    ScopedMethodStatus method_status(method_status_raw, server, cntl, received_us);
     Socket* sock = accessor.get_sending_socket();
     std::unique_ptr<Controller, LogErrorTextAndDelete> recycle_cntl(cntl);
     std::unique_ptr<const google::protobuf::Message> recycle_req(req);
     std::unique_ptr<const google::protobuf::Message> recycle_res(res);
-    ScopedRemoveConcurrency remove_concurrency_dummy(server, cntl);
     if (cntl->IsCloseConnection()) {
         sock->SetFailed();

@@ -294,10 +294,6 @@ static void SendSofaResponse(int64_t correlation_id,
         // TODO: this is not sent
         span->set_sent_us(butil::cpuwide_time_us());
     }
-    if (method_status) {
-        method_status.release()->OnResponded(
-            cntl->ErrorCode(), butil::cpuwide_time_us() - received_us);
-    }
 }

 // Defined in baidu_rpc_protocol.cpp

@@ -416,8 +412,7 @@ void ProcessSofaRequest(InputMessageBase* msg_base) {
             if (!method_status->OnRequested()) {
                 cntl->SetFailed(ELIMIT, "Reached %s's max_concurrency=%d",
                                 sp->method->full_name().c_str(),
-                                const_cast<MethodStatus*>(method_status)->max_concurrency());
+                                method_status->current_max_concurrency());
                 break;
             }
         }
src/brpc/policy/thrift_protocol.cpp
@@ -232,9 +232,8 @@ void ThriftClosure::DoRun() {
     if (span) {
         span->set_start_send_us(butil::cpuwide_time_us());
     }
     Socket* sock = accessor.get_sending_socket();
-    ScopedMethodStatus method_status(server->options().thrift_service
-            ? server->options().thrift_service->_status : NULL);
+    ScopedMethodStatus method_status(_server->options().thrift_service->_status,
+                                     _server, &_controller, cpuwide_start_us());
     if (!method_status) {
         // Judge errors belongings.
         // may not be accurate, but it does not matter too much.

@@ -526,7 +525,7 @@ void ProcessThriftRequest(InputMessageBase* msg_base) {
     }
     if (!server_accessor.AddConcurrency(cntl)) {
         cntl->SetFailed(ELIMIT, "Reached server's max_concurrency=%d",
-                        server->options().max_concurrency);
+                        static_cast<int>(server->options().max_concurrency));
         break;
     }
     if (FLAGS_usercode_in_pthread && TooManyUserCode()) {