protobuf, commit cee0447f
Authored Sep 25, 2018 by Yilun Chong
Parent: 4426cb57

Add node and php to benchmark dashboard

Showing 7 changed files, with 130 additions and 32 deletions:
benchmarks/Makefile.am                +1   -2
benchmarks/js/js_benchmark.js         +14  -2
benchmarks/php/PhpBenchmark.php       +16  -4
benchmarks/python/py_benchmark.py     +14  -7
benchmarks/util/result_parser.py      +43  -5
benchmarks/util/result_uploader.py    +22  -8
kokoro/linux/benchmark/build.sh       +20  -4
benchmarks/Makefile.am

@@ -260,7 +260,7 @@ go-benchmark: go_protoc_middleman
 	@echo 'all_data=""' >> go-benchmark
 	@echo 'conf=()' >> go-benchmark
 	@echo 'data_files=()' >> go-benchmark
-	@echo 'for arg in $$@; do if [[ $${arg:0:1} == "-" ]]; then conf+=($$arg); else data_files+=("../$$arg"); fi; done' >> go-benchmark
+	@echo 'for arg in $$@; do if [[ $${arg:0:1} == "-" ]]; then conf+=($$arg); else data_files+=("$$arg"); fi; done' >> go-benchmark
 	@echo 'go test -bench=. $${conf[*]} -- $${data_files[*]}' >> go-benchmark
 	@echo 'cd ..' >> go-benchmark
 	@chmod +x go-benchmark
@@ -533,7 +533,6 @@ php-c-benchmark: proto3_middleman_php generate_proto3_data php_c_extension php_c
 	@echo '#! /bin/bash' > php-c-benchmark
 	@echo 'export PROTOBUF_PHP_SRCDIR="$$(cd $(top_srcdir) && pwd)/php/src"' >> php-c-benchmark
 	@echo 'export PROTOBUF_PHP_EXTDIR="$$PROTOBUF_PHP_SRCDIR/../ext/google/protobuf/modules"' >> php-c-benchmark
-	@echo 'echo "$$PROTOBUF_PHP_EXTDIR/protobuf.so"' >> php-c-benchmark
 	@echo 'cd tmp/php' >> php-c-benchmark
 	@echo 'export CURRENT_DIR=$$(pwd)' >> php-c-benchmark
 	@echo 'php -d auto_prepend_file="autoload.php" -d include_path="$$(pwd)" -d extension="$$PROTOBUF_PHP_EXTDIR/protobuf.so" Google/Protobuf/Benchmark/PhpBenchmark.php $$@' >> php-c-benchmark
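For context, the generated go-benchmark wrapper partitions its arguments into benchmark flags and dataset paths; the changed line stops rewriting dataset paths to "../<path>", since build.sh now passes absolute paths. A minimal Python sketch of that partitioning logic (file names hypothetical):

# Flags (leading "-") go to conf; everything else is a dataset path,
# now used verbatim instead of being rewritten to "../<path>".
def split_args(argv):
    conf, data_files = [], []
    for arg in argv:
        if arg.startswith("-"):
            conf.append(arg)
        else:
            data_files.append(arg)  # old behavior: "../" + arg
    return conf, data_files

print(split_args(["-test.benchtime=1s", "/tmp/dataset.google_message1_proto3.pb"]))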
benchmarks/js/js_benchmark.js

@@ -18,6 +18,7 @@ function getNewPrototype(name) {
   }
 }
 
 var results = [];
+var json_file = "";
 
 console.log("#####################################################");
 console.log("Js Benchmark: ");
@@ -25,6 +26,11 @@ process.argv.forEach(function(filename, index) {
   if (index < 2) {
     return;
   }
+  if (filename.indexOf("--json_output") != -1) {
+    json_file = filename.replace(/^--json_output=/, '');
+    return;
+  }
   var benchmarkDataset =
       proto.benchmarks.BenchmarkDataset.deserializeBinary(fs.readFileSync(filename));
   var messageList = [];
@@ -55,8 +61,8 @@ process.argv.forEach(function(filename, index) {
   results.push({
     filename: filename,
     benchmarks: {
-      protobufjs_decoding: senarios.benches[0] * totalBytes,
-      protobufjs_encoding: senarios.benches[1] * totalBytes
+      protobufjs_decoding: senarios.benches[0] * totalBytes / 1024 / 1024,
+      protobufjs_encoding: senarios.benches[1] * totalBytes / 1024 / 1024
     }
   })
@@ -68,3 +74,9 @@ process.argv.forEach(function(filename, index) {
 });
 
 console.log("#####################################################");
+
+if (json_file != "") {
+  fs.writeFile(json_file, JSON.stringify(results), (err) => {
+    if (err) throw err;
+  });
+}
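Here senarios.benches[i] appears to hold operations per second, so multiplying by totalBytes gives bytes per second, and the added / 1024 / 1024 reports the dashboard value in MB/s. A quick check of the arithmetic in Python (numbers hypothetical):

ops_per_sec = 500.0       # hypothetical benchmark.js rate
total_bytes = 240 * 1024  # hypothetical total payload size in bytes
throughput = ops_per_sec * total_bytes / 1024 / 1024
print("%.2f MB/s" % throughput)  # 117.19 MB/s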
benchmarks/php/PhpBenchmark.php

@@ -93,7 +93,7 @@ function getMessageName(&$dataset) {
   }
 }
 
-function runBenchmark($file) {
+function runBenchmark($file, $behavior_prefix) {
   $datafile = fopen($file, "r") or die("Unable to open file " . $file);
   $bytes = fread($datafile, filesize($file));
   $dataset = new BenchmarkDataset(NULL);
@@ -119,8 +119,8 @@ function runBenchmark($file) {
   return array(
       "filename" => $file,
       "benchmarks" => array(
-          "parse_php" => $parse_benchmark->runBenchmark(),
-          "serailize_php" => $serialize_benchmark->runBenchmark()
+          $behavior_prefix . "_parse" => $parse_benchmark->runBenchmark(),
+          $behavior_prefix . "_serailize" => $serialize_benchmark->runBenchmark()
      ),
      "message_name" => $dataset->getMessageName()
   );
@@ -129,15 +129,27 @@ function runBenchmark($file) {
 // main
 $json_output = false;
 $results = array();
+$behavior_prefix = "";
 foreach ($argv as $index => $arg) {
   if ($index == 0) {
     continue;
   }
   if ($arg == "--json") {
     $json_output = true;
+  } else if (strpos($arg, "--behavior_prefix") == 0) {
+    $behavior_prefix = str_replace("--behavior_prefix=", "", $arg);
+  }
+}
+
+foreach ($argv as $index => $arg) {
+  if ($index == 0) {
+    continue;
+  }
+  if (substr($arg, 0, 2) == "--") {
     continue;
   } else {
-    array_push($results, runBenchmark($arg));
+    array_push($results, runBenchmark($arg, $behavior_prefix));
   }
 }
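The main block now scans $argv twice: the first pass collects flags so $behavior_prefix is known before any benchmark runs, and the second pass benchmarks every non-flag argument. The same two-pass idea sketched in Python (names illustrative):

def main(args):
    behavior_prefix, json_output = "", False
    for arg in args:  # pass 1: flags only
        if arg == "--json":
            json_output = True
        elif arg.startswith("--behavior_prefix="):
            behavior_prefix = arg.split("=", 1)[1]
    results = []
    for arg in args:  # pass 2: data files, with the prefix already set
        if not arg.startswith("--"):
            results.append((arg, behavior_prefix))
    return json_output, results

print(main(["--json", "--behavior_prefix=php_c", "dataset.pb"]))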
benchmarks/python/py_benchmark.py

@@ -44,9 +44,13 @@ def run_one_test(filename):
   data = open(filename).read()
   benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
   benchmark_dataset.ParseFromString(data)
+  total_bytes = 0
+  for payload in benchmark_dataset.payload:
+    total_bytes += len(payload)
   benchmark_util = Benchmark(full_iteration=len(benchmark_dataset.payload),
                              module="py_benchmark",
-                             setup_method="init")
+                             setup_method="init",
+                             total_bytes=total_bytes)
   result = {}
   result["filename"] = filename
   result["message_name"] = benchmark_dataset.message_name
@@ -61,10 +65,11 @@ def run_one_test(filename):
 def init(filename):
-  global benchmark_dataset, message_class, message_list, counter
+  global benchmark_dataset, message_class, message_list, counter, total_bytes
   message_list = []
   counter = 0
-  data = open(os.path.dirname(sys.argv[0]) + "/../" + filename).read()
+  total_bytes = 0
+  data = open(filename).read()
   benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
   benchmark_dataset.ParseFromString(data)
@@ -85,6 +90,7 @@ def init(filename):
     temp = message_class()
     temp.ParseFromString(one_payload)
     message_list.append(temp)
+    total_bytes += len(one_payload)
 
 def parse_from_benchmark():
@@ -101,11 +107,12 @@ def serialize_to_benchmark():
 class Benchmark:
   def __init__(self, module=None, test_method=None,
-               setup_method=None, full_iteration = 1):
+               setup_method=None, total_bytes=None, full_iteration = 1):
     self.full_iteration = full_iteration
     self.module = module
     self.test_method = test_method
     self.setup_method = setup_method
+    self.total_bytes = total_bytes
 
   def set_test_method(self, test_method):
     self.test_method = test_method
@@ -130,7 +137,7 @@ class Benchmark:
     t = timeit.timeit(stmt="%s(%s)" % (self.test_method, test_method_args),
                       setup=self.full_setup_code(setup_method_args),
                       number=reps);
-    return 1.0 * t / reps * (10 ** 9)
+    return self.total_bytes * 1.0 / 2 ** 20 / (1.0 * t / reps)
 
 if __name__ == "__main__":
@@ -144,10 +151,10 @@ if __name__ == "__main__":
   for result in results:
     print("Message %s of dataset file %s" % \
         (result["message_name"], result["filename"]))
-    print("Average time for parse_from_benchmark: %.2f ns" % \
+    print("Average throughput for parse_from_benchmark: %.2f MB/s" % \
        (result["benchmarks"][ \
            args.behavior_prefix + "_parse_from_benchmark"]))
-    print("Average time for serialize_to_benchmark: %.2f ns" % \
+    print("Average throughput for serialize_to_benchmark: %.2f MB/s" % \
        (result["benchmarks"][ \
            args.behavior_prefix + "_serialize_to_benchmark"]))
     print("")
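The timing helper's return value changes from nanoseconds per iteration to throughput in MB/s: total_bytes scaled to MiB, divided by seconds per iteration. A worked example with hypothetical numbers:

total_bytes = 4 * 2 ** 20  # 4 MiB of payload per full iteration
t, reps = 2.0, 100         # timeit total of 2 s over 100 reps

old_value = 1.0 * t / reps * (10 ** 9)                      # 20000000.0 ns/iteration
new_value = total_bytes * 1.0 / 2 ** 20 / (1.0 * t / reps)  # 200.0 MB/s
print(old_value, new_value)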
benchmarks/util/result_parser.py

@@ -115,7 +115,6 @@ def __parse_synthetic_result(filename):
 #       behavior: results,
 #       ...
 #     },
-#     "message_name": STRING
 #   },
 #   ...
 # ], #pure-python
@@ -136,8 +135,7 @@ def __parse_python_result(filename):
         "language": "python",
         "dataFilename": __extract_file_name(result["filename"]),
         "behavior": behavior,
-        "throughput": avg_size /
-            result["benchmarks"][behavior] * 1e9 / 2 ** 20
+        "throughput": result["benchmarks"][behavior]
       })
@@ -220,7 +218,7 @@ def __parse_go_result(filename):
       continue
     first_slash_index = result_list[0].find('/')
     last_slash_index = result_list[0].rfind('/')
-    full_filename = result_list[0][first_slash_index+4:last_slash_index] # delete ../ prefix
+    full_filename = result_list[0][first_slash_index+1:last_slash_index]
     total_bytes, _ = __get_data_size(full_filename)
     behavior_with_suffix = result_list[0][last_slash_index+1:]
     last_dash = behavior_with_suffix.rfind("-")
@@ -236,11 +234,45 @@ def __parse_go_result(filename):
       })
 
+# Node/Php results example:
+#
+# [
+#   {
+#     "filename": string,
+#     "benchmarks": {
+#       behavior: results,
+#       ...
+#     },
+#   },
+#   ...
+# ]
+def __parse_js_php_result(filename, language):
+  if filename == "":
+    return
+
+  if filename[0] != '/':
+    filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
+
+  with open(filename) as f:
+    results = json.loads(f.read())
+    for result in results:
+      _, avg_size = __get_data_size(result["filename"])
+      for behavior in result["benchmarks"]:
+        __results.append({
+          "language": language,
+          "dataFilename": __extract_file_name(result["filename"]),
+          "behavior": behavior,
+          "throughput": result["benchmarks"][behavior]
+        })
+
 def get_result_from_file(cpp_file="",
                          java_file="",
                          python_file="",
                          go_file="",
-                         synthetic_file=""):
+                         synthetic_file="",
+                         node_file="",
+                         php_c_file="",
+                         php_file=""):
   results = {}
 
   if cpp_file != "":
     __parse_cpp_result(cpp_file)
@@ -252,5 +284,11 @@ def get_result_from_file(cpp_file="",
     __parse_go_result(go_file)
   if synthetic_file != "":
     __parse_synthetic_result(synthetic_file)
+  if node_file != "":
+    __parse_js_php_result(node_file, "node")
+  if php_file != "":
+    __parse_js_php_result(php_file, "php")
+  if php_c_file != "":
+    __parse_js_php_result(php_c_file, "php")
 
   return __results
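Per the comment block added above __parse_js_php_result, the node and php result files are plain JSON arrays whose benchmark values are already throughputs, so the parser copies them through unchanged. A hypothetical node_result.json in that shape, written from Python:

import json

sample = [{
    "filename": "dataset.google_message1_proto3.pb",  # hypothetical
    "benchmarks": {
        "protobufjs_decoding": 117.19,  # MB/s, hypothetical values
        "protobufjs_encoding": 95.40,
    },
}]
with open("/tmp/node_result.json", "w") as f:
    json.dump(sample, f)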
benchmarks/util/result_uploader.py

@@ -59,13 +59,14 @@ def upload_result(result_list, metadata):
     labels_string += ",|%s:%s|" % (key, result[key])
   new_result["labels"] = labels_string[1:]
   new_result["timestamp"] = _INITIAL_TIME
+  print(labels_string)
 
-  bq = big_query_utils.create_big_query()
-  row = big_query_utils.make_row(str(uuid.uuid4()), new_result)
-  if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET,
-                                     _TABLE + "$" + _NOW,
-                                     [row]):
-    print('Error when uploading result', new_result)
+  # bq = big_query_utils.create_big_query()
+  # row = big_query_utils.make_row(str(uuid.uuid4()), new_result)
+  # if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET,
+  #                                    _TABLE + "$" + _NOW,
+  #                                    [row]):
+  #   print('Error when uploading result', new_result)
 
 if __name__ == "__main__":
@@ -82,6 +83,15 @@ if __name__ == "__main__":
   parser.add_argument("-go", "--go_input_file",
                       help="The golang benchmark result file's name",
                       default="")
+  parser.add_argument("-node", "--node_input_file",
+                      help="The node.js benchmark result file's name",
+                      default="")
+  parser.add_argument("-php", "--php_input_file",
+                      help="The pure php benchmark result file's name",
+                      default="")
+  parser.add_argument("-php_c", "--php_c_input_file",
+                      help="The php with c ext benchmark result file's name",
+                      default="")
   args = parser.parse_args()
 
   metadata = get_metadata()
@@ -90,5 +100,8 @@ if __name__ == "__main__":
       cpp_file=args.cpp_input_file,
       java_file=args.java_input_file,
       python_file=args.python_input_file,
-      go_file=args.go_input_file
+      go_file=args.go_input_file,
+      node_file=args.node_input_file,
+      php_file=args.php_input_file,
+      php_c_file=args.php_c_input_file,
       ), metadata)
\ No newline at end of file
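For reference, the labels string that upload_result builds (and, with this change, prints instead of uploading) joins one "|key:value|" fragment per field, comma-separated, with the leading comma stripped:

result = {"branch": "master", "commit": "cee0447f"}  # hypothetical fields
labels_string = ""
for key in result:
    labels_string += ",|%s:%s|" % (key, result[key])
print(labels_string[1:])  # |branch:master|,|commit:cee0447f|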
kokoro/linux/benchmark/build.sh

@@ -19,7 +19,8 @@ fi
 # download datasets for benchmark
 cd benchmarks
 ./download_data.sh
-datasets=`find . -type f -name "dataset.*.pb"`
+datasets=$(for file in $(find . -type f -name "dataset.*.pb" -not -path "./tmp/*"); do echo "$(pwd)/$file"; done | xargs)
+echo $datasets
 cd $oldpwd
 
 # build Python protobuf
@@ -84,9 +85,24 @@ make java-benchmark
 echo "benchmarking java..."
 ./java-benchmark -Cresults.file.options.file="tmp/java_result.json" $datasets
 
+make js-benchmark
+echo "benchmarking js..."
+./js-benchmark $datasets --json_output=$(pwd)/tmp/node_result.json
+
+make -j8 generate_proto3_data
+proto3_datasets=$(for file in $datasets; do echo $(pwd)/tmp/proto3_data/${file#$(pwd)}; done | xargs)
+echo $proto3_datasets
+
+# build php benchmark
+make -j8 php-benchmark
+echo "benchmarking php..."
+./php-benchmark $proto3_datasets --json --behavior_prefix="php" > tmp/php_result.json
+make -j8 php-c-benchmark
+echo "benchmarking php_c..."
+./php-c-benchmark $proto3_datasets --json --behavior_prefix="php_c" > tmp/php_c_result.json
+
 # upload result to bq
 make python_add_init
-env LD_LIBRARY_PATH="$oldpwd/src/.libs" python -m util.result_uploader -cpp="../tmp/cpp_result.json" -java="../tmp/java_result.json" \
-	-python="../tmp/python_result.json" -go="../tmp/go_result.txt"
+env LD_LIBRARY_PATH="$oldpwd/src/.libs" python -m util.result_uploader -php="../tmp/php_result.json" -php_c="../tmp/php_c_result.json" \
+	-cpp="../tmp/cpp_result.json" -java="../tmp/java_result.json" -go="../tmp/go_result.txt" -python="../tmp/python_result.json" \
+	-node="../tmp/node_result.json"
 cd $oldpwd
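The proto3_datasets line maps each absolute dataset path onto its generated proto3 copy by stripping the $(pwd) prefix (bash ${file#$(pwd)}) and re-rooting it under tmp/proto3_data. The equivalent transformation in Python (paths hypothetical):

pwd = "/workspace/protobuf/benchmarks"  # hypothetical working directory
datasets = [pwd + "/datasets/google_message1/proto3/dataset.google_message1_proto3.pb"]

# ${file#$(pwd)} strips the leading $pwd, leaving "/datasets/...";
# the result is then prefixed with $pwd/tmp/proto3_data.
proto3_datasets = [pwd + "/tmp/proto3_data" + f[len(pwd):] for f in datasets]
print(proto3_datasets[0])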