Commit bbb00715 (unverified), authored Jul 17, 2019 by aslepko, committed Jul 17, 2019 by GitHub

    Merge branch 'master' into aslepko/ci

Parents: b9429dee, f490903b

Showing 12 changed files with 182 additions and 86 deletions:
cmake/clang_4_0_flags.cmake                                         +0   -2
src/ngraph/frontend/onnx_import/op/shrink.cpp                       +18  -6
src/ngraph/graph_util.cpp                                           +20  -13
src/ngraph/op/max.cpp                                               +3   -2
src/ngraph/op/min.cpp                                               +3   -2
src/ngraph/runtime/cpu/cpu_emitter.cpp                              +2   -0
src/ngraph/runtime/cpu/cpu_external_function.cpp                    +14  -1
src/ngraph/runtime/cpu/pass/cpu_memory_assignment.cpp               +2   -1
src/ngraph/runtime/cpu/pregenerated_src/cpu_cg_runtime_context.hpp  +54  -54
src/ngraph/runtime/gpu/cudnn_host_parameters.hpp                    +4   -0
src/tools/nbench/benchmark.cpp                                      +4   -0
test/provenance.cpp                                                 +58  -5
cmake/clang_4_0_flags.cmake (view file @ bbb00715)

@@ -28,8 +28,6 @@ add_compile_options(-Wno-global-constructors)
 add_compile_options(-Wno-exit-time-destructors)
 add_compile_options(-Wno-missing-prototypes)
 add_compile_options(-Wno-missing-noreturn)
-add_compile_options(-Wno-switch)
-add_compile_options(-Wno-switch-enum)
 add_compile_options(-Wno-covered-switch-default)
 add_compile_options(-Wno-undef)
 if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
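Removing these two suppressions re-enables Clang's -Wswitch and -Wswitch-enum diagnostics, which lines up with the exhaustive case lists added in the C++ files below (note that -Wno-covered-switch-default stays, since those switches keep a default alongside full enumerator coverage). A minimal illustration of the difference between the two warnings, using a hypothetical enum rather than ngraph code:

    enum class PadMode { CONSTANT, EDGE, REFLECT, SYMMETRIC };

    const char* to_string(PadMode mode)
    {
        // -Wswitch stays quiet here because the default handles SYMMETRIC,
        // but -Wswitch-enum still warns: SYMMETRIC is not listed explicitly.
        switch (mode)
        {
        case PadMode::CONSTANT: return "CONSTANT";
        case PadMode::EDGE: return "EDGE";
        case PadMode::REFLECT: return "REFLECT";
        default: return "unknown";
        }
    }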
src/ngraph/frontend/onnx_import/op/shrink.cpp (view file @ bbb00715)

@@ -43,14 +43,26 @@ namespace ngraph
     ASSERT_VALID_ARGUMENT(node, !(lambd < 0.0f))
         << " The provided 'lambd' value:" << lambd << " must not be negative.";

-    const auto negative_lambd = ngraph::op::Constant::create(
-        input->get_element_type(), input->get_shape(), {-lambd});
+    std::shared_ptr<ngraph::op::Constant> negative_lambd;
+    const auto input_element_type = input->get_element_type();
+    if (input_element_type.is_signed())
+    {
+        negative_lambd = ngraph::op::Constant::create(
+            input_element_type, input->get_shape(), {-lambd});
+    }
+    else
+    {
+        // Passing -lambd to unsigned type constant will cause an overflow.
+        // For unsigned types the lowest possible value is 0.
+        negative_lambd = ngraph::op::Constant::create(
+            input_element_type, input->get_shape(), {0});
+    }

     const auto positive_lambd = ngraph::op::Constant::create(
-        input->get_element_type(), input->get_shape(), {lambd});
+        input_element_type, input->get_shape(), {lambd});

     const auto bias_tensor = ngraph::op::Constant::create(
-        input->get_element_type(), input->get_shape(), {bias});
+        input_element_type, input->get_shape(), {bias});

     // Create a mask indicating locations of values that need to be adjusted
     // by adding and subtracting bias

@@ -63,9 +75,9 @@ namespace ngraph
     // Convert from bool to the input type to be able to multiply adjusted inputs
     // by the created masks
     values_below_neg_lambd = std::make_shared<ngraph::op::Convert>(
-        values_below_neg_lambd, input->get_element_type());
+        values_below_neg_lambd, input_element_type);
     values_above_pos_lambd = std::make_shared<ngraph::op::Convert>(
-        values_above_pos_lambd, input->get_element_type());
+        values_above_pos_lambd, input_element_type);

     std::shared_ptr<ngraph::Node> input_minus_bias = input - bias_tensor;
     std::shared_ptr<ngraph::Node> input_plus_bias = input + bias_tensor;
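The signed/unsigned branch above exists because initializing an unsigned constant from -lambd would wrap around instead of producing a small threshold, which is why the diff substitutes 0, the lowest unsigned value. A standalone illustration of the wrap (plain C++, not ngraph code):

    #include <cstdint>
    #include <iostream>

    int main()
    {
        int lambd = 1;
        // Unsigned conversion is modular: -1 becomes 2^32 - 1.
        uint32_t wrapped = static_cast<uint32_t>(-lambd);
        std::cout << wrapped << "\n"; // prints 4294967295
        return 0;
    }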
src/ngraph/graph_util.cpp (view file @ bbb00715)

@@ -102,28 +102,28 @@ void ngraph::traverse_nodes(const NodeVector& subgraph_results,
     }
 }

-NodeVector ngraph::find_common_args(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement)
+NodeVector ngraph::find_common_args(std::shared_ptr<Node> node1, std::shared_ptr<Node> node2)
 {
-    std::unordered_set<std::shared_ptr<Node>> target_args;
+    std::unordered_set<std::shared_ptr<Node>> node1_args;

-    auto compute_target_args = [&target_args](const std::shared_ptr<Node> node) {
-        target_args.insert(node);
+    auto compute_node1_args = [&node1_args](const std::shared_ptr<Node> node) {
+        node1_args.insert(node);
     };

-    traverse_nodes({target}, compute_target_args, false, NodeVector{});
+    traverse_nodes({node1}, compute_node1_args, false, NodeVector{});

-    std::unordered_set<std::shared_ptr<Node>> replacement_args;
+    std::unordered_set<std::shared_ptr<Node>> node2_args;

-    auto compute_replacement_args = [&replacement_args](const std::shared_ptr<Node> node) {
-        replacement_args.insert(node);
+    auto compute_node2_args = [&node2_args](const std::shared_ptr<Node> node) {
+        node2_args.insert(node);
     };

-    traverse_nodes({replacement}, compute_replacement_args, false, NodeVector{});
+    traverse_nodes({node2}, compute_node2_args, false, NodeVector{});

     NodeVector common_args;
-    for (auto e : target_args)
+    for (auto e : node1_args)
     {
-        if (replacement_args.count(e) > 0)
+        if (node2_args.count(e) > 0)
         {
             common_args.push_back(e);
         }

@@ -149,12 +149,19 @@ void ngraph::replace_node(std::shared_ptr<Node> target, std::shared_ptr<Node> re
     if (ngraph::get_provenance_enabled())
     {
+        auto common_args = ngraph::find_common_args(target, replacement);
+
         auto set_replacement_prov = [replacement](std::shared_ptr<Node> node) {
             replacement->merge_provenance_tags_from(node);
         };
-        traverse_nodes(
-            {target}, set_replacement_prov, false, ngraph::find_common_args(target, replacement));
+        traverse_nodes({target}, set_replacement_prov, false, common_args);
+
+        auto set_prov_new_nodes = [replacement](std::shared_ptr<Node> node) {
+            node->merge_provenance_tags_from(replacement);
+        };
+        traverse_nodes({replacement}, set_prov_new_nodes, false, common_args);
     }

     // For each of target's output O with replacement output O_rep:
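As a rough model of what the rewritten replace_node does, tags flow in two passes: first from the replaced subgraph into replacement, then from replacement out to any nodes that are new to the graph, with the shared arguments (common_args) acting as traversal stop points so tags from untouched inputs are not picked up. A simplified sketch with hypothetical stand-in types, not the ngraph API:

    #include <memory>
    #include <set>
    #include <string>
    #include <vector>

    // TinyNode stands in for ngraph::Node; only args and tags are modeled.
    struct TinyNode
    {
        std::vector<std::shared_ptr<TinyNode>> args;
        std::set<std::string> tags;
    };

    // Depth-first visit of a node and its arguments, skipping anything in
    // stop_at (the analogue of passing common_args to traverse_nodes).
    template <typename F>
    void traverse(const std::shared_ptr<TinyNode>& node,
                  const std::set<std::shared_ptr<TinyNode>>& stop_at,
                  F visit)
    {
        if (stop_at.count(node))
        {
            return;
        }
        visit(node);
        for (const auto& arg : node->args)
        {
            traverse(arg, stop_at, visit);
        }
    }

    void replace_with_provenance(const std::shared_ptr<TinyNode>& target,
                                 const std::shared_ptr<TinyNode>& replacement,
                                 const std::set<std::shared_ptr<TinyNode>>& common_args)
    {
        // Pass 1: replacement absorbs the tags of target's subgraph.
        traverse(target, common_args, [&](const std::shared_ptr<TinyNode>& n) {
            replacement->tags.insert(n->tags.begin(), n->tags.end());
        });
        // Pass 2: nodes new to the graph absorb replacement's merged tags.
        traverse(replacement, common_args, [&](const std::shared_ptr<TinyNode>& n) {
            n->tags.insert(replacement->tags.begin(), replacement->tags.end());
        });
    }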
src/ngraph/op/max.cpp (view file @ bbb00715)

@@ -51,9 +51,8 @@ shared_ptr<Node> op::Max::get_default_value() const
     case element::Type_t::boolean:
         return make_constant_from_string("0", get_element_type(), get_shape());
     case element::Type_t::bf16:
-        return make_constant_from_string("-INFINITY", get_element_type(), get_shape());
     case element::Type_t::f16:
     case element::Type_t::f32:
         return make_constant_from_string("-INFINITY", get_element_type(), get_shape());
     case element::Type_t::f64:
         return make_constant_from_string("-INFINITY", get_element_type(), get_shape());
     case element::Type_t::i8:

@@ -80,6 +79,8 @@ shared_ptr<Node> op::Max::get_default_value() const
     case element::Type_t::u64:
         return make_constant_from_string(
             to_string(numeric_limits<uint64_t>::min()), get_element_type(), get_shape());
+    case element::Type_t::undefined:
+    case element::Type_t::dynamic:
     default: throw runtime_error("Max default value not defined for type");
     }
 }
src/ngraph/op/min.cpp (view file @ bbb00715)

@@ -51,9 +51,8 @@ shared_ptr<Node> op::Min::get_default_value() const
     case element::Type_t::boolean:
         return make_constant_from_string("1", get_element_type(), get_shape());
     case element::Type_t::bf16:
-        return make_constant_from_string("INFINITY", get_element_type(), get_shape());
     case element::Type_t::f16:
     case element::Type_t::f32:
         return make_constant_from_string("INFINITY", get_element_type(), get_shape());
     case element::Type_t::f64:
         return make_constant_from_string("INFINITY", get_element_type(), get_shape());
     case element::Type_t::i8:

@@ -80,6 +79,8 @@ shared_ptr<Node> op::Min::get_default_value() const
     case element::Type_t::u64:
         return make_constant_from_string(
             to_string(numeric_limits<uint64_t>::max()), get_element_type(), get_shape());
+    case element::Type_t::undefined:
+    case element::Type_t::dynamic:
     default: throw runtime_error("Min default value not defined for type");
     }
 }
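In both files the default value is the identity element of the reduction: Max must start from a value that any element beats ("-INFINITY" for floats, numeric_limits<T>::min() for integers), and Min from one that any element undercuts. A quick standalone check (plain C++, not ngraph code):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <limits>

    int main()
    {
        // A reduction over an empty set must return a value that never wins.
        uint64_t max_identity = std::numeric_limits<uint64_t>::min(); // 0
        uint64_t min_identity = std::numeric_limits<uint64_t>::max(); // 2^64 - 1

        // Folding any element against the identity leaves it unchanged.
        uint64_t x = 42;
        std::cout << std::max(x, max_identity) << "\n"; // 42
        std::cout << std::min(x, min_identity) << "\n"; // 42
        return 0;
    }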
src/ngraph/runtime/cpu/cpu_emitter.cpp (view file @ bbb00715)

@@ -3027,6 +3027,7 @@ namespace ngraph
     case ngraph::op::PadMode::REFLECT:
         pad_mode_string = "ngraph::op::PadMode::REFLECT";
         break;
+    case ngraph::op::PadMode::SYMMETRIC:
         throw ngraph_error("Unsupported PadMode");
     }
     writer << "reference::pad<" << out[0].get_type() << ">(" << args[0].get_name() << ",\n";

@@ -3470,6 +3471,7 @@ namespace ngraph
         func_block += "d_" + out_denom + " = 1;\n";
     }
     break;
+    case ngraph::op::SigmoidMultiply::FunctionType::NumTypes:
     default:
         throw ngraph_error("generate_sigmoid_mul_func input function type not supported");
src/ngraph/runtime/cpu/cpu_external_function.cpp (view file @ bbb00715)

@@ -1290,6 +1290,18 @@ static void dump_one_kernel_with_type(runtime::cpu::CPU_DebugTracer& debug_trace
                            t_attrs.m_t_shape,
                            in_out);
         break;
+    case element::Type_t::undefined:
+    case element::Type_t::dynamic:
+    case element::Type_t::boolean:
+    case element::Type_t::bf16:
+    case element::Type_t::f16:
+    case element::Type_t::f64:
+    case element::Type_t::i16:
+    case element::Type_t::i64:
+    case element::Type_t::u16:
+    case element::Type_t::u32:
+    case element::Type_t::u64:
+    default: break;
     }
 }

@@ -1613,8 +1625,9 @@ void runtime::cpu::CPU_ExternalFunction::build(ngraph::pass::PassConfig& pass_co
         case TensorRole::INTERMEDIATE: return string("TensorRole::INTERMEDIATE");
         case TensorRole::CONSTANT: return string("TensorRole::CONSTANT");
         case TensorRole::OUTPUT: return string("TensorRole::OUTPUT");
+        case TensorRole::UNKNOWN:
+        default: throw runtime_error("unhandled CPU tensor role");
         }
-        throw runtime_error("unhandled CPU tensor role");
     };

     //dump the tensor roles to debug manifest
src/ngraph/runtime/cpu/pass/cpu_memory_assignment.cpp (view file @ bbb00715)

@@ -578,8 +578,9 @@ void runtime::cpu::pass::CPUMemoryAssignment::liveness_analysis(
         case TensorRole::INTERMEDIATE: return string("TensorRole::INTERMEDIATE");
         case TensorRole::CONSTANT: return string("TensorRole::CONSTANT");
         case TensorRole::OUTPUT: return string("TensorRole::OUTPUT");
+        case TensorRole::UNKNOWN:
+        default: throw runtime_error("unhandled CPU tensor role");
         }
-        throw runtime_error("unhandled CPU tensor role");
     };

     //liveness analysis
src/ngraph/runtime/cpu/pregenerated_src/cpu_cg_runtime_context.hpp (view file @ bbb00715)

(The three hunks in this file re-indent the pregenerated source; old and new lines differ only in leading whitespace, so the resulting code is shown once per hunk.)

@@ -31,29 +31,29 @@ struct CPURuntimeContextCG
     std::vector<mkldnn::primitive*> mkldnn_primitives;
     std::vector<char*> mkldnn_workspaces;
    std::vector<mkldnn::memory::desc*> mkldnn_descriptors;

    mkldnn::engine global_cpu_engine = mkldnn::engine(mkldnn::engine::cpu, 0);

    void set_memory_ptr(size_t primitive_index, void* ptr)
    {
        auto primitive = static_cast<mkldnn::memory*>(mkldnn_primitives[primitive_index]);
        primitive->set_data_handle(ptr);
    }

    void mkldnn_invoke_primitive(size_t primitive_index)
    {
        mkldnn::stream s(mkldnn::stream::kind::eager);
        try
        {
            s.submit({*mkldnn_primitives[primitive_index]}).wait();
        }
        catch (const mkldnn::error& e)
        {
            throw std::runtime_error("Could not run mkldnn primitive " + e.message);
        }
    }

private:

@@ -89,32 +89,32 @@ private:
    void init_mkldnn_primitives();

    inline void cleanup_mkldnn_primitives()
    {
        for (auto p : mkldnn_primitives)
        {
            delete p;
        }
#ifndef _WIN32
        //To avoid memory leak in mkldnn, release any buffers that are not free'd yet.
        //https://software.intel.com/en-us/mkl-linux-developer-guide-avoiding-memory-leaks-in-intel-mkl
        //mkl_free_buffers() is not exposed at this point, hence using mkl_serv_free_buffers()
        ngraph::runtime::cpu::mkldnn_utils::mkl_serv_free_buffers();
#endif
        for (auto w : mkldnn_workspaces)
        {
            free(w);
        }
    }

    inline void cleanup_mkldnn_descriptors()
    {
        for (auto d : mkldnn_descriptors)
        {
            free(d);
        }
    }
};

extern "C" CPURuntimeContextCG* init_cg_ctx()

@@ -128,23 +128,23 @@ extern "C" void destroy_cg_ctx(CPURuntimeContextCG* cg_ctx)
}

static void
    deserialize_memory_descs_and_build_memory_primitives(std::ifstream& desc_file,
                                                         CPURuntimeContextCG* cg_ctx,
                                                         size_t descs_count)
{
    cg_ctx->mkldnn_descriptors = std::vector<mkldnn::memory::desc*>(descs_count);
    for (auto i = 0; i < descs_count; i++)
    {
        size_t primitive_index;
        desc_file >> primitive_index;

        auto desc = (mkldnn::memory::desc*)malloc(sizeof(mkldnn::memory::desc));
        if (!desc)
        {
            throw std::bad_alloc();
        }
        desc_file.read(reinterpret_cast<char*>(desc), sizeof(mkldnn::memory::desc));
        cg_ctx->mkldnn_descriptors[i] = desc;

        cg_ctx->mkldnn_primitives[primitive_index] =
            new mkldnn::memory({*cg_ctx->mkldnn_descriptors[i], cg_ctx->global_cpu_engine}, nullptr);
    }
};
)"
src/ngraph/runtime/gpu/cudnn_host_parameters.hpp (view file @ bbb00715)

@@ -58,6 +58,10 @@ namespace ngraph
     case CUDNN_DATA_INT32:
         r = m_host_parameters->cache(static_cast<int32_t>(value));
         break;
+    case CUDNN_DATA_HALF:
+    case CUDNN_DATA_INT8x4:
+    case CUDNN_DATA_UINT8:
+    case CUDNN_DATA_UINT8x4:
     default:
         throw std::runtime_error("Encountered unhandled cudnnDataType_t during compilation.");
src/tools/nbench/benchmark.cpp (view file @ bbb00715)

@@ -122,6 +122,10 @@ static void random_init(shared_ptr<runtime::Tensor> tv)
     case element::Type_t::u16: init_int_tv<uint16_t>(tv, 0, 1); break;
     case element::Type_t::u32: init_int_tv<uint32_t>(tv, 0, 1); break;
     case element::Type_t::u64: init_int_tv<uint64_t>(tv, 0, 1); break;
+    case element::Type_t::undefined:
+    case element::Type_t::dynamic:
+    case element::Type_t::bf16:
+    case element::Type_t::f16:
     default: throw runtime_error("unsupported type");
     }
 }
test/provenance.cpp (view file @ bbb00715)

@@ -221,19 +221,70 @@ TEST(provenance, provenance)
+    //
+    // Before:
+    //
+    //   A{tag_a}  B{tag_b}
+    //        |     |
+    //       C{tag_c}
+    //
+    // Replacement:
+    //
+    //   A{tag_a}  B{tag_b}
+    //      |       |
+    //     E{}      |
+    //      |       |
+    //     C -> D{tag_d}
+    //
+    // After:
+    //
+    //   A{tag_a}     B{tag_b}
+    //       \           /
+    //    E{tag_c, tag_d}
+    //            \     /
+    //       D{tag_c, tag_d}
+    //
+    // Comment:
+    //   * D is the replacement root replacing C and creating a new argument node E
+    //
+    {
+        auto x = make_shared<op::Parameter>(element::i32, PartialShape{2, 3, 4});
+        auto y = make_shared<op::Parameter>(element::i32, PartialShape{2, 3, 4});
+        auto a = make_shared<op::Add>(x, y);
+        a->add_provenance_tag("tag_a");
+        auto b = make_shared<op::Multiply>(y, x);
+        b->add_provenance_tag("tag_b");
+        auto c = make_shared<op::Subtract>(a, b);
+        c->add_provenance_tag("tag_c");
+        auto f = make_shared<Function>(c, ParameterVector{x, y});
+
+        auto e = make_shared<op::Subtract>(a, x);
+        auto d = make_shared<op::Subtract>(e, b);
+        d->add_provenance_tag("tag_d");
+
+        replace_node(c, d);
+
+        EXPECT_EQ(d->get_provenance_tags(), (ProvSet{"tag_c", "tag_d"}));
+        EXPECT_EQ(e->get_provenance_tags(), (ProvSet{"tag_c", "tag_d"}));
+    }
+
     //
     // Before:
     //
     //   A{tag_a}  B{tag_b}
     //        |     |
     //       C{tag_c}
     //
     // Replacement:
     //
     //   A{tag_a}  B{tag_b}
     //      |       |
-    //     E{}      |
+    //  E{tag_e}    |
     //      |       |
     //     C -> D{tag_d}
     //
     // After:
     //
     //   A{tag_a}        B{tag_b}
     //       \              /
-    //    E{tag_c, tag_d}
+    //  E{tag_c, tag_d, tag_e}
     //            \        /
     //       D{tag_c, tag_d}
     //
     // Comment:
     //   * D is the replacement root replacing C and creating a new argument node E

@@ -252,11 +303,13 @@ TEST(provenance, provenance)
         auto f = make_shared<Function>(c, ParameterVector{x, y});

         auto e = make_shared<op::Subtract>(a, x);
+        e->add_provenance_tag("tag_e");
         auto d = make_shared<op::Subtract>(e, b);
         d->add_provenance_tag("tag_d");

         replace_node(c, d);

         EXPECT_EQ(d->get_provenance_tags(), (ProvSet{"tag_c", "tag_d"}));
-        EXPECT_EQ(e->get_provenance_tags(), (ProvSet{"tag_c", "tag_d"}));
+        EXPECT_EQ(e->get_provenance_tags(), (ProvSet{"tag_c", "tag_d", "tag_e"}));
     }
 }