ngraph / Commits / c7b51d2d

Commit c7b51d2d, authored Oct 04, 2017 by Robert Kimball
Parent: 158de495

    apply new .clang-format

Showing 103 changed files, with 245 additions and 245 deletions.
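The commit message refers to a new .clang-format file, but the file itself is not part of this diff. A minimal sketch of a configuration that would produce the kind of changes visible below (sorted includes, braces on their own lines, constructor initializers one per line, a wider column limit) might look like the following; the option values are assumptions based on the formatting in this commit, not the repository's actual settings, and a commit like this is typically produced by re-running clang-format -i over all C++ sources.

    # Hypothetical .clang-format sketch; values inferred from this diff, not the real config.
    BasedOnStyle: LLVM
    IndentWidth: 4
    ColumnLimit: 100
    BreakBeforeBraces: Allman
    SortIncludes: true
    BreakConstructorInitializersBeforeComma: true
    ConstructorInitializerAllOnOneLineOrOnePerLine: false
    AlignAfterOpenBracket: Align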
Changed files:

src/ngraph/descriptor/buffer.hpp  (+0 / -1)
src/ngraph/descriptor/buffer_pos.hpp  (+0 / -1)
src/ngraph/descriptor/layout/dense_tensor_view_layout.cpp  (+0 / -0)
src/ngraph/descriptor/layout/dense_tensor_view_layout.hpp  (+0 / -1)
src/ngraph/descriptor/layout/tensor_view_layout.hpp  (+5 / -4)
src/ngraph/descriptor/primary_tensor_view.cpp  (+0 / -0)
src/ngraph/descriptor/primary_tensor_view.hpp  (+0 / -0)
src/ngraph/descriptor/tensor.cpp  (+0 / -0)
src/ngraph/descriptor/tensor.hpp  (+0 / -0)
src/ngraph/descriptor/tensor_view.hpp  (+0 / -1)
src/ngraph/descriptor/tuple.cpp  (+0 / -0)
src/ngraph/descriptor/tuple.hpp  (+0 / -0)
src/ngraph/descriptor/value.hpp  (+0 / -0)
src/ngraph/function.cpp  (+0 / -0)
src/ngraph/function.hpp  (+2 / -5)
src/ngraph/log.cpp  (+0 / -0)
src/ngraph/log.hpp  (+0 / -0)
src/ngraph/node.cpp  (+3 / -1)
src/ngraph/node.hpp  (+0 / -4)
src/ngraph/ops/binary_elementwise_arithmetic.cpp  (+1 / -2)
src/ngraph/ops/binary_elementwise_builtin.cpp  (+5 / -7)
src/ngraph/ops/binary_elementwise_comparison.cpp  (+1 / -2)
src/ngraph/ops/broadcast.cpp  (+4 / -2)
src/ngraph/ops/broadcast.hpp  (+0 / -1)
src/ngraph/ops/concatenate.cpp  (+4 / -3)
src/ngraph/ops/concatenate.hpp  (+1 / -2)
src/ngraph/ops/constant.cpp  (+6 / -3)
src/ngraph/ops/constant.hpp  (+6 / -7)
src/ngraph/ops/convert.hpp  (+0 / -0)
src/ngraph/ops/dot.cpp  (+6 / -5)
src/ngraph/ops/dot.hpp  (+0 / -0)
src/ngraph/ops/function_call.hpp  (+0 / -1)
src/ngraph/ops/get_tuple_element.cpp  (+2 / -1)
src/ngraph/ops/get_tuple_element.hpp  (+0 / -2)
src/ngraph/ops/op.hpp  (+21 / -17)
src/ngraph/ops/parameter.cpp  (+3 / -1)
src/ngraph/ops/parameter.hpp  (+1 / -1)
src/ngraph/ops/reduce.cpp  (+6 / -3)
src/ngraph/ops/reduce.hpp  (+5 / -3)
src/ngraph/ops/select.cpp  (+3 / -4)
src/ngraph/ops/select.hpp  (+0 / -0)
src/ngraph/ops/tuple.hpp  (+0 / -0)
src/ngraph/ops/unary_elementwise_arithmetic.cpp  (+2 / -2)
src/ngraph/ops/unary_elementwise_builtin.cpp  (+2 / -2)
src/ngraph/pass/assign_tensors.cpp  (+1 / -1)
src/ngraph/pass/dump_sorted.cpp  (+1 / -2)
src/ngraph/pass/dump_sorted.hpp  (+0 / -0)
src/ngraph/pass/liveness.cpp  (+4 / -8)
src/ngraph/pass/liveness.hpp  (+1 / -1)
src/ngraph/pass/manager.cpp  (+2 / -2)
src/ngraph/pass/manager.hpp  (+3 / -3)
src/ngraph/pass/memory_layout.cpp  (+7 / -12)
src/ngraph/pass/memory_layout.hpp  (+1 / -5)
src/ngraph/pass/memory_visualize.cpp  (+10 / -7)
src/ngraph/pass/memory_visualize.hpp  (+1 / -1)
src/ngraph/pass/pass.hpp  (+1 / -0)
src/ngraph/pass/visualize_tree.cpp  (+2 / -3)
src/ngraph/pass/visualize_tree.hpp  (+1 / -1)
src/ngraph/runtime/call_frame.cpp  (+0 / -0)
src/ngraph/runtime/call_frame.hpp  (+1 / -5)
src/ngraph/runtime/eigen/abs.hpp  (+2 / -1)
src/ngraph/runtime/eigen/broadcast_scalar.hpp  (+2 / -3)
src/ngraph/runtime/eigen/call.hpp  (+5 / -3)
src/ngraph/runtime/eigen/concat_matrix.hpp  (+0 / -0)
src/ngraph/runtime/eigen/concat_vector.hpp  (+4 / -2)
src/ngraph/runtime/eigen/constant.hpp  (+4 / -2)
src/ngraph/runtime/eigen/dot.hpp  (+3 / -2)
src/ngraph/runtime/eigen/less_than.hpp  (+0 / -0)
src/ngraph/runtime/eigen/log.hpp  (+2 / -1)
src/ngraph/runtime/eigen/return.hpp  (+0 / -1)
src/ngraph/runtime/eigen/scalar_tensor_product.hpp  (+2 / -2)
src/ngraph/runtime/eigen/utils.hpp  (+2 / -1)
src/ngraph/runtime/external_function.cpp  (+19 / -18)
src/ngraph/runtime/external_function.hpp  (+2 / -2)
src/ngraph/runtime/parameterized_tensor_view.hpp  (+0 / -1)
src/ngraph/runtime/tensor_view.hpp  (+0 / -3)
src/ngraph/runtime/tensor_view_info.hpp  (+0 / -1)
src/ngraph/runtime/tuple.cpp  (+0 / -0)
src/ngraph/runtime/tuple.hpp  (+1 / -2)
src/ngraph/runtime/value.hpp  (+0 / -1)
src/ngraph/shape.cpp  (+0 / -0)
src/ngraph/types/element_type.cpp  (+1 / -1)
src/ngraph/types/element_type.hpp  (+0 / -0)
src/ngraph/types/type.cpp  (+6 / -4)
src/ngraph/types/type.hpp  (+5 / -6)
src/ngraph/util.cpp  (+10 / -11)
src/ngraph/util.hpp  (+1 / -1)
src/ngraph/uuid.hpp  (+0 / -0)
src/ngraph/visualize.cpp  (+1 / -1)
src/ngraph/visualize.hpp  (+0 / -0)
test/build_graph.cpp  (+7 / -3)
test/execute.cpp  (+0 / -0)
test/input_output_assign.cpp  (+0 / -0)
test/main.cpp  (+0 / -0)
test/mkldnn.cpp  (+25 / -12)
test/pass_liveness.cpp  (+5 / -7)
test/pass_manager.cpp  (+0 / -0)
test/pass_memory_layout.cpp  (+4 / -4)
test/tensor.cpp  (+3 / -3)
test/test_tools.cpp  (+4 / -6)
test/topological_sort.cpp  (+0 / -0)
test/type_prop.cpp  (+0 / -0)
test/util.cpp  (+0 / -0)

Changed regions, per file (shown as formatted after this commit):
src/ngraph/descriptor/buffer.hpp
@@ -27,7 +27,6 @@ namespace ngraph
        {
        public:
            size_t size() const { return m_size; }

        protected:
            size_t m_size;
        };
src/ngraph/descriptor/buffer_pos.hpp
@@ -29,7 +29,6 @@ namespace ngraph
        {
        public:
            BufferPos() {}
            BufferPos(std::shared_ptr<Buffer> buffer, size_t offset, size_t size)
                : m_buffer(buffer)
                , m_offset(offset)
src/ngraph/descriptor/layout/dense_tensor_view_layout.hpp
@@ -40,7 +40,6 @@ namespace ngraph
            virtual size_t get_index_offset(const std::vector<size_t>& indices) override;
            const Strides& get_strides() const { return m_strides; }

        protected:
            Strides m_strides;
            size_t m_offset;
src/ngraph/descriptor/layout/tensor_view_layout.hpp
@@ -17,8 +17,8 @@
#include <tuple>
#include <vector>

#include "ngraph/descriptor/buffer_pos.hpp"
#include "ngraph/descriptor/tensor_view.hpp"

namespace ngraph
{
@@ -41,7 +41,6 @@ namespace ngraph
        public:
            virtual ~TensorViewLayout() {}

            /// Extent of this view in buffer.
            ///
            /// When we support non-linear buffers, this will need to be something other than size_t.
@@ -52,12 +51,14 @@ namespace ngraph
            /// With non-linear buffers, this will need to be something other than size_t.
            virtual size_t get_index_offset(const std::vector<size_t>& indices) = 0;

            const Shape& get_shape() const
            {
                return m_tensor_view.get_tensor_view_type()->get_shape();
            }

            /// Where this view is located in the buffer.
            const BufferPos& get_buffer_pos() const { return m_buffer_pos; }
            BufferPos& get_buffer_pos() { return m_buffer_pos; }

        protected:
            const ngraph::descriptor::TensorView& m_tensor_view;
            BufferPos m_buffer_pos;
src/ngraph/descriptor/tensor_view.hpp
@@ -57,7 +57,6 @@ namespace ngraph
            }

            const std::string& get_name() const { return m_name; }
            std::shared_ptr<const TensorViewType> get_tensor_view_type() const
            {
                return m_tensor_view_type;
src/ngraph/function.hpp
@@ -16,8 +16,8 @@
#include <initializer_list>
#include <memory>
#include <string>
#include <vector>

#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/node.hpp"
@@ -41,10 +41,7 @@ namespace ngraph
        {
            return m_parameters;
        }
        const std::shared_ptr<ValueType> get_result_type() const { return m_result_type; }
        std::string get_name() const { return m_name; }

    protected:
        std::shared_ptr<Node> m_result;
src/ngraph/node.cpp
@@ -32,7 +32,9 @@ Node::Node(const std::vector<shared_ptr<Node>>& arguments, shared_ptr<ValueType>
    }
}

Node::~Node()
{
}

void Node::set_value_type_checked(const shared_ptr<const ValueType>& value_type)
{
src/ngraph/node.hpp
@@ -65,9 +65,7 @@ namespace ngraph
        const Nodes& get_arguments() const { return m_arguments; }
        void clear_arguments() { m_arguments.clear(); }
        const std::multiset<Node*>& users() const { return m_users; }

        virtual std::string get_node_id() const;

        /// Return true if this has the same implementing class as node. This
@@ -80,7 +78,6 @@ namespace ngraph
        std::shared_ptr<const ValueType> get_value_type() { return m_value_type; }
        const std::shared_ptr<const ValueType> get_value_type() const { return m_value_type; }

        void set_value_type(const element::Type& element_type, const Shape& shape)
        {
            m_value_type = std::make_shared<TensorViewType>(element_type, shape);
@@ -108,7 +105,6 @@ namespace ngraph
        const std::vector<descriptor::Input>& get_inputs() const { return m_inputs; }
        std::vector<descriptor::Output>& get_outputs() { return m_outputs; }
        const std::vector<descriptor::Output>& get_outputs() const { return m_outputs; }

        std::unordered_set<descriptor::Tensor*> liveness_live_list;
        std::unordered_set<descriptor::Tensor*> liveness_new_list;
        std::unordered_set<descriptor::Tensor*> liveness_free_list;
src/ngraph/ops/binary_elementwise_arithmetic.cpp
@@ -19,8 +19,7 @@ using namespace ngraph;
using namespace ngraph::op;

const element::Type& BinaryElementwiseArithmetic::propagate_element_types(
    const element::Type& arg0_element_type, const element::Type& arg1_element_type) const
{
    if (arg0_element_type != arg1_element_type)
    {
src/ngraph/ops/binary_elementwise_builtin.cpp
@@ -14,8 +14,8 @@
#include <memory>

#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"

using namespace std;
using namespace ngraph;
@@ -41,11 +41,9 @@ void BinaryElementwiseBuiltin::propagate_types()
        throw ngraph_error("Arguments must have the same tensor view shape");
    }
    const element::Type& result_element_type = propagate_element_types(
        arg0_tensor_type->get_element_type(), arg1_tensor_type->get_element_type());

    set_value_type_checked(
        make_shared<TensorViewType>(result_element_type, arg0_tensor_type->get_shape()));
}
src/ngraph/ops/binary_elementwise_comparison.cpp
@@ -19,8 +19,7 @@ using namespace ngraph;
using namespace ngraph::op;

const element::Type& BinaryElementwiseComparison::propagate_element_types(
    const element::Type& arg0_element_type, const element::Type& arg1_element_type) const
{
    if (arg0_element_type != arg1_element_type)
    {
src/ngraph/ops/broadcast.cpp
@@ -19,7 +19,8 @@ using namespace ngraph::op;
void Broadcast::propagate_types()
{
    if (m_arguments.size() != 1)
    {
        throw ngraph_error("Wrong number of arguments.");
    }
@@ -42,5 +43,6 @@ void Broadcast::propagate_types()
    {
        throw ngraph_error("Broadcast arg, shape, and axes are incompatible");
    }
    set_value_type_checked(
        make_shared<TensorViewType>(arg_tensor_view_type->get_element_type(), m_shape));
}
src/ngraph/ops/broadcast.hpp
@@ -40,7 +40,6 @@ namespace ngraph
            virtual void propagate_types() override;
            const AxisSet& get_broadcast_axes() const { return m_broadcast_axes; }

        protected:
            Shape m_shape;
            AxisSet m_broadcast_axes;
src/ngraph/ops/concatenate.cpp
@@ -47,7 +47,7 @@ void Concat::propagate_types()
    size_t concatenation_axis_length = arg0_shape.at(m_concatenation_axis);
    auto& arg0_element_type = arg0_tensor_view_type->get_element_type();

    for (auto i = 1; i < m_arguments.size(); i++)
    {
        auto argi_type = m_arguments.at(i)->get_value_type();
        if (nullptr == argi_type)
@@ -72,11 +72,12 @@ void Concat::propagate_types()
            throw ngraph_error("Argument element types do not match");
        }

        for (auto j = 0; j < argi_shape.size(); j++)
        {
            if (j != m_concatenation_axis && arg0_shape.at(j) != argi_shape.at(j))
            {
                throw ngraph_error(
                    "Arguments to concat do not have same dimension on a non-concatenation axis");
            }
            else if (j == m_concatenation_axis)
            {
src/ngraph/ops/concatenate.hpp
@@ -30,7 +30,7 @@ namespace ngraph
            ///
            /// Example: n0 has shape {2,4,2}, and n1 has shape {2,5,2}. Then the output of
            /// Concat(Nodes{n0,n1},1) will have shape {2,9,2}.
            Concat(const Nodes& args, size_t concatenation_axis)
                : Builtin(args)
                , m_concatenation_axis(concatenation_axis)
            {
@@ -40,7 +40,6 @@ namespace ngraph
            virtual void propagate_types() override;
            size_t get_concatenation_axis() const { return m_concatenation_axis; }

        protected:
            const size_t m_concatenation_axis;
        };
src/ngraph/ops/constant.cpp
@@ -16,7 +16,10 @@
using namespace ngraph::op;

void ScalarConstantBase::propagate_types()
{
}

void TensorConstantBase::propagate_types()
{
}
src/ngraph/ops/constant.hpp
@@ -16,8 +16,8 @@
#include <sstream>

#include "ngraph/runtime/utils.hpp"
#include "ngraph/types/element_type.hpp"

namespace ngraph
{
@@ -60,11 +60,7 @@ namespace ngraph
                return ss.str();
            }

            type get_value() const { return m_value; }

        protected:
            typename T::type m_value;
        };
@@ -113,7 +109,10 @@ namespace ngraph
                return ss.str();
            }

            typename std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> get_value() const
            {
                return m_value;
            }

        protected:
            std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> m_value;
src/ngraph/ops/dot.cpp
@@ -56,22 +56,23 @@ void Dot::propagate_types()
    vector<size_t> result_shape;
    result_shape.reserve(arg0_shape.size() + arg1_shape.size() - (is_scalar_mult ? 0 : 2));

    for (auto i = 0; i < arg0_shape.size(); i++)
    {
        if (is_scalar_mult || i != arg0_reduction)
        {
            result_shape.push_back(arg0_shape[i]);
        }
    }

    for (auto i = 0; i < arg1_shape.size(); i++)
    {
        if (is_scalar_mult || i != arg1_reduction)
        {
            result_shape.push_back(arg1_shape[i]);
        }
    }

    auto result_type =
        make_shared<TensorViewType>(arg0_tensor_type->get_element_type(), result_shape);
    set_value_type_checked(result_type);
}
src/ngraph/ops/function_call.hpp
@@ -39,7 +39,6 @@ namespace ngraph
            virtual void propagate_types() override;
            std::shared_ptr<Function> get_function() const { return m_function; }

        protected:
            std::shared_ptr<Function> m_function;
        };
src/ngraph/ops/get_tuple_element.cpp
@@ -33,7 +33,8 @@ void GetTupleElement::propagate_types()
        throw ngraph_error("Argument must be a tuple view");
    }
    if (m_n >= arg0_tuple_type->get_element_types().size())
    {
        throw ngraph_error("Indexing tuple beyond its size");
    }
src/ngraph/ops/get_tuple_element.hpp
@@ -33,9 +33,7 @@ namespace ngraph
            virtual void propagate_types() override;
            virtual std::string description() const override { return "GetTupleElement"; }
            size_t get_n() const { return m_n; }

        protected:
            size_t m_n;
        };
src/ngraph/ops/op.hpp
@@ -31,7 +31,6 @@ namespace ngraph
        {
        public:
            virtual std::string description() const override { return "Builtin"; }

        protected:
            Builtin(const std::vector<std::shared_ptr<Node>>& args)
                : Node(args)
@@ -73,8 +72,8 @@ namespace ngraph
                : Builtin(Nodes{arg})
            {
            }
            virtual const element::Type&
                propagate_element_types(const element::Type& arg_element_type) const = 0;

        public:
            virtual void propagate_types() override;
@@ -87,8 +86,8 @@ namespace ngraph
                : UnaryElementwiseBuiltin({arg})
            {
            }
            virtual const element::Type& propagate_element_types(
                const element::Type& arg_element_type) const final override;
        };

        /// Op(X, Y)[I] = op(X[I], Y[I])
@@ -100,8 +99,8 @@ namespace ngraph
                : Builtin(Nodes{arg0, arg1})
            {
            }
            virtual const element::Type&
                propagate_element_types(const element::Type& arg0_element_type,
                                        const element::Type& arg1_element_type) const = 0;

        public:
@@ -111,34 +110,39 @@ namespace ngraph
        class BinaryElementwiseComparison : public BinaryElementwiseBuiltin
        {
        public:
            BinaryElementwiseComparison(const std::shared_ptr<Node>& arg0,
                                        const std::shared_ptr<Node>& arg1)
                : BinaryElementwiseBuiltin(arg0, arg1)
            {
            }

            virtual std::string description() const override
            {
                return "BinaryElementwiseComparison";
            }
            //virtual void propagate_types() override;
            virtual const element::Type&
                propagate_element_types(const element::Type& arg0_element_type,
                                        const element::Type& arg1_element_type) const override;
        };

        class BinaryElementwiseArithmetic : public BinaryElementwiseBuiltin
        {
        public:
            BinaryElementwiseArithmetic(const std::shared_ptr<Node>& arg0,
                                        const std::shared_ptr<Node>& arg1)
                : BinaryElementwiseBuiltin(arg0, arg1)
            {
            }

            virtual std::string description() const override
            {
                return "BinaryElementwiseArithmetic";
            }
            //virtual void propagate_types() override;
            virtual const element::Type&
                propagate_element_types(const element::Type& arg0_element_type,
                                        const element::Type& arg1_element_type) const final override;
        };
    }
}
src/ngraph/ops/parameter.cpp
@@ -41,4 +41,6 @@ void Parameter::assign_function(Function* function, size_t index)
    m_index = index;
}

void Parameter::propagate_types()
{
}
src/ngraph/ops/parameter.hpp
@@ -37,7 +37,7 @@ namespace ngraph
            void assign_function(Function* function, size_t index);

        public:
            Parameter(const std::shared_ptr<ValueType>& value_type = nullptr);
            Parameter(const ngraph::element::Type& element_type, const Shape& shape);

            std::string description() const override { return "Parameter"; }
src/ngraph/ops/reduce.cpp
@@ -30,7 +30,8 @@ void Reduce::propagate_types()
    {
        throw ngraph_error("Argument to reduce is missing type.");
    }
    auto arg_reductee_tensor_view_type =
        dynamic_pointer_cast<const TensorViewType>(arg_reductee_type);
    if (nullptr == arg_reductee_tensor_view_type)
    {
        throw ngraph_error("Argument to reduce is not a tensor view");
@@ -51,7 +52,8 @@ void Reduce::propagate_types()
        throw ngraph_error("Argument for initial value is not a scalar");
    }
    if (arg_init_tensor_view_type->get_element_type() !=
        arg_reductee_tensor_view_type->get_element_type())
    {
        throw ngraph_error("Element types for reductee and initial values do not match");
    }
@@ -99,5 +101,6 @@ void Reduce::propagate_types()
        throw ngraph_error("Return type from reduction function does not match expected");
    }
    set_value_type_checked(make_shared<TensorViewType>(
        arg_reductee_tensor_view_type->get_element_type(), result_shape));
}
src/ngraph/ops/reduce.hpp
@@ -31,7 +31,7 @@ namespace ngraph
                   const std::shared_ptr<Node>& arg_init,
                   const std::shared_ptr<Function>& reduction_function,
                   const AxisSet& reduction_axes)
                : Builtin({arg_reductee, arg_init})
                , m_reduction_function(reduction_function)
                , m_reduction_axes(reduction_axes)
            {
@@ -40,9 +40,11 @@ namespace ngraph
            virtual std::string description() const override { return "Reduce"; }
            virtual void propagate_types() override;

            std::shared_ptr<Function> get_reduction_function() const
            {
                return m_reduction_function;
            }
            const AxisSet& get_reduction_axes() const { return m_reduction_axes; }

        protected:
            std::shared_ptr<Function> m_reduction_function;
            AxisSet m_reduction_axes;
src/ngraph/ops/select.cpp
@@ -14,8 +14,8 @@
#include <memory>

#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"

using namespace std;
using namespace ngraph;
@@ -42,8 +42,8 @@ void Select::propagate_types()
    {
        throw ngraph_error("Argument 0 for arithmetic operators must have boolean element type");
    }
    if (arg0_tensor_type->get_shape() != arg1_tensor_type->get_shape() ||
        arg0_tensor_type->get_shape() != arg2_tensor_type->get_shape())
    {
        throw ngraph_error("Arguments must have the same tensor view shape");
    }
@@ -54,4 +54,3 @@ void Select::propagate_types()
    set_value_type_checked(arg1_tensor_type);
}
src/ngraph/ops/unary_elementwise_arithmetic.cpp
@@ -20,8 +20,8 @@ using namespace std;
using namespace ngraph;
using namespace ngraph::op;

const element::Type& UnaryElementwiseArithmetic::propagate_element_types(
    const element::Type& arg_element_type) const
{
    if (arg_element_type == element::Bool::element_type())
    {
src/ngraph/ops/unary_elementwise_builtin.cpp
@@ -37,6 +37,6 @@ void UnaryElementwiseBuiltin::propagate_types()
    const element::Type& result_element_type =
        propagate_element_types(arg_tensor_type->get_element_type());
    set_value_type_checked(
        make_shared<TensorViewType>(result_element_type, arg_tensor_type->get_shape()));
}
src/ngraph/pass/assign_tensors.cpp
@@ -19,8 +19,8 @@
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/propagate_types.hpp"

using namespace std;
using namespace ngraph;
src/ngraph/pass/dump_sorted.cpp
@@ -14,8 +14,8 @@
#include <fstream>

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/util.hpp"

using namespace std;
@@ -51,7 +51,6 @@ bool pass::DumpSorted::run_on_call_list(list<Node*>& nodes)
            out << join(outputs);
            out << "\n";

            for (const Tensor* tensor : node->liveness_live_list)
            {
                out << " L " << tensor->get_name() << "\n";
src/ngraph/pass/liveness.cpp
@@ -16,12 +16,12 @@
#include <sstream>
#include <unordered_set>

#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/assign_tensors.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/util.hpp"

using namespace std;
using namespace ngraph;
@@ -31,7 +31,7 @@ bool pass::Liveness::run_on_call_list(list<Node*>& ops)
{
    unordered_set<Tensor*> currently_live;

    for (auto it = ops.rbegin(); it != ops.rend(); it++)
    {
        Node* node = *it;
        node->liveness_live_list.clear();
@@ -143,11 +143,8 @@ void pass::Liveness::check_dependencies(
bool pass::Liveness::is_temporary(const Tensor& tensor)
{
    return tensor.is_persistent() == false && tensor.is_input() == false &&
           tensor.is_output() == false;
    // && tensor.is_constant() == false
    // && tensor.is_compile_only() == false;
}
@@ -170,4 +167,3 @@ void pass::Liveness::validate_liveness(const list<Node*>& ops)
        dead_tensors.insert(node->liveness_free_list.begin(), node->liveness_free_list.end());
    }
}
src/ngraph/pass/liveness.hpp
@@ -14,8 +14,8 @@
#pragma once

#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/pass/call_pass.hpp"

namespace ngraph
{
src/ngraph/pass/manager.cpp
@@ -15,10 +15,10 @@
#include <iostream>
#include <memory>

#include "ngraph/function.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/pass/manager.hpp"

using namespace std;
using namespace ngraph;
src/ngraph/pass/manager.hpp
@@ -14,9 +14,9 @@
#pragma once

#include <list>
#include <memory>
#include <vector>

#include "ngraph/pass/call_pass.hpp"
#include "ngraph/pass/tree_pass.hpp"
@@ -59,7 +59,7 @@ public:
    void initialize_default_passes();

    template <typename T, class... Args>
    void register_pass(Args... args)
    {
        static_assert(std::is_base_of<pass::Base, T>::value, "pass not derived from pass base");
src/ngraph/pass/memory_layout.cpp
@@ -15,12 +15,12 @@
#include <exception>
#include <sstream>

#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/memory_layout.hpp"
#include "ngraph/util.hpp"

using namespace std;
@@ -69,7 +69,6 @@ pass::MemoryManager::node::node(size_t size, block_state state)
    : m_size{size}
    , m_state{state}
{
}

pass::MemoryManager::MemoryManager(size_t alignment)
@@ -84,14 +83,10 @@ pass::MemoryManager::MemoryManager(size_t alignment)
size_t pass::MemoryManager::allocate(size_t size)
{
    size_t rc;
    switch (m_scheme)
    {
    case allocation_scheme::FIRST_FIT: rc = first_fit(size); break;
    case allocation_scheme::BEST_FIT: rc = best_fit(size); break;
    }
    return rc;
}
@@ -103,7 +98,7 @@ size_t pass::MemoryManager::best_fit(size_t size)
    size_t min_delta = numeric_limits<size_t>::max();
    auto best_fit = m_node_list.end();
    size_t best_offset = offset;
    for (auto it = m_node_list.begin(); it != m_node_list.end(); ++it)
    {
        if (it->m_state == block_state::FREE && it->m_size >= size)
        {
@@ -143,7 +138,7 @@ size_t pass::MemoryManager::first_fit(size_t size)
    size = align(size, m_alignment);
    size_t offset = 0;
    bool found = false;
    for (auto it = m_node_list.begin(); it != m_node_list.end(); ++it)
    {
        if (it->m_state == block_state::FREE && it->m_size >= size)
        {
@@ -176,7 +171,7 @@ void pass::MemoryManager::free(size_t offset)
{
    size_t search_offset = 0;
    bool found = false;
    for (auto it = m_node_list.begin(); it != m_node_list.end(); ++it)
    {
        if (offset == search_offset)
        {
src/ngraph/pass/memory_layout.hpp
@@ -62,12 +62,11 @@ public:
        node(size_t size, block_state state);

        bool is_free() const { return m_state == block_state::FREE; }
        size_t m_size;
        block_state m_state;
    };

    MemoryManager(size_t alignment = 1);
    // memory_manager& alignment(size_t a);
    size_t allocate(size_t size);
@@ -81,11 +80,8 @@ public:
    std::list<node>::iterator end() { return m_node_list.end(); }
    std::list<node>::const_iterator begin() const { return m_node_list.cbegin(); }
    std::list<node>::const_iterator end() const { return m_node_list.cend(); }
    const std::list<node>& get_node_list() const { return m_node_list; }
    size_t max_allocated() const { return m_max_allocated; }

private:
    size_t first_fit(size_t size);
    size_t best_fit(size_t size);
src/ngraph/pass/memory_visualize.cpp
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------

#include <algorithm>
#include <fstream>
#include <unordered_map>
#include <unordered_set>

#include "memory_visualize.hpp"
#include "ngraph/descriptor/tensor.hpp"
@@ -154,8 +154,7 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<Node*>&
        }
        i++;
    }

    sort(tensor_set.begin(), tensor_set.end(), [](const Tensor* t1, const Tensor* t2) {
        return t1->size() < t2->size();
    });
    for (const Tensor* tensor : tensor_set)
@@ -206,12 +205,16 @@ void pass::MemoryVisualize::draw_histogram(ostream& file, const list<Node*>& nod
        y += line_spacing;
        size_t x1 = offset;
        size_t x2 = ((usage / memory_footprint) * scale) + offset;
        file << "<text x=\"" << 0 << "\" y=\"" << y + text_offset << "\" fill=\"" << "black"
             << "\">" << node->get_node_id() << "</text>\n";
        file << "<line x1=\"" << x1 << "\" y1=\"" << y << "\" x2=\"" << x2 << "\" y2=\"" << y
             << "\" ";
        file << " style=\"stroke:forestgreen;stroke-width:" << stroke_width << "\" />\n";

        x1 = x2;
        x2 = ((footprint / memory_footprint) * scale) + offset;
        file << "<line x1=\"" << x1 << "\" y1=\"" << y << "\" x2=\"" << x2 << "\" y2=\"" << y
             << "\" ";
        file << " style=\"stroke:firebrick;stroke-width:" << stroke_width << "\" />\n";
    }
    file << "</svg>\n";
src/ngraph/pass/memory_visualize.hpp
@@ -14,9 +14,9 @@
#pragma once

#include <iostream>
#include <limits>
#include <list>

#include "ngraph/pass/call_pass.hpp"
src/ngraph/pass/pass.hpp
@@ -27,6 +27,7 @@ namespace ngraph
class ngraph::pass::Base
{
    friend class Manager;

public:
protected:
    ManagerState& get_state();
src/ngraph/pass/visualize_tree.cpp
@@ -14,8 +14,8 @@
#include <fstream>

#include "ngraph/node.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/util.hpp"

using namespace ngraph;
@@ -24,8 +24,7 @@ using namespace std;
bool pass::VisualizeTree::run_on_tree(std::shared_ptr<Node> base_node)
{
    // map<size_t, list<node_ptr>> dependent_nodes;
    traverse_nodes(base_node, [&](Node* node) {
        for (auto arg : node->get_arguments())
        {
            m_ss << add_attributes(arg.get());
src/ngraph/pass/visualize_tree.hpp
@@ -14,9 +14,9 @@
#pragma once

#include <set>
#include <sstream>
#include <string>

#include "ngraph/pass/tree_pass.hpp"
src/ngraph/runtime/call_frame.hpp
@@ -48,23 +48,19 @@ namespace ngraph
            void tensor_call(const TensorViewPtrs& inputs, const TensorViewPtrs& outputs);

            void set_return() { m_return = true; }
            std::shared_ptr<TensorView> get_tensor_view(size_t i) { return m_tensor_views[i]; }

            template <typename ET>
            ParameterizedTensorView<ET>* get_parameterized_tensor_view(size_t i)
            {
                return m_tensor_views[i]->get_parameterized_tensor_view<ET>();
            }

            template <typename ET>
            typename ET::type* get_tensor_view_data(size_t i)
            {
                return &get_parameterized_tensor_view<ET>(i)->get_vector()[0];
            }

        protected:
            size_t m_n_inputs;
            size_t m_n_outputs;
src/ngraph/runtime/eigen/abs.hpp
@@ -38,7 +38,8 @@ namespace ngraph
                virtual void execute(CallFrame& call_frame) const override
                {
                    EigenArray1d<ET>(call_frame, m_out) =
                        Eigen::abs(EigenArray1d<ET>(call_frame, m_arg));
                }

            protected:
src/ngraph/runtime/eigen/broadcast_scalar.hpp
@@ -29,8 +29,7 @@ namespace ngraph
            class BroadcastScalarInstruction : public Instruction
            {
            public:
                BroadcastScalarInstruction(const TensorViewInfo& arg, const TensorViewInfo& out)
                    : m_arg(arg)
                    , m_out(out)
                {
@@ -42,7 +41,7 @@ namespace ngraph
                    // pull it out as a vector. This works because of the way
                    // fmt::V computes sizes---it lumps together any higher
                    // dimensions---while fmt::M ignores them.
                    EigenArray1d<ET>(call_frame, m_out) = EigenArray1d<ET>(call_frame, m_arg)(0, 0);
                }

            protected:
src/ngraph/runtime/eigen/call.hpp
@@ -15,8 +15,8 @@
#pragma once

#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
@@ -29,7 +29,9 @@ namespace ngraph
            class CallInstruction : public Instruction
            {
            public:
                CallInstruction(std::shared_ptr<ExternalFunction> ef,
                                std::vector<TensorViewInfo> in,
                                std::vector<TensorViewInfo> out)
                    : m_external_function(ef)
                    , m_in(in)
                    , m_out(out)
@@ -51,7 +53,7 @@ namespace ngraph
                    {
                        outputs.push_back(call_frame.get_tensor_view(out.get_index()));
                    }
                    (*cf)(inputs, outputs);
                }

            protected:
src/ngraph/runtime/eigen/concat_vector.hpp
@@ -46,8 +46,10 @@ namespace ngraph
                {
                    EigenVector<ET> out(call_frame, m_out);
                    size_t concat_pos = 0;
                    for (size_t i = 0; i < m_args.size(); i++)
                    {
                        out.segment(concat_pos, m_sizes[i])
                            << EigenVector<ET>(call_frame, m_args.at(i));
                        concat_pos += m_sizes[i];
                    }
                }
src/ngraph/runtime/eigen/constant.hpp
@@ -30,7 +30,8 @@ namespace ngraph
            class ConstantInstruction : public Instruction
            {
            public:
                ConstantInstruction(const std::vector<typename ET::type> value,
                                    const TensorViewInfo& out)
                    : m_value(value)
                    , m_out(out)
                {
@@ -38,7 +39,8 @@ namespace ngraph
                virtual void execute(CallFrame& call_frame) const override
                {
                    call_frame.get_parameterized_tensor_view<ET>(m_out.get_index())->get_vector() =
                        m_value;
                }

            protected:
src/ngraph/runtime/eigen/dot.hpp
@@ -40,8 +40,9 @@ namespace ngraph
                virtual void execute(CallFrame& call_frame) const override
                {
                    EigenArray1d<ET>(call_frame, m_out)
                        << EigenVector<ET>(call_frame, m_arg0)
                               .dot(EigenVector<ET>(call_frame, m_arg1));
                }

            protected:
src/ngraph/runtime/eigen/log.hpp
@@ -37,7 +37,8 @@ namespace ngraph
                virtual void execute(CallFrame& call_frame) const override
                {
                    EigenArray1d<ET, fmt::V>(call_frame, m_out) =
                        Eigen::log(EigenArray1d<ET, fmt::V>(call_frame, m_arg));
                }

            protected:
src/ngraph/runtime/eigen/return.hpp
@@ -27,7 +27,6 @@ namespace ngraph
            {
            public:
                ReturnInstruction() {}

                virtual void execute(CallFrame& call_frame) const override
                {
                    call_frame.set_return();
src/ngraph/runtime/eigen/scalar_tensor_product.hpp
@@ -45,8 +45,8 @@ namespace ngraph
                    // fmt::V computes sizes---it lumps together any higher
                    // dimensions---while fmt::M ignores them.
                    EigenVector<ET>(call_frame, m_out) =
                        call_frame.get_tensor_view_data<ET>(m_arg0.get_index())[0] *
                        EigenVector<ET>(call_frame, m_arg1);
                }

            protected:
src/ngraph/runtime/eigen/utils.hpp
@@ -40,7 +40,8 @@ namespace ngraph
        using EigenArrayBase = Eigen::Map<DynamicArray<ET>, 0, DynamicStrides>;

        template <typename ET>
        using DynamicMatrix =
            Eigen::Matrix<typename ET::type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

        template <typename ET>
        using EigenMatrixBase = Eigen::Map<DynamicMatrix<ET>, 0, DynamicStrides>;
src/ngraph/runtime/external_function.cpp
@@ -97,7 +97,8 @@ ExternalFunction::ExternalFunction(const std::shared_ptr<ngraph::Function>& func
                                   const std::vector<TensorViewInfo>& out)
#define REGISTER_INSTRUCTION(op_class, instr_class, ...)                                          \
    REGISTER_TO_OP_MAP(op_class)                                                                  \
    {                                                                                             \
        ef->get_instructions()->push_back(make_shared<instr_class>(__VA_ARGS__));                 \
    }
@@ -146,8 +147,8 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
        {
            auto broadcast = static_cast<const op::Broadcast*>(n);
            auto arg_tensor_type = dynamic_pointer_cast<const TensorViewType>(
                n->get_arguments().at(0)->get_value_type());
            assert(nullptr != arg_tensor_type);

            auto result_tensor_type =
@@ -175,18 +176,22 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
                if (broadcast->get_broadcast_axes() == AxisSet{1})
                {
                    ef->get_instructions()->push_back(
                        make_shared<runtime::eigen::BroadcastVectorColwiseInstruction<element::Float32>>(
                            in[0], out[0]));
                }
                else if (broadcast->get_broadcast_axes() == AxisSet{0})
                {
                    ef->get_instructions()->push_back(
                        make_shared<runtime::eigen::BroadcastVectorRowwiseInstruction<element::Float32>>(
                            in[0], out[0]));
                }
                else
                {
                    throw ngraph_error("Internal error: axis set for vector-matrix broadcast is neither {0} or "
                                       "{1}");
                }
            }
            else
@@ -206,8 +211,8 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
        if (result_shape.size() == 1)
        {
            ef->get_instructions()->push_back(
                make_shared<runtime::eigen::ConcatVectorInstruction<element::Float32>>(in, out[0]));
        }
        else if (result_shape.size() == 2)
        {
@@ -286,7 +291,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
    };

    // Parameter is a "runtime no-op" because the output tensor has already been filled.
    REGISTER_TO_OP_MAP(op::Parameter){};

    // GetTupleElement will be spliced out, with the users of out redirected to in's source, but, for now, we need to copy.
    REGISTER_TO_OP_MAP(op::GetTupleElement)
@@ -322,20 +327,16 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
        }
        catch (const std::out_of_range)
        {
            external = make_shared<ngraph::runtime::ExternalFunction>(function_call->get_function());
            function_map.insert({function, external});
        }

        ef->get_instructions()->push_back(
            make_shared<runtime::eigen::CallInstruction>(external, in, out));
    };
    REGISTER_TO_OP_MAP(op::Reduce) { throw ngraph_error("op::Reduce not implemented yet"); };

    initialized = true;
}
return op_map;
src/ngraph/runtime/external_function.hpp
@@ -28,7 +28,8 @@ namespace ngraph
    {
        class ExternalFunction
        {
            using FunctionMap =
                std::unordered_map<std::shared_ptr<Function>, std::shared_ptr<ExternalFunction>>;

            using OpFunction = std::function<void(const ngraph::Node*,
                                                  ExternalFunction*,
@@ -50,7 +51,6 @@ namespace ngraph
            // Release original function's resources
            void release_function() { m_function = nullptr; }

        protected:
            void compile();
            void compile(FunctionMap& function_map);
src/ngraph/runtime/parameterized_tensor_view.hpp
@@ -61,7 +61,6 @@ namespace ngraph
            // For getting the data out
            storage_type& get_vector() { return m_vector; }

        protected:
            storage_type m_vector;
        };
src/ngraph/runtime/tensor_view.hpp
@@ -39,9 +39,7 @@ namespace ngraph
        public:
            TensorView() {}
            virtual ~TensorView() {}

            template <typename ET>
            ParameterizedTensorView<ET>* get_parameterized_tensor_view()
            {
@@ -65,7 +63,6 @@ namespace ngraph
            }
            const Shape& get_shape() { return m_descriptor->get_tensor_view_type()->get_shape(); }

        protected:
            std::shared_ptr<ngraph::descriptor::TensorView> m_descriptor;
        };
src/ngraph/runtime/tensor_view_info.hpp
@@ -34,7 +34,6 @@ namespace ngraph
            }

            size_t get_index() const { return m_index; }
            std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout> get_tensor_view_layout() const
            {
src/ngraph/runtime/tuple.hpp
@@ -40,8 +40,7 @@ namespace ngraph
                return m_descriptor;
            }

            virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
                                              const std::shared_ptr<Value>& value) const override;

        protected:
src/ngraph/runtime/value.hpp
@@ -30,7 +30,6 @@ namespace ngraph
        {
        public:
            virtual ~Value() {}

            /// @brief The compile-time descriptor for this value.
            virtual std::shared_ptr<ngraph::descriptor::Value> get_descriptor() const = 0;
src/ngraph/types/element_type.cpp
@@ -16,8 +16,8 @@
#include <cmath>
#include <iostream>

#include "ngraph/log.hpp"
#include "ngraph/types/element_type.hpp"

using namespace ngraph;
src/ngraph/types/type.cpp
@@ -14,8 +14,8 @@
#include <memory>

#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/util.hpp"

using namespace std;
@@ -39,7 +39,8 @@ bool TensorViewType::operator==(const ValueType& that) const
    return true;
}

void TensorViewType::collect_tensor_views(
    std::vector<std::shared_ptr<const TensorViewType>>& views) const
{
    views.push_back(shared_from_this());
}
@@ -54,9 +55,10 @@ bool TupleType::operator==(const ValueType& that) const
    return that_tvt->get_element_types() == get_element_types();
}

void TupleType::collect_tensor_views(
    std::vector<std::shared_ptr<const TensorViewType>>& views) const
{
    for (auto elt : m_element_types)
    {
        elt->collect_tensor_views(views);
    }
src/ngraph/types/type.hpp
@@ -17,8 +17,8 @@
#include <memory>
#include <vector>

#include "ngraph/shape.hpp"
#include "ngraph/types/element_type.hpp"

namespace ngraph
{
@@ -35,12 +35,10 @@ namespace ngraph
    protected:
        ValueType() {}

    public:
        virtual ~ValueType() {}
        virtual bool operator==(const ValueType& that) const = 0;
        bool operator!=(const ValueType& that) const { return !(*this == that); }

        /// Add tensor views in depth-first order.
        virtual void collect_tensor_views(
            std::vector<std::shared_ptr<const TensorViewType>>& views) const = 0;
@@ -62,7 +60,6 @@ namespace ngraph
        const element::Type& get_element_type() const { return m_element_type; }
        const Shape& get_shape() const { return m_shape; }

        virtual bool operator==(const ValueType& that) const override;
        virtual void collect_tensor_views(
            std::vector<std::shared_ptr<const TensorViewType>>& views) const override;
@@ -80,7 +77,6 @@ namespace ngraph
    public:
        /// Construct empty tuple and add value types later.
        TupleType() {}

        /// @param element_types A vector of types for the tuple elements
        TupleType(const std::vector<std::shared_ptr<const ValueType>>& element_types)
            : m_element_types(element_types)
@@ -91,7 +87,10 @@ namespace ngraph
        {
            return m_element_types;
        }
        std::vector<std::shared_ptr<const ValueType>> set_element_types()
        {
            return m_element_types;
        }

        virtual bool operator==(const ValueType& that) const override;
        virtual void collect_tensor_views(
src/ngraph/util.cpp
@@ -12,15 +12,15 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------

#include <deque>
#include <forward_list>
#include <iomanip>
#include <map>
#include <unordered_set>

#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/util.hpp"

using namespace std;
@@ -135,8 +135,7 @@ size_t ngraph::hash_combine(const std::vector<size_t>& list)
    return seed;
}

void ngraph::traverse_nodes(const std::shared_ptr<ngraph::Node>& p, std::function<void(Node*)> f)
{
    std::unordered_set<Node*> instances_seen;
    deque<Node*> stack;
@@ -151,7 +150,10 @@ void ngraph::traverse_nodes(const std::shared_ptr<ngraph::Node>& p,
            f(n);
        }
        stack.pop_front();
        for (auto arg : n->get_arguments())
        {
            stack.push_front(arg.get());
        }
    }
}
@@ -159,10 +161,7 @@ void ngraph::free_nodes(shared_ptr<Node> p)
{
    std::deque<Node*> sorted_list;

    traverse_nodes(p, [&](Node* n) { sorted_list.push_front(n); });

    for (Node* n : sorted_list)
    {
src/ngraph/util.hpp
@@ -18,10 +18,10 @@
#include <chrono>
#include <iostream>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

namespace ngraph
{
src/ngraph/visualize.cpp
@@ -17,8 +17,8 @@
#include <list>

#include "ngraph/node.hpp"
#include "ngraph/util.hpp"
#include "ngraph/visualize.hpp"

using namespace ngraph;
using namespace std;
test/build_graph.cpp
@@ -33,8 +33,10 @@ TEST(build_graph, build_simple)
    ASSERT_EQ(dot->get_arguments()[0], arg2);
    ASSERT_EQ(dot->get_arguments()[1], arg0);

    auto result_type =
        make_shared<TensorViewType>(element::Float32::element_type(), Shape{10, 32, 7});
    auto cluster_0 =
        make_shared<Function>(dot, result_type, op::Parameters{arg0, arg1, arg2, arg3});

    ASSERT_EQ(cluster_0->get_result(), dot);
}
@@ -182,4 +184,6 @@ TEST(build_graph, set_value_type_checked)
}

// Check argument inverses
TEST(build_graph, arg_inverse)
{
}
test/mkldnn.cpp
@@ -13,12 +13,12 @@
// ----------------------------------------------------------------------------
#include <iostream>
#include <vector>

#include <mkldnn.hpp>

#include "gtest/gtest.h"

static int tensor_volume(const mkldnn::memory::dims& t)
{
    int x = 1;
    for (const auto i : t)
@@ -26,7 +26,6 @@ static int tensor_volume(const mkldnn::memory::dims& t)
    return x;
}

TEST(mkldnn, engine)
{
    using namespace mkldnn;
@@ -39,13 +38,15 @@ TEST(mkldnn, engine)
    const int mb = 2;
    const int groups = 2;
    memory::dims input_tz = {mb, 256, 13, 13};
    memory::dims weights_tz = {groups, 384 / groups, 256 / groups, 3, 3};
    memory::dims bias_tz = {384};
    memory::dims strides = {1, 1};
    memory::dims padding = {0, 0};
    memory::dims output_tz = {
        mb,
        384,
        (input_tz[2] + 2 * padding[0] - weights_tz[3]) / strides[0] + 1,
        (input_tz[3] + 2 * padding[1] - weights_tz[4]) / strides[1] + 1,
    };

    std::vector<float> input(tensor_volume(input_tz), .0f);
@@ -54,7 +55,8 @@ TEST(mkldnn, engine)
    std::vector<float> output(tensor_volume(output_tz), .0f);

    auto c3_src_desc = memory::desc({input_tz}, memory::data_type::f32, memory::format::nchw);
    auto c3_weights_desc =
        memory::desc({weights_tz}, memory::data_type::f32, memory::format::goihw);
    auto c3_bias_desc = memory::desc({bias_tz}, memory::data_type::f32, memory::format::x);
    auto c3_dst_desc = memory::desc({output_tz}, memory::data_type::f32, memory::format::nchw);
@@ -63,11 +65,22 @@ TEST(mkldnn, engine)
    auto c3_bias = memory({c3_bias_desc, cpu_engine}, bias.data());
    auto c3_dst = memory({c3_dst_desc, cpu_engine}, output.data());

    auto c3 = convolution_forward(
        convolution_forward::primitive_desc(convolution_forward::desc(prop_kind::forward,
                                                                      algorithm::convolution_direct,
                                                                      c3_src_desc,
                                                                      c3_weights_desc,
                                                                      c3_bias_desc,
                                                                      c3_dst_desc,
                                                                      strides,
                                                                      padding,
                                                                      padding,
                                                                      padding_kind::zero),
                                            cpu_engine),
        c3_src,
        c3_weights,
        c3_bias,
        c3_dst);

    stream(stream::kind::eager).submit({c3}).wait();
}));
test/pass_liveness.cpp
@@ -19,16 +19,16 @@
#include "gtest/gtest.h"

#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/assign_tensors.hpp"
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/propagate_types.hpp"
#include "ngraph/pass/topological_sort.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "test_tools.hpp"
@@ -81,8 +81,6 @@ TEST(pass, liveness)
    // auto exc = ex.executor(seq_stuff);
    // return exc;
    // lg = LivenessGraph(exc.exop.ops)
    // lg.layout_memory()
test/pass_memory_layout.cpp
@@ -20,15 +20,15 @@
#include "gtest/gtest.h"

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/assign_tensors.hpp"
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/memory_layout.hpp"
#include "ngraph/pass/propagate_types.hpp"
#include "ngraph/pass/topological_sort.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "test_tools.hpp"

using namespace ngraph;
test/tensor.cpp
@@ -12,20 +12,20 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------

#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include "gtest/gtest.h"

#include "ngraph/function.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/assign_tensors.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/propagate_types.hpp"
#include "ngraph/pass/topological_sort.hpp"
#include "test_tools.hpp"

using namespace std;
test/test_tools.cpp
@@ -14,9 +14,9 @@
#include <algorithm>

#include "ngraph/ngraph.hpp"
#include "ngraph/util.hpp"
#include "test_tools.hpp"

using namespace std;
using namespace ngraph;
@@ -73,7 +73,8 @@ shared_ptr<Function> make_test_graph()
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
    auto f0 =
        make_shared<Function>(r0, rt, op::Parameters{arg_0, arg_1, arg_2, arg_3, arg_4, arg_5});

    return f0;
}
@@ -81,9 +82,6 @@ shared_ptr<Function> make_test_graph()
size_t get_node_count(std::shared_ptr<Node> n)
{
    size_t node_count = 0;
    traverse_nodes(n, [&](const Node* node) { node_count++; });
    return node_count;
}