submodule / ngraph / Commits / dd5bd9ad

Commit dd5bd9ad authored Mar 07, 2018 by Louis Feng
Merge branch 'master' into louisfeng/NGMX-296-conv_bias

Parents: 97c2ce20, ad58cb29
Showing 39 changed files with 908 additions and 425 deletions
INSTALL                                              +1    -1
cmake/Modules/patch_json.cmake                       +10   -0
cmake/external_json.cmake                            +7    -1
src/ngraph/CMakeLists.txt                            +2    -1
src/ngraph/function.cpp                              +41   -23
src/ngraph/function.hpp                              +10   -5
src/ngraph/graph_util.cpp                            +42   -10
src/ngraph/graph_util.hpp                            +4    -3
src/ngraph/node.cpp                                  +3    -12
src/ngraph/node.hpp                                  +1    -3
src/ngraph/node_vector.hpp                           +5    -0
src/ngraph/ops/result.cpp                            +53   -0
src/ngraph/ops/result.hpp                            +47   -0
src/ngraph/ops/result_vector.hpp                     +52   -0
src/ngraph/pass/manager.cpp                          +0    -13
src/ngraph/pass/manager.hpp                          +0    -1
src/ngraph/pass/reshape_elimination.cpp              +47   -0
src/ngraph/pass/reshape_elimination.hpp              +44   -42
src/ngraph/runtime/cpu/cpu_emitter.cpp               +21   -4
src/ngraph/runtime/cpu/cpu_external_function.cpp     +9    -66
src/ngraph/runtime/cpu/mkldnn_emitter.hpp            +1    -0
src/ngraph/runtime/cpu/ops/matmul_bias.cpp           +88   -81
src/ngraph/runtime/cpu/pass/cpu_assignment.cpp       +2    -2
src/ngraph/runtime/cpu/pass/cpu_fusion.cpp           +0    -0
src/ngraph/runtime/cpu/pass/cpu_fusion.hpp           +10   -4
src/ngraph/runtime/cpu/pass/cpu_layout.cpp           +12   -0
src/ngraph/runtime/interpreter/int_call_frame.cpp    +8    -45
src/ngraph/runtime/interpreter/int_call_frame.hpp    +9    -0
src/ngraph/runtime/kernel/result.hpp                 +36   -0
src/ngraph/serializer.cpp                            +8    -0
src/ngraph/util.cpp                                  +25   -6
test/build_graph.cpp                                 +1    -1
test/cpu_fusion.cpp                                  +174  -2
test/graph_partition.cpp                             +7    -5
test/pass_liveness.cpp                               +11   -3
test/pass_memory_layout.cpp                          +1    -1
test/pattern.cpp                                     +5    -4
test/reshape_elimination.cpp                         +108  -84
test/util/test_tools.cpp                             +3    -2
INSTALL
@@ -36,7 +36,7 @@ General Instructions

 These instructions assume that your system has been prepared in accordance
 with the above prerequisites.

-    $ cd private-ngraph-cpp
+    $ cd ngraph-cpp
     $ mkdir build
     $ cd build
     $ cmake .. \
cmake/Modules/patch_json.cmake (new file, 0 → 100644)

set(FILE_NAME ${CMAKE_BINARY_DIR}/include/nlohmann/detail/macro_scope.hpp)
file(READ ${FILE_NAME} FILE_CONTENTS)
string(REPLACE
       "#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40900"
       "#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40805"
       REWRITTEN_FILE "${FILE_CONTENTS}")
file(WRITE ${FILE_NAME} "${REWRITTEN_FILE}")
message(STATUS "json library gcc minimum version number patched")
cmake/external_json.cmake
@@ -34,6 +34,9 @@ if (${CMAKE_VERSION} VERSION_LESS 3.2)
         BUILD_COMMAND ""
         INSTALL_COMMAND ""
         UPDATE_COMMAND ""
+        # cmake does not allow calling cmake functions so we call a cmake script in the Module
+        # directory.
+        PATCH_COMMAND ${CMAKE_COMMAND} -P ${CMAKE_MODULE_PATH}patch_json.cmake
         )
 else()
     ExternalProject_Add(
@@ -44,7 +47,10 @@ else()
         BUILD_COMMAND ""
         INSTALL_COMMAND ""
         UPDATE_COMMAND ""
-        )
+        # cmake does not allow calling cmake functions so we call a cmake script in the Module
+        # directory.
+        PATCH_COMMAND ${CMAKE_COMMAND} -P ${CMAKE_MODULE_PATH}patch_json.cmake
+        )
 endif()

 #------------------------------------------------------------------------------
src/ngraph/CMakeLists.txt
@@ -67,6 +67,7 @@ set (SRC
     ops/replace_slice.cpp
     ops/reshape.cpp
     ops/reverse.cpp
+    ops/result.cpp
     ops/select.cpp
     ops/select_and_scatter.cpp
     ops/sin.cpp
@@ -184,8 +185,8 @@ if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND
     runtime/cpu/mkldnn_emitter.cpp
     runtime/cpu/mkldnn_invoke.cpp
     runtime/cpu/mkldnn_utils.cpp
-    runtime/cpu/ops/conv_bias.cpp
     runtime/cpu/ops/convert_layout.cpp
+    runtime/cpu/ops/conv_bias.cpp
     runtime/cpu/ops/matmul_bias.cpp
     runtime/cpu/pass/cpu_assignment.cpp
     runtime/cpu/pass/cpu_fusion.cpp
src/ngraph/function.cpp
@@ -27,7 +27,7 @@ using namespace ngraph;

 atomic<size_t> Function::m_next_instance_id(0);

-Function::Function(const NodeVector& results,
+Function::Function(const ResultVector& results,
                    const op::ParameterVector& parameters,
                    const std::string& name)
     : m_results(results)
@@ -37,14 +37,50 @@ Function::Function(const NodeVector& results,
     , m_name(name)
     , m_unique_name("Function_" + to_string(m_instance_id))
 {
+    init();
+}
+
+Function::Function(const NodeVector& results,
+                   const op::ParameterVector& parameters,
+                   const std::string& name)
+    : m_results(results.size())
+    , m_parameters(parameters)
+    , m_temporary_pool_size(0)
+    , m_instance_id(m_next_instance_id.fetch_add(1))
+    , m_name(name)
+    , m_unique_name("Function_" + to_string(m_instance_id))
+{
+    std::transform(results.begin(), results.end(), m_results.begin(), [](std::shared_ptr<Node> n) {
+        return std::make_shared<op::Result>(n);
+    });
+    init();
+}
+
+Function::Function(const std::shared_ptr<Node>& result,
+                   const op::ParameterVector& parameters,
+                   const std::string& name)
+    : Function(NodeVector{result}, parameters, name)
+{
+}
+
+void Function::init()
+{
+    for (auto r : m_results)
+    {
+        for (descriptor::Output& output : r->get_outputs())
+        {
+            output.get_tensor().set_is_output();
+        }
+    }
+
     traverse_nodes(this, [&](shared_ptr<Node> node) {
         std::shared_ptr<op::Parameter> p = std::dynamic_pointer_cast<op::Parameter>(node);
         if (nullptr != p)
         {
-            auto it = std::find_if(parameters.begin(),
-                                   parameters.end(),
+            auto it = std::find_if(m_parameters.begin(),
+                                   m_parameters.end(),
                                    [p](std::shared_ptr<op::Parameter> q) { return (p == q); });
-            if (it == parameters.end())
+            if (it == m_parameters.end())
             {
                 throw ngraph_error("Function references undeclared parameter");
             }
@@ -52,13 +88,6 @@ Function::Function(const NodeVector& results,
         }
     });
 }

-Function::Function(const std::shared_ptr<Node>& result,
-                   const op::ParameterVector& parameters,
-                   const std::string& name)
-    : Function(NodeVector{result}, parameters, name)
-{
-}
-
 std::list<shared_ptr<Node>> Function::get_ordered_ops()
 {
     return topological_sort(get_ops());
@@ -156,18 +185,7 @@ std::list<shared_ptr<Node>> Function::get_ops() const
     return ops;
 }

-void Function::replace_output_op(std::shared_ptr<Node> old, std::shared_ptr<Node> repl)
-{
-    auto it = std::find(begin(m_results), end(m_results), old);
-    if (it != end(m_results))
-    {
-        NGRAPH_DEBUG << "Replacing output " << old->get_name() << " w/ " << repl->get_name();
-        *it = repl;
-    }
-}
-
 void Function::replace_node(std::shared_ptr<Node> old, std::shared_ptr<Node> repl)
 {
-    replace_output_op(old, repl);
-    ngraph::replace_node(old, repl, true);
+    ngraph::replace_node(old, repl);
 }
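The net effect of the constructor changes above is that a Function built from plain nodes now wraps each output in an op::Result automatically. A minimal sketch (not part of the diff; it only assumes the ngraph API exactly as it appears elsewhere in this commit):

    // Sketch: the NodeVector overload wraps each result node in an op::Result,
    // so f1 and f2 below describe equivalent graphs.
    #include "ngraph/function.hpp"
    #include "ngraph/ops/abs.hpp"
    #include "ngraph/ops/parameter.hpp"
    #include "ngraph/ops/result.hpp"

    using namespace ngraph;

    std::shared_ptr<Function> build_example()
    {
        auto p = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
        auto out = std::make_shared<op::Abs>(p);
        op::ParameterVector params{p};

        // Plain nodes: the constructor inserts the op::Result wrappers.
        auto f1 = std::make_shared<Function>(NodeVector{out}, params);

        // Explicit results: equivalent to f1.
        auto f2 = std::make_shared<Function>(
            ResultVector{std::make_shared<op::Result>(out)}, params);
        return f1;
    }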
src/ngraph/function.hpp
@@ -25,6 +25,7 @@
 #include "ngraph/node.hpp"
 #include "ngraph/ops/parameter_vector.hpp"
+#include "ngraph/ops/result_vector.hpp"
 #include "ngraph/types/type.hpp"

 namespace ngraph
@@ -41,6 +42,12 @@ namespace ngraph
                  const op::ParameterVector& parameters,
                  const std::string& name = "");

+        Function(const ResultVector& results,
+                 const op::ParameterVector& parameters,
+                 const std::string& name = "");
+
+        void init();
+
         virtual ~Function() {}
     public:
         /// Return the number of outputs for this function.
@@ -57,8 +64,8 @@ namespace ngraph
         /// Return the function parameters
         const op::ParameterVector& get_parameters() const { return m_parameters; }
-        /// Return the ops that generate the results
-        const NodeVector get_results() const { return m_results; }
+        /// Return a list of function's outputs
+        const ResultVector& get_results() const { return m_results; }
         /// Check that there is a single result and return it.
         std::shared_ptr<Node> get_result() const;
@@ -73,13 +80,11 @@ namespace ngraph
         size_t get_instance_id() { return m_instance_id; }
         size_t get_temporary_pool_size();
         void set_temporary_pool_size(size_t);
-        // updates old w/ repl in m_results list
-        void replace_output_op(std::shared_ptr<Node> old, std::shared_ptr<Node> repl);
         // updates graph and m_results list
         void replace_node(std::shared_ptr<Node> old, std::shared_ptr<Node> repl);

     protected:
-        NodeVector m_results;
+        ResultVector m_results;
         op::ParameterVector m_parameters;
         size_t m_temporary_pool_size;
src/ngraph/graph_util.cpp
@@ -29,6 +29,8 @@
 #include "ngraph/node_vector.hpp"
 #include "ngraph/ops/constant.hpp"
 #include "ngraph/ops/parameter.hpp"
+#include "ngraph/ops/result.hpp"
+#include "ngraph/ops/result_vector.hpp"
 #include "ngraph/placement.hpp"
 #include "ngraph/util.hpp"
@@ -114,13 +116,11 @@ void ngraph::free_nodes(shared_ptr<Function> p)
     }
 }

-void ngraph::replace_node(std::shared_ptr<Node> target,
-                          std::shared_ptr<Node> replacement,
-                          bool replace_output)
+void ngraph::replace_node(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement)
 {
-    if (target->is_output() && !replace_output)
+    if (target->is_output())
     {
-        return;
+        throw ngraph_error("Result nodes cannot be replaced.");
     }

     // Fix input/output descriptors
@@ -197,6 +197,15 @@ std::list<std::shared_ptr<ngraph::Node>>
     return result_list;
 }

+void ngraph::NodeMap::update(std::shared_ptr<ngraph::Node> orig, std::shared_ptr<ngraph::Node> val)
+{
+    if (!exists(orig))
+    {
+        throw ngraph_error("Node doesn't exist!");
+    }
+    m_node_map[orig] = val;
+}
+
 void ngraph::NodeMap::add(std::shared_ptr<ngraph::Node> orig,
                           std::shared_ptr<ngraph::Node> replacement)
 {
@@ -252,10 +261,15 @@ std::shared_ptr<ngraph::Function> ngraph::clone_function(std::shared_ptr<ngraph:
     clone_nodes(func->get_ops(), node_map);

     // get cloned function results and parameters
-    NodeVector cloned_results;
+    ResultVector cloned_results;
     for (shared_ptr<Node> node : func->get_results())
     {
-        cloned_results.push_back(node_map.get(node));
+        auto result = std::dynamic_pointer_cast<op::Result>(node_map.get(node));
+        if (!result)
+        {
+            throw ngraph_error("Results should be of type op::Result");
+        }
+        cloned_results.push_back(result);
     }
     std::vector<std::shared_ptr<op::Parameter>> cloned_params;
     for (auto param : func->get_parameters())
@@ -435,8 +449,8 @@ static shared_ptr<Function> build_largest_colocated_function(
             }
         }
     }
-    return make_shared<Function>(outputs, collected_parameters);
+    auto func = make_shared<Function>(outputs, collected_parameters);
+    return func;
 }
@@ -528,7 +542,7 @@ vector<shared_ptr<Function>> ngraph::split_function_by_placement(
         // Remove input-output and constant-output aliasing
         if (f_parameters.count(node) == 0 && node->description() != "Constant")
         {
-            unvisited_outputs.insert(node);
+            unvisited_outputs.insert(node->get_input_op(0));
         }
     }
@@ -571,6 +585,24 @@ vector<shared_ptr<Function>> ngraph::split_function_by_placement(
         unvisited_outputs = updated_unvisited_outputs;
     }

+    unordered_map<shared_ptr<Node>, shared_ptr<Node>> map_source_node_to_result;
+    for (auto cf : colocated_functions)
+    {
+        for (auto r : cf->get_results())
+        {
+            map_source_node_to_result[r->get_input_op(0)] = r;
+        }
+    }
+
+    for (auto it = map_parameter_to_source_node.begin();
+         it != map_parameter_to_source_node.end();
+         ++it)
+    {
+        if (map_source_node_to_result.count(it->second) != 0)
+        {
+            it->second = map_source_node_to_result[it->second];
+        }
+    }
+
     // The colocated_functions should be called in reversed order
     reverse(colocated_functions.begin(), colocated_functions.end());
     return colocated_functions;
src/ngraph/graph_util.hpp
@@ -48,9 +48,8 @@ namespace ngraph
     void free_nodes(std::shared_ptr<Function>);

-    void replace_node(std::shared_ptr<Node> target,
-                      std::shared_ptr<Node> replacement,
-                      bool replace_output = false);
+    void replace_node(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement);

     void replace_node_users_arguments(std::shared_ptr<Node> target,
                                       std::shared_ptr<Node> replacement);
@@ -78,6 +77,8 @@ namespace ngraph
             return (m_node_map.count(orig) != 0);
         }

+        void update(std::shared_ptr<ngraph::Node> orig, std::shared_ptr<ngraph::Node> val);
+
         const std::unordered_map<std::shared_ptr<ngraph::Node>, std::shared_ptr<ngraph::Node>>&
             get_node_map() const
         {
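For clarity, a hedged sketch of the NodeMap contract introduced in the two files above (the helper function and its arguments are hypothetical): add() creates a mapping for a node that is not mapped yet, while the new update() replaces a mapping that must already exist, throwing "Node doesn't exist!" otherwise.

    #include "ngraph/graph_util.hpp"

    using namespace ngraph;

    void remap(NodeMap& node_map,
               std::shared_ptr<Node> orig,
               std::shared_ptr<Node> first_clone,
               std::shared_ptr<Node> second_clone)
    {
        node_map.add(orig, first_clone);     // initial mapping
        node_map.update(orig, second_clone); // ok: orig already exists
        // node_map.update(other, x) would throw if 'other' was never added
    }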
src/ngraph/node.cpp
@@ -23,6 +23,7 @@
 #include "ngraph/descriptor/layout/tensor_view_layout.hpp"
 #include "ngraph/descriptor/primary_tensor_view.hpp"
 #include "ngraph/ops/parameter.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/placement.hpp"

 using namespace std;
@@ -34,7 +35,6 @@ Node::Node(const std::string& node_type, const NodeVector& arguments)
     : m_node_type(node_type)
     , m_instance_id(m_next_instance_id.fetch_add(1))
     , m_unique_name(description() + "_" + to_string(m_instance_id))
-    , m_is_output(false)
     , m_arguments(arguments)
 {
     // Add this node as a user of each argument.
@@ -68,7 +68,7 @@ void Node::add_output(const element::Type& element_type, const Shape& shape)
         auto tensor_view_descriptor = make_shared<descriptor::PrimaryTensorView>(
             tensor_view_type,
             ngraph::descriptor::Tensor::make_tensor_name(this, i),
-            is_output(),
+            false,
             is_parameter(),
             is_constant());
         m_outputs.emplace_back(this, i, tensor_view_descriptor);
@@ -96,16 +96,7 @@ bool Node::is_parameter() const
 bool Node::is_output() const
 {
-    return m_is_output;
-}
-
-void Node::set_is_output()
-{
-    m_is_output = true;
-    for (descriptor::Output& output : get_outputs())
-    {
-        output.get_tensor().set_is_output();
-    }
+    return false;
 }

 bool Node::is_constant() const
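Taken together with the op::Result::is_output() override later in this commit, "output-ness" is now derived from the node's type instead of a mutable flag. A hedged sketch (illustrative only; it assumes the ops used in this commit's tests):

    #include "ngraph/ops/abs.hpp"
    #include "ngraph/ops/parameter.hpp"
    #include "ngraph/ops/result.hpp"

    using namespace ngraph;

    void output_flags()
    {
        auto p = std::make_shared<op::Parameter>(element::f32, Shape{2});
        auto n = std::make_shared<op::Abs>(p);
        auto r = std::make_shared<op::Result>(n);
        bool plain = n->is_output();   // false: the base Node always says no
        bool wrapped = r->is_output(); // true: op::Result overrides it
        (void)plain;
        (void)wrapped;
    }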
src/ngraph/node.hpp
@@ -102,8 +102,7 @@ namespace ngraph
         void set_value_type_checked(const element::Type& element_type, const Shape& shape);

         bool is_parameter() const;
-        bool is_output() const;
-        void set_is_output();
+        virtual bool is_output() const;
         virtual bool is_constant() const;
         virtual bool is_commutative() { return false; }
         size_t get_instance_id() const { return m_instance_id; }
@@ -200,7 +199,6 @@ namespace ngraph
         static std::atomic<size_t> m_next_instance_id;
         std::deque<descriptor::Input> m_inputs;
         std::deque<descriptor::Output> m_outputs;
-        bool m_is_output;
         std::unordered_map<Node*, autodiff::Adjoints> m_adjoint_map;
         Placement m_placement = Placement::DEFAULT;
src/ngraph/node_vector.hpp
@@ -23,6 +23,11 @@ namespace ngraph
 {
     class Node;

+    namespace op
+    {
+        class Result;
+    }
+
     /// \brief Zero or more nodes.
     class NodeVector : public std::vector<std::shared_ptr<Node>>
     {
src/ngraph/ops/result.cpp (new file, 0 → 100644)

/*******************************************************************************
 * Copyright 2017-2018 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/

#include <memory>
#include <typeindex>
#include <typeinfo>

#include "ngraph/node.hpp"
#include "ngraph/ops/result.hpp"

using namespace std;
using namespace ngraph;

op::Result::Result(const std::shared_ptr<Node>& arg)
    : RequiresTensorViewArgs("Result", {arg})
{
    if (arg->get_outputs().size() != 1)
    {
        throw ngraph_error("Expected a single-output argument");
    }

    // always borrow the placement conf even the default one
    set_placement(arg->get_placement());
    set_value_type_checked(arg->get_element_type(), arg->get_shape());
}

std::shared_ptr<Node> op::Result::copy_with_new_args(const NodeVector& new_args) const
{
    if (new_args.size() != 1)
    {
        throw ngraph_error("Incorrect number of new arguments");
    }

    if (new_args.at(0)->get_outputs().size() != 1)
    {
        throw ngraph_error("Expected a single-output argument");
    }

    return std::make_shared<Result>(new_args.at(0));
}
src/ngraph/ops/result.hpp (new file, 0 → 100644)

/*******************************************************************************
 * Copyright 2017-2018 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/

#pragma once

#include <memory>

#include "ngraph/ops/util/requires_tensor_view_args.hpp"

namespace ngraph
{
    namespace op
    {
        class Result : public util::RequiresTensorViewArgs
        {
        public:
            /// \brief Constructs a result operation.
            ///
            /// \param arg Node that produces the input tensor.
            Result(const std::shared_ptr<Node>& arg);

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

            virtual bool is_output() const override { return true; }
        protected:
            virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                           const std::shared_ptr<Node>& delta) override
            {
                adjoints.add_delta(get_input_op(0), delta);
            }
        };
    }
}
src/ngraph/ops/result_vector.hpp (new file, 0 → 100644)

/*******************************************************************************
 * Copyright 2017-2018 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/

#pragma once

#include <memory>
#include <vector>

#include "ngraph/ops/result.hpp"

namespace ngraph
{
    /// \brief Zero or more nodes.
    class ResultVector : public std::vector<std::shared_ptr<op::Result>>
    {
    public:
        ResultVector(size_t size)
            : std::vector<std::shared_ptr<op::Result>>(size)
        {
        }

        ResultVector(const std::initializer_list<std::shared_ptr<op::Result>>& nodes)
            : std::vector<std::shared_ptr<op::Result>>(nodes)
        {
        }

        ResultVector(const std::vector<std::shared_ptr<op::Result>>& nodes)
            : std::vector<std::shared_ptr<op::Result>>(nodes)
        {
        }

        ResultVector(const ResultVector& nodes)
            : std::vector<std::shared_ptr<op::Result>>(nodes)
        {
        }

        ResultVector() {}
    };
}
src/ngraph/pass/manager.cpp
@@ -30,12 +30,10 @@ using namespace std;
 using namespace ngraph;

 ngraph::pass::Manager::Manager()
-    : m_to_set_is_output(true)
 {
 }

 ngraph::pass::Manager::Manager(bool to_set_is_output)
-    : m_to_set_is_output(to_set_is_output)
 {
 }
@@ -56,17 +54,6 @@ void ngraph::pass::Manager::run_passes(shared_ptr<Function> func)
     set<shared_ptr<Function>> tfs(begin(fs), end(fs));
     get_state().set_functions(tfs);

-    if (m_to_set_is_output)
-    {
-        for (shared_ptr<Function> f : get_state().get_functions())
-        {
-            for (size_t i = 0; i < f->get_output_size(); ++i)
-            {
-                f->get_output_op(i)->set_is_output();
-            }
-        }
-    }
-
     for (shared_ptr<PassBase> pass : m_pass_list)
     {
         pass->set_state(get_state());
src/ngraph/pass/manager.hpp
@@ -57,5 +57,4 @@ public:
 private:
     std::vector<std::shared_ptr<PassBase>> m_pass_list;
     ManagerState m_state;
-    bool m_to_set_is_output;
 };
src/ngraph/pass/reshape_elimination.cpp
@@ -150,3 +150,50 @@ void ngraph::pass::ReshapeElimination::construct_reshapex2_pattern()
     auto m = std::make_shared<ngraph::pattern::Matcher>(reshape2, callback);
     this->add_matcher(m);
 }
+
+void ngraph::pass::ReshapeElimination::construct_dot_transpose_pattern()
+{
+    //dot(A,B).T = dot (B.T, A.T)
+    auto dot_pred = [](std::shared_ptr<Node> n) {
+        return static_cast<bool>(std::dynamic_pointer_cast<op::Dot>(n));
+    };
+
+    auto pdot = std::make_shared<pattern::op::Label>(element::f32, Shape{2, 1}, dot_pred);
+    auto preshape = std::make_shared<op::Reshape>(pdot, AxisVector{1, 0}, Shape{1, 2});
+
+    ngraph::pattern::gr_callback_fn callback = [](pattern::Matcher& m) {
+        NGRAPH_DEBUG << "In callback for construct_dot_transpose_pattern against node = "
+                     << m.match_root()->get_name();
+
+        std::shared_ptr<Node> nn;
+
+        auto mtranspose = std::dynamic_pointer_cast<op::Reshape>(m.match_root());
+        //this also checks the rank
+        if (mtranspose->get_input_order() != AxisVector{1, 0})
+        {
+            NGRAPH_DEBUG << "Reshape isn't transpose. "
+                         << vector_to_string(mtranspose->get_input_order());
+            return nn;
+        }
+
+        auto mdot = mtranspose->get_input_op(0);
+        if (mdot->get_shape().size() != 2)
+        {
+            NGRAPH_DEBUG << "Dot has the wrong shape. " << vector_to_string(mdot->get_shape());
+            return nn;
+        }
+
+        auto arg0 = mdot->get_input_op(0);
+        auto reshape0_shape = Shape{arg0->get_shape().at(1), arg0->get_shape().at(0)};
+        auto reshape0 = std::make_shared<op::Reshape>(arg0, AxisVector{1, 0}, reshape0_shape);
+
+        auto arg1 = mdot->get_input_op(1);
+        auto reshape1_shape = Shape{arg1->get_shape().at(1), arg1->get_shape().at(0)};
+        auto reshape1 = std::make_shared<op::Reshape>(arg1, AxisVector{1, 0}, reshape1_shape);
+
+        auto tdot = std::shared_ptr<Node>(new op::Dot(reshape1, reshape0));
+        return tdot;
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(preshape, callback);
+    this->add_matcher(m);
+}
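The rewrite implemented above is the standard transpose-of-a-product identity, stated here in LaTeX for reference:

    % For A of shape (m, k) and B of shape (k, n):
    \[
        (AB)^{\top} = B^{\top} A^{\top}
    \]
    % so Reshape(Dot(A, B), {1, 0}) can be replaced by
    % Dot(Reshape(B, {1, 0}), Reshape(A, {1, 0})).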
src/ngraph/pass/reshape_elimination.hpp

 /*******************************************************************************
  * Copyright 2018 Intel Corporation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  *******************************************************************************/

 #pragma once

 #include "ngraph/pass/graph_rewrite.hpp"

 namespace ngraph
 {
     namespace pass
     {
         class ReshapeElimination;
     }
 }

 class ngraph::pass::ReshapeElimination : public ngraph::pass::GraphRewrite
 {
 public:
     ReshapeElimination()
         : GraphRewrite()
     {
+        construct_dot_transpose_pattern();
         construct_identity_reshape_pattern();
         construct_reshapex2_pattern();
     }

 private:
+    void construct_dot_transpose_pattern();
     void construct_identity_reshape_pattern();
     void construct_reshapex2_pattern();
 };
src/ngraph/runtime/cpu/cpu_emitter.cpp
@@ -72,6 +72,7 @@
 #include "ngraph/ops/remainder.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
@@ -240,7 +241,7 @@ namespace ngraph
                 const Shape& arg0_shape = cg->get_arg0_shape(); //W
                 const Shape& arg1_shape = cg->get_arg1_shape(); //x
-                const Shape& arg2_shape = args[2].get_shape();  //bias (C)
+                const Shape& arg2_shape = node->get_shape();    //bias (C)

                 static const char* ctranspose = "cblas::Transpose::Transpose, ";
                 static const char* cnotranspose = "cblas::Transpose::None, ";
@@ -270,16 +271,23 @@ namespace ngraph
                 writer << "{   // " << node->get_name() << "\n";
                 writer.indent++;

-                writer << "memcpy(" << out[0].get_name() << ", " << args[2].get_name() << ", "
-                       << out[0].get_size() * out[0].get_element_type().size() << ");\n";
+                const char* cbeta = "0.0f";
+
+                if (args.size() > 2)
+                {
+                    writer << "memcpy(" << out[0].get_name() << ", " << args[2].get_name() << ", "
+                           << out[0].get_size() * out[0].get_element_type().size() << ");\n";
+                    cbeta = "1.0f";
+                }

                 writer << "cblas::cblas_sgemm("
                        << "cblas::Layout::RowMajor, " << tranpose_a << tranpose_b << m << ", "
                        << n << ", " << k << ",\n"
                        << "        1.0f, " << args[0].get_name() << ", " << max(1UL, lda) << ", "
-                       << args[1].get_name() << ", " << max(1UL, ldb) << ", 1.0f,\n"
+                       << args[1].get_name() << ", " << max(1UL, ldb) << ", " << cbeta << ",\n"
                        << "        " << out[0].get_name() << ", " << max(1UL, arg2_shape[1])
                        << ");\n";

                 writer.indent--;
                 writer << "}\n";
@@ -3526,6 +3534,15 @@ namespace ngraph
                 }

+                template <>
+                void CPU_Emitter::EMITTER_DECL(ngraph::op::Result)
+                {
+                    writer << "kernel::result<" << out[0].get_type() << ">(" << args[0].get_name()
+                           << ",\n";
+                    writer << "               " << out[0].get_name() << ",\n";
+                    writer << "               " << shape_size(node->get_shape()) << ");\n";
+                }
             }
         }
     }
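For context, the cbeta change above leans on standard BLAS gemm semantics (a property of cblas_sgemm, not something introduced by this diff):

    % cblas_sgemm computes
    \[
        C \leftarrow \alpha\,\mathrm{op}(A)\,\mathrm{op}(B) + \beta C
    \]
    % With a bias argument present, the bias is first memcpy'd into the output
    % buffer and beta = 1.0f, yielding C = Wx + bias; without one, beta = 0.0f
    % and the product alone is written.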
src/ngraph/runtime/cpu/cpu_external_function.cpp
@@ -82,6 +82,7 @@
 #include "ngraph/ops/remainder.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
@@ -234,6 +235,7 @@ static const runtime::cpu::OpMap dispatcher{
     {TI(ngraph::op::Not), &runtime::cpu::CPU_Emitter::emit<op::Not>},
     {TI(ngraph::op::MaxPool), &runtime::cpu::CPU_Emitter::emit<op::MaxPool>},
     {TI(ngraph::op::Reverse), &runtime::cpu::CPU_Emitter::emit<op::Reverse>},
+    {TI(ngraph::op::Result), &runtime::cpu::CPU_Emitter::emit<op::Result>},
     {TI(ngraph::op::ReduceWindow), &runtime::cpu::CPU_Emitter::emit<op::ReduceWindow>},
     {TI(ngraph::op::SelectAndScatter), &runtime::cpu::CPU_Emitter::emit<op::SelectAndScatter>},
     {TI(ngraph::op::AvgPool), &runtime::cpu::CPU_Emitter::emit<op::AvgPool>},
@@ -323,6 +325,7 @@ void runtime::cpu::CPU_ExternalFunction::compile()
 #include "ngraph/runtime/kernel/relu.hpp"
 #include "ngraph/runtime/kernel/replace_slice.hpp"
 #include "ngraph/runtime/kernel/reshape.hpp"
+#include "ngraph/runtime/kernel/result.hpp"
 #include "ngraph/runtime/kernel/reverse.hpp"
 #include "ngraph/runtime/kernel/select_and_scatter.hpp"
 #include "ngraph/runtime/kernel/slice.hpp"
@@ -611,6 +614,7 @@ using namespace ngraph::runtime;
     }

     // create output alias map
+    /*
     size_t output_index = 0;
     unordered_map<descriptor::TensorView*, vector<size_t>> output_alias_map;
     vector<size_t> aliases;
@@ -626,48 +630,17 @@ using namespace ngraph::runtime;
         }
         output_index++;
     }
+    */

     // Add outputs to the variable name map
-    output_index = 0;
     for (size_t i = 0; i < current_function->get_output_size(); ++i)
     {
         shared_ptr<Node> op = current_function->get_output_op(i);
         shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_view();
-        const element::Type& et = tv->get_tensor_view_type()->get_element_type();
-        bool parameter_as_output = false;
-        for (shared_ptr<ngraph::op::Parameter> param : current_function->get_parameters())
-        {
-            for (const descriptor::Output& pout : param->get_outputs())
-            {
-                shared_ptr<descriptor::TensorView> ptv = pout.get_tensor_view();
-                if (tv == ptv)
-                {
-                    parameter_as_output = true;
-                    writer << "memcpy(static_cast<" << et.c_type_string() << "*>(outputs["
-                           << output_index << "]), "
-                           << m_variable_name_map[ptv->get_tensor().get_name()] << ", "
-                           << ptv->get_tensor().size() << ");\n";
-                    break;
-                }
-            }
-        }
-        if (!parameter_as_output && !contains(aliases, output_index))
-        {
-            if (contains(constants, tv.get()))
-            {
-                writer << "memcpy(outputs[" << output_index << "], "
-                       << tv->get_tensor().get_name() << ", " << tv->get_tensor().size()
-                       << ");\n";
-            }
-            else
-            {
-                string type = et.c_type_string();
-                stringstream ss;
-                ss << "((" << type << "*)(outputs[" << output_index << "]))";
-                m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
-            }
-        }
-        output_index++;
+        string type = tv->get_tensor_view_type()->get_element_type().c_type_string();
+        stringstream ss;
+        ss << "((" << type << "*)(outputs[" << i << "]))";
+        m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
     }

     for (shared_ptr<Node> node : current_function->get_ordered_ops())
@@ -758,7 +731,6 @@ using namespace ngraph::runtime;
             // Emit operation epilogue
             if (!node->is_parameter() && !node->is_constant())
             {
-                handle_output_alias(writer, *node, output_alias_map);
                 if (m_emit_timing)
                 {
                     emit_debug_function_exit(writer, node.get(), in, out);
@@ -895,35 +867,6 @@ using namespace ngraph::runtime;
     }
 }

-void runtime::cpu::CPU_ExternalFunction::handle_output_alias(
-    codegen::CodeWriter& writer,
-    const Node& node,
-    const unordered_map<descriptor::TensorView*, vector<size_t>>& output_alias_map)
-{
-    for (const descriptor::Output& output : node.get_outputs())
-    {
-        shared_ptr<descriptor::TensorView> otv = output.get_tensor_view();
-        auto it = output_alias_map.find(otv.get());
-        if (it != output_alias_map.end())
-        {
-            const vector<size_t>& outputs = it->second;
-            if (outputs.size() > 1)
-            {
-                writer << "{    // handle output alias for previous op\n";
-                writer.indent++;
-                for (size_t i = 1; i < outputs.size(); i++)
-                {
-                    writer << "memcpy(static_cast<void*>(outputs[" << outputs[i]
-                           << "]), static_cast<void*>(outputs[" << outputs[0] << "]), "
-                           << otv->get_tensor().size() << ");\n";
-                }
-                writer.indent--;
-                writer << "}\n";
-            }
-        }
-    }
-}
-
 shared_ptr<ngraph::runtime::CallFrame> runtime::cpu::CPU_ExternalFunction::make_call_frame()
 {
     if (!m_is_compiled)
src/ngraph/runtime/cpu/mkldnn_emitter.hpp
@@ -113,6 +113,7 @@ namespace ngraph
                 size_t build_relu_forward(const mkldnn::memory::desc& input_desc,
                                           const mkldnn::memory::desc& result_desc);
+
                 size_t build_elementwise_add(
                     const mkldnn::memory::desc& input0_data_desc,
                     const mkldnn::memory::desc& input1_data_desc,
src/ngraph/runtime/cpu/ops/matmul_bias.cpp

 /*******************************************************************************
  * Copyright 2018 Intel Corporation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  *******************************************************************************/

 #include "matmul_bias.hpp"
 #include "ngraph/log.hpp"
 #include "ngraph/util.hpp"

 std::shared_ptr<ngraph::Node>
     ngraph::op::MatmulBias::copy_with_new_args(const NodeVector& new_args) const
 {
-    if (new_args.size() != 2)
+    if (new_args.size() != 2 && new_args.size() != 3)
     {
         throw ngraph_error("Incorrect number of new arguments");
     }
     return std::make_shared<MatmulBias>(new_args.at(0),
                                         new_args.at(1),
-                                        new_args.at(1),
+                                        new_args.size() == 3 ? new_args.at(2) : nullptr,
                                         m_shape_w,
                                         m_shape_x,
                                         m_transpose_w,
                                         m_transpose_x);
 }

 ngraph::op::MatmulBias::MatmulBias(std::shared_ptr<ngraph::Node> W,
                                    std::shared_ptr<ngraph::Node> x,
                                    std::shared_ptr<ngraph::Node> b,
                                    Shape shape_w,
                                    Shape shape_x,
                                    bool transpose_w,
                                    bool transpose_x)
-    : RequiresTensorViewArgs("MatMulBias", {W, x, b})
+    : RequiresTensorViewArgs("MatMulBias",
+                             b == nullptr ? std::vector<std::shared_ptr<Node>>{W, x}
+                                          : std::vector<std::shared_ptr<Node>>{W, x, b})
     , m_shape_w(shape_w)
     , m_shape_x(shape_x)
     , m_transpose_w(transpose_w)
     , m_transpose_x(transpose_x)
 {
     if (shape_w.size() != 2)
     {
         NGRAPH_DEBUG << "W shape = " << vector_to_string(shape_w);
         throw ngraph_error("W.shape.rank != 2 while creating MatmulBias");
     }

     if (shape_x.size() != 2)
     {
         NGRAPH_DEBUG << "x shape = " << vector_to_string(shape_x);
         throw ngraph_error("x.shape.rank != 2 while creating MatmulBias");
     }

     size_t dot_dimension_w = (transpose_w) ? 0 : 1;
     size_t dot_dimension_x = (transpose_x) ? 1 : 0;

     NGRAPH_DEBUG << "dot_dimension_w = " << dot_dimension_w
                  << " , dot_dimension_x = " << dot_dimension_x;
     NGRAPH_DEBUG << "W shape = " << vector_to_string(shape_w)
                  << " , x shape = " << vector_to_string(shape_x);

     if (shape_w.at(dot_dimension_w) != shape_x.at(dot_dimension_x))
     {
         throw ngraph_error("product dimensions are not equal while creating MatmulBias");
     }

     Shape dot_shape{shape_w.at(1 - dot_dimension_w), shape_x.at(1 - dot_dimension_x)};

-    NGRAPH_DEBUG << "dot_shape shape = " << vector_to_string(dot_shape)
-                 << " , b shape = " << vector_to_string(b->get_shape());
+    NGRAPH_DEBUG << "dot_shape shape = " << vector_to_string(dot_shape);
+
+    if (b)
+    {
+        NGRAPH_DEBUG << "b shape = " << vector_to_string(b->get_shape());
+    }

     add_output(W->get_element_type(), dot_shape);
 }
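To make the nullable-bias contract concrete, a hedged sketch of the two supported call forms (the shapes are arbitrary and the variable names are illustrative):

    using namespace ngraph;

    auto W = std::make_shared<op::Parameter>(element::f32, Shape{2, 4});
    auto x = std::make_shared<op::Parameter>(element::f32, Shape{4, 1});
    auto b = std::make_shared<op::Parameter>(element::f32, Shape{2, 1});

    // Three-input form: the op's arguments are {W, x, b}.
    auto with_bias = std::make_shared<op::MatmulBias>(
        W, x, b, W->get_shape(), x->get_shape(), false, false);

    // Two-input form: pass nullptr and only {W, x} are registered.
    auto no_bias = std::make_shared<op::MatmulBias>(
        W, x, nullptr, W->get_shape(), x->get_shape(), false, false);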
src/ngraph/runtime/cpu/pass/cpu_assignment.cpp
@@ -119,7 +119,7 @@ namespace ngraph
                         node->get_input_element_type(0) == element::f32)
                     {
-                        auto op_annotations = std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
+                        auto op_annotations =
+                            std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
                         op_annotations->set_mkldnn_op(true);
                         convolution->set_op_annotations(op_annotations);
                     }
@@ -200,7 +200,7 @@ namespace ngraph
                     {
                         std::cout << "assigned ConvolutionBiasBackpropFiltersBias" << std::endl;
-                        auto op_annotations = std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
+                        auto op_annotations =
+                            std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
                         op_annotations->set_mkldnn_op(true);
                         convolution->set_op_annotations(op_annotations);
                     }
src/ngraph/runtime/cpu/pass/cpu_fusion.cpp
(diff collapsed in the original page; contents not shown)
src/ngraph/runtime/cpu/pass/cpu_fusion.hpp
@@ -38,13 +38,19 @@ public:
     CPUFusion()
         : GraphRewrite()
     {
-        construct_gemm_pattern();
+        construct_matmul_pattern();
+        construct_matmulbias_pattern();
         construct_fprop_bn();
-        construct_conv_bias();
         construct_zero_padded_reshaped_conv();
         construct_zero_padded_conv();
+        construct_conv_bias();
     }

 private:
-    void construct_gemm_pattern();
+    void construct_matmul_pattern();
+    void construct_matmulbias_pattern();
     void construct_fprop_bn();
     void construct_conv_bias();
     void construct_zero_padded_reshaped_conv();
     void construct_zero_padded_conv();
 };
src/ngraph/runtime/cpu/pass/cpu_layout.cpp
@@ -31,6 +31,7 @@
 #include "ngraph/ops/convolution.hpp"
 #include "ngraph/ops/op.hpp"
 #include "ngraph/ops/relu.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
 #include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
 #include "ngraph/runtime/cpu/mkldnn_utils.hpp"
@@ -629,6 +630,16 @@ namespace ngraph
                 }

+                template <>
+                void CPULayout::LAYOUT_DECL(ngraph::op::Result)
+                {
+                    auto input_layout =
+                        runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node.get(), 0);
+
+                    vector<memory::format> prim_output_formats;
+                    prim_output_formats.push_back(input_layout);
+                    set_output_layouts(node, prim_output_formats);
+                }
+
                 template <>
                 void CPULayout::LAYOUT_DECL(ngraph::op::Relu)
                 {
@@ -708,6 +719,7 @@ static const runtime::cpu::pass::LayoutOpMap s_dispatcher{
     {TI(ngraph::op::AvgPoolBackprop),
      &runtime::cpu::pass::CPULayout::layout<ngraph::op::AvgPoolBackprop>},
     {TI(ngraph::op::Relu), &runtime::cpu::pass::CPULayout::layout<ngraph::op::Relu>},
+    {TI(ngraph::op::Result), &runtime::cpu::pass::CPULayout::layout<ngraph::op::Result>},
     {TI(ngraph::op::ReluBackprop),
      &runtime::cpu::pass::CPULayout::layout<ngraph::op::ReluBackprop>},
 };
src/ngraph/runtime/interpreter/int_call_frame.cpp
@@ -18,6 +18,7 @@
 #include <cstdlib>
 #include <iomanip>

+#include "ngraph/ops/result.hpp"
 #include "ngraph/runtime/host_tensor_view.hpp"
 #include "ngraph/runtime/interpreter/int_call_frame.hpp"
@@ -52,31 +53,16 @@ void runtime::interpreter::INT_CallFrame::call(
             tensor_map.insert({tv, input_tvs[arg_index++]});
         }
     }

-    std::vector<size_t> aliased_outputs;
-    for (size_t i = 0; i < output_tvs.size(); i++)
+    for (size_t i = 0; i < function->get_output_size(); i++)
     {
-        shared_ptr<Node> op = function->get_output_op(i);
-        descriptor::TensorView* tv = op->get_output_tensor_view(0).get();
-        string name = tv->get_tensor().get_name();
-        if (contains_key(tensor_map, tv))
-        {
-            if (op->description() == "Parameter")
-            {
-                // Here we handle the special case where an output is just a copy of an input
-                memcpy(output_tvs[i]->get_data_ptr(),
-                       tensor_map.at(tv)->get_data_ptr(),
-                       tv->get_tensor().size());
-            }
-            else
-            {
-                // This is a computed value returned more than once and will need to be copied at the end
-                aliased_outputs.push_back(i);
-            }
-        }
-        else
-        {
-            tensor_map.insert({tv, output_tvs[i]});
-        }
+        auto output_op = function->get_output_op(i);
+        if (!std::dynamic_pointer_cast<op::Result>(output_op))
+        {
+            throw ngraph_error("One of function's outputs isn't op::Result");
+        }
+        descriptor::TensorView* tv =
+            function->get_output_op(i)->get_output_tensor_view(0).get();
+        tensor_map.insert({tv, output_tvs[i]});
     }

     // Invoke computation
@@ -163,29 +149,6 @@ void runtime::interpreter::INT_CallFrame::call(
             }
         }
     }

-    for (size_t i : aliased_outputs)
-    {
-        shared_ptr<Node> op = function->get_output_op(i);
-        size_t first_output;
-        for (first_output = 0; first_output <= i; ++first_output)
-        {
-            if (function->get_output_op(first_output) == op)
-            {
-                break;
-            }
-        }
-        if (first_output == i)
-        {
-            throw ngraph_error("Internal error: duplicate output missing");
-        }
-        descriptor::TensorView* tv = op->get_output_tensor_view(0).get();
-        string name = tv->get_tensor().get_name();
-        // Here we handle the special case where an output is just a copy of an input
-        memcpy(output_tvs[i]->get_data_ptr(),
-               output_tvs[first_output]->get_data_ptr(),
-               tv->get_tensor().size());
-    }
 }

 void runtime::interpreter::INT_CallFrame::generate_calls(
src/ngraph/runtime/interpreter/int_call_frame.hpp
@@ -39,6 +39,7 @@
 #include "ngraph/ops/reduce_window.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
 #include "ngraph/ops/slice.hpp"
@@ -89,6 +90,7 @@
 #include "ngraph/runtime/kernel/relu.hpp"
 #include "ngraph/runtime/kernel/replace_slice.hpp"
 #include "ngraph/runtime/kernel/reshape.hpp"
+#include "ngraph/runtime/kernel/result.hpp"
 #include "ngraph/runtime/kernel/reverse.hpp"
 #include "ngraph/runtime/kernel/select.hpp"
 #include "ngraph/runtime/kernel/select_and_scatter.hpp"
@@ -720,6 +722,13 @@ private:
                           reshape->get_input_order(),
                           out[0]->get_shape());
         }
+        else if (node_op == "Result")
+        {
+            ngraph::op::Result* res = dynamic_cast<ngraph::op::Result*>(&node);
+            kernel::result(reinterpret_cast<T*>(args[0]->get_data_ptr()),
+                           reinterpret_cast<T*>(out[0]->get_data_ptr()),
+                           shape_size(res->get_shape()));
+        }
         else if (node_op == "Reverse")
         {
             ngraph::op::Reverse* reverse = dynamic_cast<ngraph::op::Reverse*>(&node);
src/ngraph/runtime/kernel/result.hpp (new file, 0 → 100644)

// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------

#pragma once

#include <algorithm>
#include <cmath>
#include <cstring> // for memcpy
#include <numeric>
#include <vector>

#include "ngraph/shape.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace kernel
        {
            template <typename T>
            void result(T* arg, T* out, size_t count)
            {
                memcpy(out, arg, sizeof(T) * count);
            }
        }
    }
}
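A short usage sketch of the new kernel (assumes float data; at runtime a Result node is just an identity copy):

    #include <vector>
    #include "ngraph/runtime/kernel/result.hpp"

    void copy_result()
    {
        std::vector<float> src{1.0f, 2.0f, 3.0f};
        std::vector<float> dst(src.size());
        // Copies 'count' elements of T from arg to out via memcpy.
        ngraph::runtime::kernel::result(src.data(), dst.data(), src.size());
    }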
src/ngraph/serializer.cpp
@@ -64,6 +64,7 @@
 #include "ngraph/ops/remainder.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
@@ -667,6 +668,10 @@ static shared_ptr<ngraph::Function>
             auto output_shape = node_js.at("output_shape").get<vector<size_t>>();
             node = make_shared<op::Reshape>(args[0], input_order, output_shape);
         }
+        else if (node_op == "Result")
+        {
+            node = make_shared<op::Result>(args[0]);
+        }
         else if (node_op == "Reverse")
         {
             auto reversed_axes = node_js.at("reversed_axes").get<set<size_t>>();
@@ -1061,6 +1066,9 @@ static json write(const Node& n)
         node["input_order"] = tmp->get_input_order();
         node["output_shape"] = tmp->get_output_shape();
     }
+    else if (node_op == "Result")
+    {
+    }
     else if (node_op == "Reverse")
     {
         auto tmp = dynamic_cast<const op::Reverse*>(&n);
src/ngraph/util.cpp
@@ -25,9 +25,12 @@
 #include "ngraph/graph_util.hpp"
 #include "ngraph/log.hpp"
 #include "ngraph/node.hpp"
+#include "ngraph/ops/result_vector.hpp"
 #include "ngraph/runtime/backend.hpp"
 #include "ngraph/util.hpp"

+#include <iostream>
+
 using namespace std;

 std::string ngraph::to_cplusplus_sourcecode_literal(bool val)
@@ -239,10 +242,21 @@ ngraph::FpropCache ngraph::cache_fprop(std::shared_ptr<ngraph::Function> fprop,
     }

     // create the new outputs for fprop and the new fprop function
-    NodeVector fprop_outputs{fprop->get_results()};
-    fprop_outputs.insert(fprop_outputs.end(),
-                         fprop_cache.fprop_output_nodes.begin(),
-                         fprop_cache.fprop_output_nodes.end());
+    ResultVector fprop_outputs;
+
+    for (auto fpr : fprop->get_results())
+    {
+        fprop_outputs.push_back(fpr);
+    }
+
+    for (auto fpir : fprop_cache.fprop_output_nodes)
+    {
+        if (std::dynamic_pointer_cast<op::Result>(fpir))
+        {
+            throw ngraph_error("Expected op::Result in fprop->get_results()");
+        }
+        fprop_outputs.push_back(std::make_shared<op::Result>(fpir));
+    }

     fprop_cache.fprop = std::make_shared<Function>(fprop_outputs, fprop->get_parameters());
@@ -251,10 +265,15 @@ ngraph::FpropCache ngraph::cache_fprop(std::shared_ptr<ngraph::Function> fprop,
     ngraph::clone_nodes(bprop->get_ops(), node_param_map);

     // get cloned bprop results
-    NodeVector cloned_results;
+    ResultVector cloned_results;
     for (auto node : bprop->get_results())
     {
-        cloned_results.push_back(node_param_map.get(node));
+        auto result = std::dynamic_pointer_cast<op::Result>(node_param_map.get(node));
+        if (!result)
+        {
+            throw ngraph_error("Expected op::Result values for op::Result keys in node_param_map");
+        }
+        cloned_results.push_back(result);
     }

     // get clone bprop parameters
test/build_graph.cpp
@@ -40,7 +40,7 @@ TEST(build_graph, build_simple)
     auto cluster_0 = make_shared<Function>(dot, op::ParameterVector{arg0, arg1, arg2, arg3});

-    ASSERT_EQ(cluster_0->get_output_op(0), dot);
+    ASSERT_EQ(cluster_0->get_output_op(0)->get_input_op(0), dot);
 }

 // Check node comparisons
test/cpu_fusion.cpp
View file @
dd5bd9ad
...
...
@@ -134,6 +134,42 @@ TEST(cpu_fusion, gemm_cpu)
ASSERT_TRUE
(
read_vector
<
float
>
(
result
)
==
expected
);
}
TEST
(
cpu_fusion
,
gemm_cpu_no_bias
)
{
auto
shapeA
=
Shape
{
3
,
2
};
auto
shapeB
=
Shape
{
2
,
3
};
auto
shapeC
=
Shape
{
2
,
2
};
auto
A
=
make_shared
<
op
::
Parameter
>
(
element
::
f32
,
shapeA
);
auto
B
=
make_shared
<
op
::
Parameter
>
(
element
::
f32
,
shapeB
);
auto
reshape_w
=
make_shared
<
op
::
Reshape
>
(
A
,
AxisVector
{
1
,
0
},
Shape
{
2
,
3
});
auto
reshape_x
=
make_shared
<
op
::
Reshape
>
(
B
,
AxisVector
{
1
,
0
},
Shape
{
3
,
2
});
auto
cg
=
make_shared
<
op
::
MatmulBias
>
(
A
,
B
,
nullptr
,
A
->
get_shape
(),
B
->
get_shape
(),
true
,
true
);
auto
f
=
make_shared
<
Function
>
(
cg
,
op
::
ParameterVector
{
A
,
B
});
auto
manager
=
runtime
::
Manager
::
get
(
"CPU"
);
auto
external
=
manager
->
compile
(
f
);
auto
backend
=
manager
->
allocate_backend
();
auto
cf
=
backend
->
make_call_frame
(
external
);
shared_ptr
<
runtime
::
TensorView
>
a
=
backend
->
make_primary_tensor_view
(
element
::
f32
,
shapeA
);
shared_ptr
<
runtime
::
TensorView
>
b
=
backend
->
make_primary_tensor_view
(
element
::
f32
,
shapeB
);
shared_ptr
<
runtime
::
TensorView
>
result
=
backend
->
make_primary_tensor_view
(
element
::
f32
,
shapeC
);
vector
<
float
>
dataA
{
1.0
f
,
4.0
f
,
1.0
f
,
4.0
f
,
1.0
f
,
4.0
f
};
vector
<
float
>
dataB
{
3.0
f
,
3.0
f
,
3.0
f
,
9.0
f
,
9.0
f
,
9.0
f
};
copy_data
(
a
,
dataA
);
copy_data
(
b
,
dataB
);
cf
->
call
({
a
,
b
},
{
result
});
vector
<
float
>
expected
{
9
,
27
,
36
,
108
};
ASSERT_TRUE
(
read_vector
<
float
>
(
result
)
==
expected
);
}
TEST
(
cpu_fusion
,
cpu_fusion_pass_basic
)
{
Shape
shape
{};
...
...
@@ -155,6 +191,50 @@ TEST(cpu_fusion, cpu_fusion_pass_basic)
ASSERT_NE
(
std
::
dynamic_pointer_cast
<
op
::
MatmulBias
>
(
graph
->
get_input_op
(
0
)),
nullptr
);
}
TEST
(
cpu_fusion
,
cpu_fusion_pass_matmul_bias
)
{
Shape
shape_w
{
2
,
4
};
Shape
shape_x
{
4
,
1
};
Shape
shape_b
{
1
};
auto
W
=
make_shared
<
op
::
Parameter
>
(
element
::
f32
,
shape_w
);
auto
x
=
make_shared
<
op
::
Parameter
>
(
element
::
f32
,
shape_x
);
auto
b
=
make_shared
<
op
::
Parameter
>
(
element
::
f32
,
shape_b
);
auto
mmb
=
std
::
make_shared
<
op
::
MatmulBias
>
(
W
,
x
,
nullptr
,
W
->
get_shape
(),
x
->
get_shape
(),
false
,
false
);
auto
broadcast
=
std
::
make_shared
<
op
::
Broadcast
>
(
b
,
mmb
->
get_shape
(),
AxisSet
{
0
});
auto
add
=
mmb
+
broadcast
;
auto
graph
=
make_shared
<
op
::
Abs
>
(
add
);
pass
::
Manager
pass_manager
;
pass_manager
.
register_pass
<
runtime
::
cpu
::
pass
::
CPUFusion
>
();
auto
func
=
make_shared
<
Function
>
(
graph
,
op
::
ParameterVector
{
W
,
x
,
b
});
pass_manager
.
run_passes
(
func
);
auto
gmm
=
graph
->
get_input_op
(
0
);
ASSERT_TRUE
(
std
::
dynamic_pointer_cast
<
op
::
MatmulBias
>
(
gmm
));
ASSERT_EQ
(
gmm
->
get_input_op
(
2
),
broadcast
);
}
TEST
(
cpu_fusion
,
cpu_fusion_pass_matmul_no_bias
)
{
Shape
shape_w
{
4
,
2
};
Shape
shape_x
{
1
,
4
};
auto
W
=
make_shared
<
op
::
Parameter
>
(
element
::
f32
,
shape_w
);
auto
x
=
make_shared
<
op
::
Parameter
>
(
element
::
f32
,
shape_x
);
auto
reshape_w
=
std
::
make_shared
<
op
::
Reshape
>
(
W
,
AxisVector
{
1
,
0
},
Shape
{
2
,
4
});
auto
reshape_x
=
std
::
make_shared
<
op
::
Reshape
>
(
x
,
AxisVector
{
1
,
0
},
Shape
{
4
,
1
});
auto
re_dot
=
make_shared
<
op
::
Dot
>
(
reshape_w
,
reshape_x
);
auto
graph
=
make_shared
<
op
::
Abs
>
(
re_dot
);
pass
::
Manager
pass_manager
;
pass_manager
.
register_pass
<
runtime
::
cpu
::
pass
::
CPUFusion
>
();
auto
func
=
make_shared
<
Function
>
(
graph
,
op
::
ParameterVector
{
W
,
x
});
pass_manager
.
run_passes
(
func
);
size_t
mmb
=
count_ops_of_type
<
op
::
MatmulBias
>
(
func
);
ASSERT_EQ
(
mmb
,
1
);
}
TEST
(
cpu_fusion
,
gemm_mlp
)
{
const
string
json_path
=
file_util
::
path_join
(
SERIALIZED_ZOO
,
"mxnet/mnist_mlp_forward.json"
);
...
...
@@ -164,8 +244,8 @@ TEST(cpu_fusion, gemm_mlp)
pass
::
Manager
pass_manager
;
pass_manager
.
register_pass
<
runtime
::
cpu
::
pass
::
CPUFusion
>
();
pass_manager
.
run_passes
(
func
);
size_t
ccg
=
count_ops_of_type
<
op
::
MatmulBias
>
(
func
);
ASSERT_EQ
(
ccg
,
3
);
size_t
mmb
=
count_ops_of_type
<
op
::
MatmulBias
>
(
func
);
ASSERT_EQ
(
mmb
,
3
);
}
//TODO: Move this test to backend_test.in.cpp once we have the INTERPRETER
...
...
@@ -403,6 +483,98 @@ TEST(cpu_fusion, bn_bprop_n4c3h2w2)
vector
<
float
>
expected_dbeta
{
320.
f
,
320.
f
,
320.
f
};
ASSERT_TRUE
(
ngraph
::
test
::
all_close
(
read_vector
<
float
>
(
_dbeta
),
expected_dbeta
,
1e-4
f
,
1e-8
f
));
}
TEST
(
cpu_fusion
,
zero_padded_reshaped_conv
)
{
auto
X
=
make_shared
<
op
::
Parameter
>
(
element
::
f32
,
Shape
{
1
,
2
,
2
,
1
});
auto
F
=
make_shared
<
op
::
Parameter
>
(
element
::
f32
,
Shape
{
1
,
1
,
1
,
1
});
auto
pad_value
=
op
::
Constant
::
create
<
float
>
(
element
::
f32
,
Shape
{},
std
::
vector
<
float
>
{
0.0
f
});
auto
pad
=
make_shared
<
op
::
Pad
>
(
X
,
pad_value
,
Shape
{
0
,
1
,
0
,
0
},
Shape
{
0
,
0
,
1
,
0
},
Shape
{
0
,
0
,
0
,
0
});
auto
reshape
=
make_shared
<
op
::
Reshape
>
(
pad
,
AxisVector
{
0
,
3
,
1
,
2
},
Shape
{
1
,
1
,
3
,
3
});
auto
conv
=
make_shared
<
op
::
Convolution
>
(
reshape
,
F
,
Strides
{
1
,
1
},
Strides
{
1
,
1
},
CoordinateDiff
{
0
,
0
},
CoordinateDiff
{
0
,
0
},
Strides
{
1
,
1
});
auto
func
=
make_shared
<
Function
>
(
conv
,
op
::
ParameterVector
{
X
,
F
});
ASSERT_EQ
(
count_ops_of_type
<
op
::
Pad
>
(
func
),
1
);
auto
manager
=
runtime
::
Manager
::
get
(
"CPU"
);
auto
external
=
manager
->
compile
(
func
);
auto
backend
=
manager
->
allocate_backend
();
auto
cf
=
backend
->
make_call_frame
(
external
);
ASSERT_EQ
(
count_ops_of_type
<
op
::
Pad
>
(
func
),
0
);
}
TEST(cpu_fusion, zero_padded_conv)
{
    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});

    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});

    auto pad = make_shared<op::Pad>(
        X, pad_value, Shape{0, 0, 0, 1}, Shape{0, 0, 1, 0}, Shape{0, 0, 0, 0});

    auto conv = make_shared<op::Convolution>(pad,
                                             F,
                                             Strides{1, 1},
                                             Strides{1, 1},
                                             CoordinateDiff{0, 0},
                                             CoordinateDiff{0, 0},
                                             Strides{1, 1});

    auto func = make_shared<Function>(conv, op::ParameterVector{X, F});

    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);

    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(func);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
}
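The fusion the two assertions bracket amounts to folding the zero Pad into the convolution's own padding: Pad below {0, 0, 0, 1} / above {0, 0, 1, 0} on the NCHW input becomes spatial padding_below {0, 1} / padding_above {1, 0} on the Convolution. A hand-fused sketch of what the rewritten graph should be equivalent to, reusing X and F from the test above (not the pass's actual output code); the non_zero_padded_conv test below checks the converse — a pad value of 1.0f is not foldable, so the Pad must survive:

auto fused_conv = make_shared<op::Convolution>(X,
                                               F,
                                               Strides{1, 1},
                                               Strides{1, 1},
                                               CoordinateDiff{0, 1}, // padding_below, from Pad's below {0,0,0,1}
                                               CoordinateDiff{1, 0}, // padding_above, from Pad's above {0,0,1,0}
                                               Strides{1, 1});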
TEST(cpu_fusion, non_zero_padded_conv)
{
    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});

    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{1.0f});

    auto pad = make_shared<op::Pad>(
        X, pad_value, Shape{0, 0, 0, 1}, Shape{0, 0, 1, 0}, Shape{0, 0, 0, 0});

    auto conv = make_shared<op::Convolution>(pad,
                                             F,
                                             Strides{1, 1},
                                             Strides{1, 1},
                                             CoordinateDiff{0, 0},
                                             CoordinateDiff{0, 0},
                                             Strides{1, 1});

    auto func = make_shared<Function>(conv, op::ParameterVector{X, F});

    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);

    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(func);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
}
TEST(cpu_fusion, fuse_conv_bias)
{
    pass::Manager pass_manager;
...
...
test/graph_partition.cpp
...
...
@@ -218,11 +218,6 @@ public:
     {
         map_parameter_to_index[f->get_parameters().at(i)] = i;
     }
-    unordered_map<shared_ptr<Node>, size_t> map_result_to_index;
-    for (size_t i = 0; i < f->get_results().size(); ++i)
-    {
-        map_result_to_index[f->get_results().at(i)] = i;
-    }
     // Parameter's source is either itself, or the output node of the upstream function
     unordered_map<shared_ptr<op::Parameter>, shared_ptr<Node>> map_parameter_to_source_node;
...
...
@@ -231,6 +226,13 @@ public:
     vector<shared_ptr<Function>> funcs = split_function_by_placement(f, map_parameter_to_source_node);
+    auto main_func = funcs.back();
+    unordered_map<shared_ptr<Node>, size_t> map_result_to_index;
+    for (size_t i = 0; i < main_func->get_results().size(); ++i)
+    {
+        map_result_to_index[main_func->get_results().at(i)] = i;
+    }

     // Make call frames
     vector<shared_ptr<runtime::CallFrame>> call_frames;
     for (auto func : funcs)
...
...
test/pass_liveness.cpp
...
...
@@ -47,14 +47,22 @@ TEST(liveness, constant)
     auto tmp = f->get_ordered_ops();
     vector<shared_ptr<Node>> sorted{tmp.begin(), tmp.end()};
-    ASSERT_EQ(2, sorted.size());
+    ASSERT_EQ(3, sorted.size());

     EXPECT_EQ(0, sorted[0]->liveness_live_list.size());
     EXPECT_EQ(0, sorted[0]->liveness_new_list.size());
     EXPECT_EQ(0, sorted[0]->liveness_free_list.size());

-    EXPECT_EQ(0, sorted[1]->liveness_live_list.size());
-    EXPECT_EQ(0, sorted[1]->liveness_new_list.size());
+    //op::Negative is live on output to op::Result
+    EXPECT_EQ(1, sorted[1]->liveness_live_list.size());
+    //op::Negative is new
+    EXPECT_EQ(1, sorted[1]->liveness_new_list.size());
     EXPECT_EQ(0, sorted[1]->liveness_free_list.size());

+    //op::Negative is live on input to op::Result
+    EXPECT_EQ(1, sorted[2]->liveness_live_list.size());
+    EXPECT_EQ(0, sorted[2]->liveness_new_list.size());
+    //op::Negative is freed
+    EXPECT_EQ(1, sorted[2]->liveness_free_list.size());
 }
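The new expectations reflect the op::Result nodes this commit introduces: the ordered op list for this test now holds three nodes rather than two, and the Negative's tensor stays live until the Result consumes it. A sketch of a graph with that shape, assuming the test negates a scalar Constant (the exact value is hypothetical; the hunk does not show it):

auto c = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{1.0f});
auto neg = make_shared<op::Negative>(c);
auto f = make_shared<Function>(neg, op::ParameterVector{});
// get_ordered_ops(): Constant, Negative, and the implicit op::Result -> 3 nodes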
TEST(liveness, liveness)
...
...
test/pass_memory_layout.cpp
...
...
@@ -234,5 +234,5 @@ TEST(memory_layout, constant)
     pass_manager.run_passes(f);
     auto sorted = f->get_ordered_ops();
     size_t temporary_pool_size = f->get_temporary_pool_size();
-    EXPECT_EQ(0, temporary_pool_size);
+    EXPECT_EQ(4, temporary_pool_size);
 }
test/pattern.cpp
...
...
@@ -42,6 +42,7 @@
 #include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
 #include "ngraph/serializer.hpp"
 #include "util/matcher.hpp"
+#include "util/test_tools.hpp"

 using namespace ngraph;
 using namespace std;
...
...
@@ -89,9 +90,9 @@ bool sum_predicate(std::shared_ptr<Node> gn)
         return false;
     }

-    NGRAPH_DEBUG << "looking at function's result "
-                 << r->get_functions()[0]->get_result()->get_name();
-    if (auto sum = std::dynamic_pointer_cast<op::Add>(r->get_functions()[0]->get_result()))
+    auto result = r->get_functions()[0]->get_result()->get_input_op(0);
+    NGRAPH_DEBUG << "looking at function's result " << result->get_name();
+    if (auto sum = std::dynamic_pointer_cast<op::Add>(result))
     {
         auto parm1 = std::dynamic_pointer_cast<op::Parameter>(sum->get_input_op(0));
         auto parm2 = std::dynamic_pointer_cast<op::Parameter>(sum->get_input_op(1));
...
...
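The rewrite above tracks the same op::Result change: Function::get_result() now hands back a Result wrapper node, so the op being matched is that node's first input. The access pattern in isolation (a sketch; everything here is taken from the diff lines themselves):

auto result_op = r->get_functions()[0]->get_result(); // now an op::Result wrapper
auto value_op = result_op->get_input_op(0);           // the node that computes the value, e.g. an op::Add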
@@ -297,7 +298,7 @@ TEST(pattern, graph_rewrite)
         ASSERT_TRUE(graph_b->get_output_inputs(0).empty());

         auto expected = ngraph::NodeVector{a, b, a, c, b};
-        ASSERT_TRUE(f->get_results() == expected);
+        ASSERT_TRUE(count_ops_of_type<op::Add>(f) == 0);
     }

     {
...
...
test/reshape_elimination.cpp
/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <algorithm>
#include <cstdio>
#include <iostream>
#include <list>
#include <memory>
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/ops/sum.hpp"
#include "ngraph/pass/graph_rewrite.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/reshape_elimination.hpp"
#include "ngraph/pattern/matcher.hpp"
#include "ngraph/pattern/op/any.hpp"
#include "ngraph/pattern/op/label.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
#include "nlohmann/json.hpp"
#include "util/matcher.hpp"
#include "util/test_tools.hpp"
using namespace ngraph;
using namespace std;
TEST(reshape_elimination, remove_reshape)
{
    pass::Manager pass_manager;
    pass_manager.register_pass<pass::ReshapeElimination>();
    const string json_path = file_util::path_join(SERIALIZED_ZOO, "mxnet/bn_fprop.json");
    const string json_string = file_util::read_file_to_string(json_path);
    stringstream ss(json_string);
    shared_ptr<Function> func = ngraph::deserialize(ss);
    size_t count_before = count_ops_of_type<op::Reshape>(func);
    pass_manager.run_passes(func);
    size_t count_after = count_ops_of_type<op::Reshape>(func);
    ASSERT_TRUE(count_after < count_before);
}
TEST(reshape_elimination, remove_tranpose)
{
    pass::Manager pass_manager;
    pass_manager.register_pass<pass::ReshapeElimination>();
    const string json_path = file_util::path_join(SERIALIZED_ZOO, "mxnet/tranpose.json");
    const string json_string = file_util::read_file_to_string(json_path);
    stringstream ss(json_string);
    shared_ptr<Function> func = ngraph::deserialize(ss);
    size_t count_before = count_ops_of_type<op::Reshape>(func);
    pass_manager.run_passes(func);
    size_t count_after = count_ops_of_type<op::Reshape>(func);
    ASSERT_TRUE(count_after < count_before);
}
TEST(reshape_elimination, bn_bprop_rewrite)
{
    pass::Manager pass_manager;
    pass_manager.register_pass<pass::ReshapeElimination>();
    const string json_path = file_util::path_join(SERIALIZED_ZOO, "mxnet/bn_bprop.json");
    const string json_string = file_util::read_file_to_string(json_path);
    stringstream ss(json_string);
    shared_ptr<Function> func = ngraph::deserialize(ss);
    size_t count_before = count_ops_of_type<op::Reshape>(func);
    pass_manager.run_passes(func);
    size_t count_after = count_ops_of_type<op::Reshape>(func);
    ASSERT_TRUE(count_after < count_before);
}
TEST(reshape_elimination, dot_transpose_to_dot_w_transpose_args)
{
    Shape shape_w{2, 4};
    Shape shape_x{4, 1};
    auto W = make_shared<op::Parameter>(element::f32, shape_w);
    auto x = make_shared<op::Parameter>(element::f32, shape_x);

    auto dot = make_shared<op::Dot>(W, x);
    auto reshape_dot = std::make_shared<op::Reshape>(dot, AxisVector{1, 0}, Shape{1, 2});
    auto graph = make_shared<op::Abs>(reshape_dot);
    pass::Manager pass_manager;
    pass_manager.register_pass<pass::ReshapeElimination>();
    auto func = make_shared<Function>(graph, op::ParameterVector{W, x});
    pass_manager.run_passes(func);
    auto gdot = graph->get_input_op(0);
    ASSERT_TRUE(std::dynamic_pointer_cast<op::Dot>(gdot));
    ASSERT_TRUE(std::dynamic_pointer_cast<op::Reshape>(gdot->get_input_op(0)));
    ASSERT_TRUE(std::dynamic_pointer_cast<op::Reshape>(gdot->get_input_op(1)));
    ASSERT_EQ(gdot->get_input_op(0)->get_input_op(0), x);
    ASSERT_EQ(gdot->get_input_op(1)->get_input_op(0), W);
    ASSERT_EQ(gdot->get_shape(), (Shape{1, 2}));
}
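The rewrite this test checks rests on the identity (W·x)ᵀ = xᵀ·Wᵀ: a Reshape{1, 0} of a Dot becomes a Dot of the reshaped (transposed) arguments in swapped order, which is exactly what the assertions on gdot's inputs verify. A standalone numeric check of the identity (illustration only, with arbitrary values; W is 2x4 and x is 4x1 as in shape_w and shape_x above):

#include <array>
#include <cassert>

int main()
{
    std::array<std::array<float, 4>, 2> W{{{1, 2, 3, 4}, {5, 6, 7, 8}}};
    std::array<float, 4> x{1, 0, 2, 1};

    std::array<float, 2> wx_t{};  // transpose(W·x), read as a 1x2 row vector
    std::array<float, 2> xt_wt{}; // xᵀ·Wᵀ, also 1x2
    for (int i = 0; i < 2; ++i)
    {
        for (int k = 0; k < 4; ++k)
        {
            wx_t[i] += W[i][k] * x[k];  // (W·x)[i][0], transposed to [0][i]
            xt_wt[i] += x[k] * W[i][k]; // row vector x' times column i of Wᵀ
        }
    }
    assert(wx_t == xt_wt); // the two orderings agree element for element
    return 0;
}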
test/util/test_tools.cpp
...
...
@@ -33,12 +33,13 @@ bool validate_list(const list<shared_ptr<Node>>& nodes)
         auto node_tmp = *it;
         auto dependencies_tmp = node_tmp->get_input_ops();
         vector<Node*> dependencies;
         for (shared_ptr<Node> n : dependencies_tmp)
         {
             dependencies.push_back(n.get());
         }
-        auto tmp = it++;
-        for (; tmp != nodes.rend(); tmp++)
+        auto tmp = it;
+        for (tmp++; tmp != nodes.rend(); tmp++)
         {
             auto dep_tmp = *tmp;
             auto found = find(dependencies.begin(), dependencies.end(), dep_tmp.get());
...
...
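The validate_list fix is easy to miss: `auto tmp = it++;` advances the outer iterator as a side effect and starts the dependency scan at the node itself, while the new form copies first and advances only the copy. A standalone illustration of the corrected idiom (not from the commit):

#include <iostream>
#include <list>

int main()
{
    std::list<int> nodes{1, 2, 3};
    auto it = nodes.rbegin(); // points at 3
    auto tmp = it;            // copy; `it` is left untouched
    for (tmp++; tmp != nodes.rend(); tmp++)
    {
        std::cout << *tmp << ' '; // scans only the remaining nodes: prints "2 1"
    }
    std::cout << '\n';
    return 0;
}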