submodule / ngraph · Commits · dd5bd9ad

Commit dd5bd9ad, authored Mar 07, 2018 by Louis Feng

    Merge branch 'master' into louisfeng/NGMX-296-conv_bias

Parents: 97c2ce20, ad58cb29

Showing 39 changed files with 948 additions and 224 deletions (+948 / -224)
Changed files:

    INSTALL                                               +1    -1
    cmake/Modules/patch_json.cmake                        +10   -0
    cmake/external_json.cmake                             +6    -0
    src/ngraph/CMakeLists.txt                             +2    -1
    src/ngraph/function.cpp                               +41   -23
    src/ngraph/function.hpp                               +10   -5
    src/ngraph/graph_util.cpp                             +42   -10
    src/ngraph/graph_util.hpp                             +4    -3
    src/ngraph/node.cpp                                   +3    -12
    src/ngraph/node.hpp                                   +1    -3
    src/ngraph/node_vector.hpp                            +5    -0
    src/ngraph/ops/result.cpp                             +53   -0
    src/ngraph/ops/result.hpp                             +47   -0
    src/ngraph/ops/result_vector.hpp                      +52   -0
    src/ngraph/pass/manager.cpp                           +0    -13
    src/ngraph/pass/manager.hpp                           +0    -1
    src/ngraph/pass/reshape_elimination.cpp               +47   -0
    src/ngraph/pass/reshape_elimination.hpp               +2    -0
    src/ngraph/runtime/cpu/cpu_emitter.cpp                +19   -2
    src/ngraph/runtime/cpu/cpu_external_function.cpp      +7    -64
    src/ngraph/runtime/cpu/mkldnn_emitter.hpp             +1    -0
    src/ngraph/runtime/cpu/ops/matmul_bias.cpp            +12   -5
    src/ngraph/runtime/cpu/pass/cpu_assignment.cpp        +0    -0
    src/ngraph/runtime/cpu/pass/cpu_fusion.cpp            +251  -10
    src/ngraph/runtime/cpu/pass/cpu_fusion.hpp            +9    -3
    src/ngraph/runtime/cpu/pass/cpu_layout.cpp            +12   -0
    src/ngraph/runtime/interpreter/int_call_frame.cpp     +7    -44
    src/ngraph/runtime/interpreter/int_call_frame.hpp     +9    -0
    src/ngraph/runtime/kernel/result.hpp                  +36   -0
    src/ngraph/serializer.cpp                             +8    -0
    src/ngraph/util.cpp                                   +25   -6
    test/build_graph.cpp                                  +1    -1
    test/cpu_fusion.cpp                                   +174  -2
    test/graph_partition.cpp                              +7    -5
    test/pass_liveness.cpp                                +11   -3
    test/pass_memory_layout.cpp                           +1    -1
    test/pattern.cpp                                      +5    -4
    test/reshape_elimination.cpp                          +24   -0
    test/util/test_tools.cpp                              +3    -2
INSTALL  (View file @ dd5bd9ad)

@@ -36,7 +36,7 @@ General Instructions
 These instructions assume that your system has been prepared in accordance
 with the above prerequisites.
-    $ cd private-ngraph-cpp
+    $ cd ngraph-cpp
     $ mkdir build
     $ cd build
     $ cmake .. \
cmake/Modules/patch_json.cmake
0 → 100644
View file @
dd5bd9ad
set
(
FILE_NAME
${
CMAKE_BINARY_DIR
}
/include/nlohmann/detail/macro_scope.hpp
)
file
(
READ
${
FILE_NAME
}
FILE_CONTENTS
)
string
(
REPLACE
"#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40900"
"#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40805"
REWRITTEN_FILE
"
${
FILE_CONTENTS
}
"
)
file
(
WRITE
${
FILE_NAME
}
"
${
REWRITTEN_FILE
}
"
)
message
(
STATUS
"json library gcc minimum version number patched"
)
cmake/external_json.cmake  (View file @ dd5bd9ad)

@@ -34,6 +34,9 @@ if (${CMAKE_VERSION} VERSION_LESS 3.2)
         BUILD_COMMAND ""
         INSTALL_COMMAND ""
         UPDATE_COMMAND ""
+        # cmake does not allow calling cmake functions so we call a cmake script in the Module
+        # directory.
+        PATCH_COMMAND ${CMAKE_COMMAND} -P ${CMAKE_MODULE_PATH}patch_json.cmake
         )
 else()
     ExternalProject_Add(
@@ -44,6 +47,9 @@ else()
         BUILD_COMMAND ""
         INSTALL_COMMAND ""
         UPDATE_COMMAND ""
+        # cmake does not allow calling cmake functions so we call a cmake script in the Module
+        # directory.
+        PATCH_COMMAND ${CMAKE_COMMAND} -P ${CMAKE_MODULE_PATH}patch_json.cmake
         )
 endif()
src/ngraph/CMakeLists.txt  (View file @ dd5bd9ad)

@@ -67,6 +67,7 @@ set (SRC
     ops/replace_slice.cpp
     ops/reshape.cpp
     ops/reverse.cpp
+    ops/result.cpp
     ops/select.cpp
     ops/select_and_scatter.cpp
     ops/sin.cpp
@@ -184,8 +185,8 @@ if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND
     runtime/cpu/mkldnn_emitter.cpp
     runtime/cpu/mkldnn_invoke.cpp
     runtime/cpu/mkldnn_utils.cpp
-    runtime/cpu/ops/convert_layout.cpp
+    runtime/cpu/ops/conv_bias.cpp
+    runtime/cpu/ops/convert_layout.cpp
     runtime/cpu/ops/matmul_bias.cpp
     runtime/cpu/pass/cpu_assignment.cpp
     runtime/cpu/pass/cpu_fusion.cpp
src/ngraph/function.cpp  (View file @ dd5bd9ad)

@@ -27,7 +27,7 @@ using namespace ngraph;

 atomic<size_t> Function::m_next_instance_id(0);

-Function::Function(const NodeVector& results,
+Function::Function(const ResultVector& results,
                    const op::ParameterVector& parameters,
                    const std::string& name)
     : m_results(results)
@@ -37,14 +37,50 @@ Function::Function(const NodeVector& results,
     , m_name(name)
     , m_unique_name("Function_" + to_string(m_instance_id))
 {
     init();
 }

+Function::Function(const NodeVector& results,
+                   const op::ParameterVector& parameters,
+                   const std::string& name)
+    : m_results(results.size())
+    , m_parameters(parameters)
+    , m_temporary_pool_size(0)
+    , m_instance_id(m_next_instance_id.fetch_add(1))
+    , m_name(name)
+    , m_unique_name("Function_" + to_string(m_instance_id))
+{
+    std::transform(results.begin(), results.end(), m_results.begin(),
+                   [](std::shared_ptr<Node> n) { return std::make_shared<op::Result>(n); });
+    init();
+}
+
+Function::Function(const std::shared_ptr<Node>& result,
+                   const op::ParameterVector& parameters,
+                   const std::string& name)
+    : Function(NodeVector{result}, parameters, name)
+{
+}
+
+void Function::init()
+{
+    for (auto r : m_results)
+    {
+        for (descriptor::Output& output : r->get_outputs())
+        {
+            output.get_tensor().set_is_output();
+        }
+    }
+
     traverse_nodes(this, [&](shared_ptr<Node> node) {
         std::shared_ptr<op::Parameter> p = std::dynamic_pointer_cast<op::Parameter>(node);
         if (nullptr != p)
         {
-            auto it = std::find_if(parameters.begin(), parameters.end(),
+            auto it = std::find_if(m_parameters.begin(), m_parameters.end(),
                                    [p](std::shared_ptr<op::Parameter> q) { return (p == q); });
-            if (it == parameters.end())
+            if (it == m_parameters.end())
             {
                 throw ngraph_error("Function references undeclared parameter");
             }
@@ -52,13 +88,6 @@ Function::Function(const NodeVector& results,
     });
 }

-Function::Function(const std::shared_ptr<Node>& result,
-                   const op::ParameterVector& parameters,
-                   const std::string& name)
-    : Function(NodeVector{result}, parameters, name)
-{
-}
-
 std::list<shared_ptr<Node>> Function::get_ordered_ops()
 {
     return topological_sort(get_ops());
@@ -156,18 +185,7 @@ std::list<shared_ptr<Node>> Function::get_ops() const
     return ops;
 }

-void Function::replace_output_op(std::shared_ptr<Node> old, std::shared_ptr<Node> repl)
-{
-    auto it = std::find(begin(m_results), end(m_results), old);
-    if (it != end(m_results))
-    {
-        NGRAPH_DEBUG << "Replacing output " << old->get_name() << " w/ " << repl->get_name();
-        *it = repl;
-    }
-}
-
 void Function::replace_node(std::shared_ptr<Node> old, std::shared_ptr<Node> repl)
 {
-    replace_output_op(old, repl);
-    ngraph::replace_node(old, repl, true);
+    ngraph::replace_node(old, repl);
 }
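For orientation, a minimal sketch of how the two constructors above relate: a Function built from plain nodes now has each output wrapped in an op::Result, so get_results() yields Result nodes whose single input is the value-producing node. The small Abs graph and shapes below are illustrative only, not taken from this commit.

    // Sketch only: build a one-output function and inspect its results.
    // Assumes the ngraph headers touched in this commit are available.
    #include "ngraph/function.hpp"
    #include "ngraph/ops/abs.hpp"
    #include "ngraph/ops/parameter.hpp"

    using namespace ngraph;

    std::shared_ptr<Function> make_example_function()
    {
        auto arg = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
        auto abs = std::make_shared<op::Abs>(arg);

        // The NodeVector constructor wraps each output node in an op::Result.
        auto f = std::make_shared<Function>(NodeVector{abs}, op::ParameterVector{arg});

        for (auto r : f->get_results())
        {
            // r is an op::Result; r->get_input_op(0) is the original `abs` node.
        }
        return f;
    }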
src/ngraph/function.hpp  (View file @ dd5bd9ad)

@@ -25,6 +25,7 @@
 #include "ngraph/node.hpp"
 #include "ngraph/ops/parameter_vector.hpp"
+#include "ngraph/ops/result_vector.hpp"
 #include "ngraph/types/type.hpp"

 namespace ngraph
@@ -41,6 +42,12 @@ namespace ngraph
                  const op::ParameterVector& parameters,
                  const std::string& name = "");

+        Function(const ResultVector& results,
+                 const op::ParameterVector& parameters,
+                 const std::string& name = "");
+
+        void init();
+
         virtual ~Function() {}
     public:
         /// Return the number of outputs for this function.
@@ -57,8 +64,8 @@ namespace ngraph
         /// Return the function parameters
         const op::ParameterVector& get_parameters() const { return m_parameters; }
-        /// Return the ops that generate the results
-        const NodeVector get_results() const { return m_results; }
+        /// Return a list of function's outputs
+        const ResultVector& get_results() const { return m_results; }
         /// Check that there is a single result and return it.
         std::shared_ptr<Node> get_result() const;
@@ -73,13 +80,11 @@ namespace ngraph
         size_t get_instance_id() { return m_instance_id; }
         size_t get_temporary_pool_size();
         void set_temporary_pool_size(size_t);
-        // updates old w/ repl in m_results list
-        void replace_output_op(std::shared_ptr<Node> old, std::shared_ptr<Node> repl);
         // updates graph and m_results list
         void replace_node(std::shared_ptr<Node> old, std::shared_ptr<Node> repl);

     protected:
-        NodeVector m_results;
+        ResultVector m_results;
         op::ParameterVector m_parameters;
         size_t m_temporary_pool_size;
src/ngraph/graph_util.cpp  (View file @ dd5bd9ad)

@@ -29,6 +29,8 @@
 #include "ngraph/node_vector.hpp"
 #include "ngraph/ops/constant.hpp"
 #include "ngraph/ops/parameter.hpp"
+#include "ngraph/ops/result.hpp"
+#include "ngraph/ops/result_vector.hpp"
 #include "ngraph/placement.hpp"
 #include "ngraph/util.hpp"
@@ -114,13 +116,11 @@ void ngraph::free_nodes(shared_ptr<Function> p)
     }
 }

-void ngraph::replace_node(std::shared_ptr<Node> target,
-                          std::shared_ptr<Node> replacement,
-                          bool replace_output)
+void ngraph::replace_node(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement)
 {
-    if (target->is_output() && !replace_output)
+    if (target->is_output())
     {
-        return;
+        throw ngraph_error("Result nodes cannot be replaced.");
     }

     // Fix input/output descriptors
@@ -197,6 +197,15 @@ std::list<std::shared_ptr<ngraph::Node>>
     return result_list;
 }

+void ngraph::NodeMap::update(std::shared_ptr<ngraph::Node> orig, std::shared_ptr<ngraph::Node> val)
+{
+    if (!exists(orig))
+    {
+        throw ngraph_error("Node doesn't exist!");
+    }
+    m_node_map[orig] = val;
+}
+
 void ngraph::NodeMap::add(std::shared_ptr<ngraph::Node> orig,
                           std::shared_ptr<ngraph::Node> replacement)
 {
@@ -252,10 +261,15 @@ std::shared_ptr<ngraph::Function> ngraph::clone_function(std::shared_ptr<ngraph:
     clone_nodes(func->get_ops(), node_map);

     // get cloned function results and parameters
-    NodeVector cloned_results;
+    ResultVector cloned_results;
     for (shared_ptr<Node> node : func->get_results())
     {
-        cloned_results.push_back(node_map.get(node));
+        auto result = std::dynamic_pointer_cast<op::Result>(node_map.get(node));
+        if (!result)
+        {
+            throw ngraph_error("Results should be of type op::Result");
+        }
+        cloned_results.push_back(result);
     }
     std::vector<std::shared_ptr<op::Parameter>> cloned_params;
     for (auto param : func->get_parameters())
@@ -435,8 +449,8 @@ static shared_ptr<Function> build_largest_colocated_function(
             }
         }
     }
-    return make_shared<Function>(outputs, collected_parameters);
+    auto func = make_shared<Function>(outputs, collected_parameters);
+    return func;
 }

 // The returned nodes contains the node N with highest order. If N is placed at P, the returned
@@ -528,7 +542,7 @@ vector<shared_ptr<Function>> ngraph::split_function_by_placement(
         // Remove input-output and constant-output aliasing
         if (f_parameters.count(node) == 0 && node->description() != "Constant")
         {
-            unvisited_outputs.insert(node);
+            unvisited_outputs.insert(node->get_input_op(0));
         }
     }
@@ -571,6 +585,24 @@ vector<shared_ptr<Function>> ngraph::split_function_by_placement(
         unvisited_outputs = updated_unvisited_outputs;
     }

+    unordered_map<shared_ptr<Node>, shared_ptr<Node>> map_source_node_to_result;
+    for (auto cf : colocated_functions)
+    {
+        for (auto r : cf->get_results())
+        {
+            map_source_node_to_result[r->get_input_op(0)] = r;
+        }
+    }
+
+    for (auto it = map_parameter_to_source_node.begin(); it != map_parameter_to_source_node.end();
+         ++it)
+    {
+        if (map_source_node_to_result.count(it->second) != 0)
+        {
+            it->second = map_source_node_to_result[it->second];
+        }
+    }
+
     // The colocated_functions should be called in reversed order
     reverse(colocated_functions.begin(), colocated_functions.end());
     return colocated_functions;
src/ngraph/graph_util.hpp  (View file @ dd5bd9ad)

@@ -48,9 +48,8 @@ namespace ngraph
     void free_nodes(std::shared_ptr<Function>);

-    void replace_node(std::shared_ptr<Node> target,
-                      std::shared_ptr<Node> replacement,
-                      bool replace_output = false);
+    void replace_node(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement);

     void replace_node_users_arguments(std::shared_ptr<Node> target,
                                       std::shared_ptr<Node> replacement);
@@ -78,6 +77,8 @@ namespace ngraph
             return (m_node_map.count(orig) != 0);
         }

+        void update(std::shared_ptr<ngraph::Node> orig, std::shared_ptr<ngraph::Node> val);
+
         const std::unordered_map<std::shared_ptr<ngraph::Node>, std::shared_ptr<ngraph::Node>>&
             get_node_map() const
         {
src/ngraph/node.cpp  (View file @ dd5bd9ad)

@@ -23,6 +23,7 @@
 #include "ngraph/descriptor/layout/tensor_view_layout.hpp"
 #include "ngraph/descriptor/primary_tensor_view.hpp"
 #include "ngraph/ops/parameter.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/placement.hpp"

 using namespace std;
@@ -34,7 +35,6 @@ Node::Node(const std::string& node_type, const NodeVector& arguments)
     : m_node_type(node_type)
     , m_instance_id(m_next_instance_id.fetch_add(1))
     , m_unique_name(description() + "_" + to_string(m_instance_id))
-    , m_is_output(false)
     , m_arguments(arguments)
 {
     // Add this node as a user of each argument.
@@ -68,7 +68,7 @@ void Node::add_output(const element::Type& element_type, const Shape& shape)
     auto tensor_view_descriptor = make_shared<descriptor::PrimaryTensorView>(
         tensor_view_type,
         ngraph::descriptor::Tensor::make_tensor_name(this, i),
-        is_output(),
+        false,
         is_parameter(),
         is_constant());
     m_outputs.emplace_back(this, i, tensor_view_descriptor);
@@ -96,16 +96,7 @@ bool Node::is_parameter() const
 bool Node::is_output() const
 {
-    return m_is_output;
-}
-
-void Node::set_is_output()
-{
-    m_is_output = true;
-    for (descriptor::Output& output : get_outputs())
-    {
-        output.get_tensor().set_is_output();
-    }
+    return false;
 }

 bool Node::is_constant() const
src/ngraph/node.hpp  (View file @ dd5bd9ad)

@@ -102,8 +102,7 @@ namespace ngraph
     void set_value_type_checked(const element::Type& element_type, const Shape& shape);

     bool is_parameter() const;
-    bool is_output() const;
-    void set_is_output();
+    virtual bool is_output() const;
     virtual bool is_constant() const;
     virtual bool is_commutative() { return false; }
     size_t get_instance_id() const { return m_instance_id; }
@@ -200,7 +199,6 @@ namespace ngraph
     static std::atomic<size_t> m_next_instance_id;
     std::deque<descriptor::Input> m_inputs;
     std::deque<descriptor::Output> m_outputs;
-    bool m_is_output;
     std::unordered_map<Node*, autodiff::Adjoints> m_adjoint_map;
     Placement m_placement = Placement::DEFAULT;
src/ngraph/node_vector.hpp  (View file @ dd5bd9ad)

@@ -23,6 +23,11 @@ namespace ngraph
 {
     class Node;

+    namespace op
+    {
+        class Result;
+    }
+
     /// \brief Zero or more nodes.
     class NodeVector : public std::vector<std::shared_ptr<Node>>
     {
src/ngraph/ops/result.cpp  (new file, 0 → 100644)  (View file @ dd5bd9ad)

/*******************************************************************************
 * Copyright 2017-2018 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/

#include <memory>
#include <typeindex>
#include <typeinfo>

#include "ngraph/node.hpp"
#include "ngraph/ops/result.hpp"

using namespace std;
using namespace ngraph;

op::Result::Result(const std::shared_ptr<Node>& arg)
    : RequiresTensorViewArgs("Result", {arg})
{
    if (arg->get_outputs().size() != 1)
    {
        throw ngraph_error("Expected a single-output argument");
    }

    // always borrow the placement conf even the default one
    set_placement(arg->get_placement());
    set_value_type_checked(arg->get_element_type(), arg->get_shape());
}

std::shared_ptr<Node> op::Result::copy_with_new_args(const NodeVector& new_args) const
{
    if (new_args.size() != 1)
    {
        throw ngraph_error("Incorrect number of new arguments");
    }

    if (new_args.at(0)->get_outputs().size() != 1)
    {
        throw ngraph_error("Expected a single-output argument");
    }

    return std::make_shared<Result>(new_args.at(0));
}
src/ngraph/ops/result.hpp  (new file, 0 → 100644)  (View file @ dd5bd9ad)

/*******************************************************************************
 * Copyright 2017-2018 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/

#pragma once

#include <memory>

#include "ngraph/ops/util/requires_tensor_view_args.hpp"

namespace ngraph
{
    namespace op
    {
        class Result : public util::RequiresTensorViewArgs
        {
        public:
            /// \brief Constructs a Result operation.
            ///
            /// \param arg Node that produces the input tensor.
            Result(const std::shared_ptr<Node>& arg);

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

            virtual bool is_output() const override { return true; }
        protected:
            virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                           const std::shared_ptr<Node>& delta) override
            {
                adjoints.add_delta(get_input_op(0), delta);
            }
        };
    }
}
src/ngraph/ops/result_vector.hpp  (new file, 0 → 100644)  (View file @ dd5bd9ad)

/*******************************************************************************
 * Copyright 2017-2018 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/

#pragma once

#include <memory>
#include <vector>

#include "ngraph/ops/result.hpp"

namespace ngraph
{
    /// \brief Zero or more nodes.
    class ResultVector : public std::vector<std::shared_ptr<op::Result>>
    {
    public:
        ResultVector(size_t size)
            : std::vector<std::shared_ptr<op::Result>>(size)
        {
        }

        ResultVector(const std::initializer_list<std::shared_ptr<op::Result>>& nodes)
            : std::vector<std::shared_ptr<op::Result>>(nodes)
        {
        }

        ResultVector(const std::vector<std::shared_ptr<op::Result>>& nodes)
            : std::vector<std::shared_ptr<op::Result>>(nodes)
        {
        }

        ResultVector(const ResultVector& nodes)
            : std::vector<std::shared_ptr<op::Result>>(nodes)
        {
        }

        ResultVector() {}
    };
}
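A hedged usage sketch of the new Result/ResultVector pair introduced above: callers that already hold op::Result nodes can pass them to the new Function(const ResultVector&, ...) constructor directly, while NodeVector-based callers keep working because that constructor wraps each node for them. The parameter and shape below are illustrative only.

    // Sketch only: wrap an output node in op::Result explicitly.
    auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{4});
    auto result = std::make_shared<ngraph::op::Result>(param); // requires a single-output argument

    ngraph::ResultVector results{result};
    auto f = std::make_shared<ngraph::Function>(results, ngraph::op::ParameterVector{param});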
src/ngraph/pass/manager.cpp  (View file @ dd5bd9ad)

@@ -30,12 +30,10 @@ using namespace std;
 using namespace ngraph;

 ngraph::pass::Manager::Manager()
-    : m_to_set_is_output(true)
 {
 }

 ngraph::pass::Manager::Manager(bool to_set_is_output)
-    : m_to_set_is_output(to_set_is_output)
 {
 }
@@ -56,17 +54,6 @@ void ngraph::pass::Manager::run_passes(shared_ptr<Function> func)
     set<shared_ptr<Function>> tfs(begin(fs), end(fs));
     get_state().set_functions(tfs);

-    if (m_to_set_is_output)
-    {
-        for (shared_ptr<Function> f : get_state().get_functions())
-        {
-            for (size_t i = 0; i < f->get_output_size(); ++i)
-            {
-                f->get_output_op(i)->set_is_output();
-            }
-        }
-    }
-
     for (shared_ptr<PassBase> pass : m_pass_list)
     {
         pass->set_state(get_state());
src/ngraph/pass/manager.hpp  (View file @ dd5bd9ad)

@@ -57,5 +57,4 @@ public:
 private:
     std::vector<std::shared_ptr<PassBase>> m_pass_list;
     ManagerState m_state;
-    bool m_to_set_is_output;
 };
src/ngraph/pass/reshape_elimination.cpp  (View file @ dd5bd9ad)

@@ -150,3 +150,50 @@ void ngraph::pass::ReshapeElimination::construct_reshapex2_pattern()
     auto m = std::make_shared<ngraph::pattern::Matcher>(reshape2, callback);
     this->add_matcher(m);
 }

+void ngraph::pass::ReshapeElimination::construct_dot_transpose_pattern()
+{
+    //dot(A,B).T = dot (B.T, A.T)
+    auto dot_pred = [](std::shared_ptr<Node> n) {
+        return static_cast<bool>(std::dynamic_pointer_cast<op::Dot>(n));
+    };
+
+    auto pdot = std::make_shared<pattern::op::Label>(element::f32, Shape{2, 1}, dot_pred);
+    auto preshape = std::make_shared<op::Reshape>(pdot, AxisVector{1, 0}, Shape{1, 2});
+
+    ngraph::pattern::gr_callback_fn callback = [](pattern::Matcher& m) {
+        NGRAPH_DEBUG << "In callback for construct_dot_transpose_pattern against node = "
+                     << m.match_root()->get_name();
+
+        std::shared_ptr<Node> nn;
+
+        auto mtranspose = std::dynamic_pointer_cast<op::Reshape>(m.match_root());
+        //this also checks the rank
+        if (mtranspose->get_input_order() != AxisVector{1, 0})
+        {
+            NGRAPH_DEBUG << "Reshape isn't transpose. "
+                         << vector_to_string(mtranspose->get_input_order());
+            return nn;
+        }
+
+        auto mdot = mtranspose->get_input_op(0);
+        if (mdot->get_shape().size() != 2)
+        {
+            NGRAPH_DEBUG << "Dot has the wrong shape. " << vector_to_string(mdot->get_shape());
+            return nn;
+        }
+
+        auto arg0 = mdot->get_input_op(0);
+        auto reshape0_shape = Shape{arg0->get_shape().at(1), arg0->get_shape().at(0)};
+        auto reshape0 = std::make_shared<op::Reshape>(arg0, AxisVector{1, 0}, reshape0_shape);
+
+        auto arg1 = mdot->get_input_op(1);
+        auto reshape1_shape = Shape{arg1->get_shape().at(1), arg1->get_shape().at(0)};
+        auto reshape1 = std::make_shared<op::Reshape>(arg1, AxisVector{1, 0}, reshape1_shape);
+
+        auto tdot = std::shared_ptr<Node>(new op::Dot(reshape1, reshape0));
+        return tdot;
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(preshape, callback);
+    this->add_matcher(m);
+}
src/ngraph/pass/reshape_elimination.hpp  (View file @ dd5bd9ad)

@@ -32,11 +32,13 @@
 public:
     ReshapeElimination()
         : GraphRewrite()
     {
+        construct_dot_transpose_pattern();
         construct_identity_reshape_pattern();
         construct_reshapex2_pattern();
     }

 private:
+    void construct_dot_transpose_pattern();
     void construct_identity_reshape_pattern();
     void construct_reshapex2_pattern();
 };
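As a hedged illustration of how the extended pass is driven (mirroring the pass::Manager usage that appears in this commit's tests), the new construct_dot_transpose_pattern() is registered by the constructor, so simply running the pass picks up the dot/transpose rewrite. The function f below is assumed to be an existing ngraph::Function.

    // Sketch only: run ReshapeElimination over an existing function `f`.
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::ReshapeElimination>();
    pass_manager.run_passes(f); // dot(A,B)^T is rewritten to dot(B^T, A^T) where the pattern matches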
src/ngraph/runtime/cpu/cpu_emitter.cpp  (View file @ dd5bd9ad)

@@ -72,6 +72,7 @@
 #include "ngraph/ops/remainder.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
@@ -240,7 +241,7 @@ namespace ngraph
     const Shape& arg0_shape = cg->get_arg0_shape(); //W
     const Shape& arg1_shape = cg->get_arg1_shape(); //x
-    const Shape& arg2_shape = args[2].get_shape();  //bias (C)
+    const Shape& arg2_shape = node->get_shape();    //bias (C)

     static const char* ctranspose = "cblas::Transpose::Transpose, ";
     static const char* cnotranspose = "cblas::Transpose::None, ";
@@ -270,16 +271,23 @@ namespace ngraph
     writer << "{   // " << node->get_name() << "\n";
     writer.indent++;

+    const char* cbeta = "0.0f";
+
+    if (args.size() > 2)
+    {
+        writer << "memcpy(" << out[0].get_name() << ", " << args[2].get_name() << ", "
+               << out[0].get_size() * out[0].get_element_type().size() << ");\n";
+        cbeta = "1.0f";
+    }
+
     writer << "cblas::cblas_sgemm("
            << "cblas::Layout::RowMajor, " << tranpose_a << tranpose_b << m << ", " << n << ", "
            << k << ",\n"
            << "        1.0f, " << args[0].get_name() << ", " << max(1UL, lda) << ", "
-           << args[1].get_name() << ", " << max(1UL, ldb) << ", 1.0f,\n"
+           << args[1].get_name() << ", " << max(1UL, ldb) << ", " << cbeta << ",\n"
            << "        " << out[0].get_name() << ", " << max(1UL, arg2_shape[1]) << ");\n";
     writer.indent--;
     writer << "}\n";
 }
@@ -3526,6 +3534,15 @@ namespace ngraph
                 }
             }

+            template <>
+            void CPU_Emitter::EMITTER_DECL(ngraph::op::Result)
+            {
+                writer << "kernel::result<" << out[0].get_type() << ">(" << args[0].get_name()
+                       << ",\n";
+                writer << "               " << out[0].get_name() << ",\n";
+                writer << "               " << shape_size(node->get_shape()) << ");\n";
+            }
         }
     }
 }
src/ngraph/runtime/cpu/cpu_external_function.cpp  (View file @ dd5bd9ad)

@@ -82,6 +82,7 @@
 #include "ngraph/ops/remainder.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
@@ -234,6 +235,7 @@ static const runtime::cpu::OpMap dispatcher{
     {TI(ngraph::op::Not), &runtime::cpu::CPU_Emitter::emit<op::Not>},
     {TI(ngraph::op::MaxPool), &runtime::cpu::CPU_Emitter::emit<op::MaxPool>},
     {TI(ngraph::op::Reverse), &runtime::cpu::CPU_Emitter::emit<op::Reverse>},
+    {TI(ngraph::op::Result), &runtime::cpu::CPU_Emitter::emit<op::Result>},
     {TI(ngraph::op::ReduceWindow), &runtime::cpu::CPU_Emitter::emit<op::ReduceWindow>},
     {TI(ngraph::op::SelectAndScatter), &runtime::cpu::CPU_Emitter::emit<op::SelectAndScatter>},
     {TI(ngraph::op::AvgPool), &runtime::cpu::CPU_Emitter::emit<op::AvgPool>},
@@ -323,6 +325,7 @@ void runtime::cpu::CPU_ExternalFunction::compile()
 #include "ngraph/runtime/kernel/relu.hpp"
 #include "ngraph/runtime/kernel/replace_slice.hpp"
 #include "ngraph/runtime/kernel/reshape.hpp"
+#include "ngraph/runtime/kernel/result.hpp"
 #include "ngraph/runtime/kernel/reverse.hpp"
 #include "ngraph/runtime/kernel/select_and_scatter.hpp"
 #include "ngraph/runtime/kernel/slice.hpp"
@@ -611,6 +614,7 @@ using namespace ngraph::runtime;
     }

     // create output alias map
+    /*
     size_t output_index = 0;
     unordered_map<descriptor::TensorView*, vector<size_t>> output_alias_map;
     vector<size_t> aliases;
@@ -626,49 +630,18 @@ using namespace ngraph::runtime;
         }
         output_index++;
     }
+    */

     // Add outputs to the variable name map
-    output_index = 0;
     for (size_t i = 0; i < current_function->get_output_size(); ++i)
     {
         shared_ptr<Node> op = current_function->get_output_op(i);
         shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_view();
-        const element::Type& et = tv->get_tensor_view_type()->get_element_type();
-        bool parameter_as_output = false;
-        for (shared_ptr<ngraph::op::Parameter> param : current_function->get_parameters())
-        {
-            for (const descriptor::Output& pout : param->get_outputs())
-            {
-                shared_ptr<descriptor::TensorView> ptv = pout.get_tensor_view();
-                if (tv == ptv)
-                {
-                    parameter_as_output = true;
-                    writer << "memcpy(static_cast<" << et.c_type_string() << "*>(outputs["
-                           << output_index << "]), "
-                           << m_variable_name_map[ptv->get_tensor().get_name()] << ", "
-                           << ptv->get_tensor().size() << ");\n";
-                    break;
-                }
-            }
-        }
-        if (!parameter_as_output && !contains(aliases, output_index))
-        {
-            if (contains(constants, tv.get()))
-            {
-                writer << "memcpy(outputs[" << output_index << "], "
-                       << tv->get_tensor().get_name() << ", " << tv->get_tensor().size() << ");\n";
-            }
-            else
-            {
-                string type = et.c_type_string();
-                stringstream ss;
-                ss << "((" << type << "*)(outputs[" << output_index << "]))";
-                m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
-            }
-        }
-        output_index++;
+        string type = tv->get_tensor_view_type()->get_element_type().c_type_string();
+        stringstream ss;
+        ss << "((" << type << "*)(outputs[" << i << "]))";
+        m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
     }

     for (shared_ptr<Node> node : current_function->get_ordered_ops())
     {
@@ -758,7 +731,6 @@ using namespace ngraph::runtime;
         // Emit operation epilogue
         if (!node->is_parameter() && !node->is_constant())
         {
-            handle_output_alias(writer, *node, output_alias_map);
             if (m_emit_timing)
             {
                 emit_debug_function_exit(writer, node.get(), in, out);
@@ -895,35 +867,6 @@ using namespace ngraph::runtime;
     }
 }

-void runtime::cpu::CPU_ExternalFunction::handle_output_alias(
-    codegen::CodeWriter& writer,
-    const Node& node,
-    const unordered_map<descriptor::TensorView*, vector<size_t>>& output_alias_map)
-{
-    for (const descriptor::Output& output : node.get_outputs())
-    {
-        shared_ptr<descriptor::TensorView> otv = output.get_tensor_view();
-        auto it = output_alias_map.find(otv.get());
-        if (it != output_alias_map.end())
-        {
-            const vector<size_t>& outputs = it->second;
-            if (outputs.size() > 1)
-            {
-                writer << "{   // handle output alias for previous op\n";
-                writer.indent++;
-                for (size_t i = 1; i < outputs.size(); i++)
-                {
-                    writer << "memcpy(static_cast<void*>(outputs[" << outputs[i]
-                           << "]), static_cast<void*>(outputs[" << outputs[0] << "]), "
-                           << otv->get_tensor().size() << ");\n";
-                }
-                writer.indent--;
-                writer << "}\n";
-            }
-        }
-    }
-}
-
 shared_ptr<ngraph::runtime::CallFrame> runtime::cpu::CPU_ExternalFunction::make_call_frame()
 {
     if (!m_is_compiled)
src/ngraph/runtime/cpu/mkldnn_emitter.hpp  (View file @ dd5bd9ad)

@@ -113,6 +113,7 @@ namespace ngraph
     size_t build_relu_forward(const mkldnn::memory::desc& input_desc,
                               const mkldnn::memory::desc& result_desc);
+
     size_t build_elementwise_add(const mkldnn::memory::desc& input0_data_desc,
                                  const mkldnn::memory::desc& input1_data_desc,
src/ngraph/runtime/cpu/ops/matmul_bias.cpp  (View file @ dd5bd9ad)

@@ -21,13 +21,14 @@
 std::shared_ptr<ngraph::Node>
     ngraph::op::MatmulBias::copy_with_new_args(const NodeVector& new_args) const
 {
-    if (new_args.size() != 2)
+    if (new_args.size() != 2 && new_args.size() != 3)
     {
         throw ngraph_error("Incorrect number of new arguments");
     }
     return std::make_shared<MatmulBias>(new_args.at(0),
                                         new_args.at(1),
-                                        new_args.at(1),
+                                        new_args.size() == 3 ? new_args.at(2) : nullptr,
                                         m_shape_w,
                                         m_shape_x,
                                         m_transpose_w,
@@ -41,7 +42,9 @@ ngraph::op::MatmulBias::MatmulBias(std::shared_ptr<ngraph::Node> W,
                                    Shape shape_x,
                                    bool transpose_w,
                                    bool transpose_x)
-    : RequiresTensorViewArgs("MatMulBias", {W, x, b})
+    : RequiresTensorViewArgs("MatMulBias",
+                             b == nullptr ? std::vector<std::shared_ptr<Node>>{W, x}
+                                          : std::vector<std::shared_ptr<Node>>{W, x, b})
     , m_shape_w(shape_w)
     , m_shape_x(shape_x)
     , m_transpose_w(transpose_w)
@@ -74,8 +77,12 @@ ngraph::op::MatmulBias::MatmulBias(std::shared_ptr<ngraph::Node> W,
     }

     Shape dot_shape{shape_w.at(1 - dot_dimension_w), shape_x.at(1 - dot_dimension_x)};
-    NGRAPH_DEBUG << "dot_shape shape = " << vector_to_string(dot_shape)
-                 << " , b shape = " << vector_to_string(b->get_shape());
+    NGRAPH_DEBUG << "dot_shape shape = " << vector_to_string(dot_shape);
+
+    if (b)
+    {
+        NGRAPH_DEBUG << "b shape = " << vector_to_string(b->get_shape());
+    }

     add_output(W->get_element_type(), dot_shape);
 }
src/ngraph/runtime/cpu/pass/cpu_assignment.cpp  (View file @ dd5bd9ad; no textual changes shown)
src/ngraph/runtime/cpu/pass/cpu_fusion.cpp  (View file @ dd5bd9ad)

@@ -31,6 +31,7 @@
 #include "ngraph/ops/divide.hpp"
 #include "ngraph/ops/dot.hpp"
 #include "ngraph/ops/multiply.hpp"
+#include "ngraph/ops/pad.hpp"
 #include "ngraph/ops/parameter.hpp"
 #include "ngraph/ops/reshape.hpp"
 #include "ngraph/ops/sqrt.hpp"
@@ -51,6 +52,12 @@ static bool init_cblas_arg(std::shared_ptr<ngraph::Node> reshape,
     if (!r_w)
     {
+        if (arg->get_shape().size() != 2)
+        {
+            NGRAPH_DEBUG << arg->get_name() << " 's rank != 2 "
+                         << ngraph::vector_to_string(arg->get_shape());
+            return false;
+        }
         return true; //nth to do; reshape isn't a reshape
     }
@@ -108,7 +115,38 @@ static std::vector<T> apply_permutation(std::vector<T> input, ngraph::AxisVector
     return output;
 }

-void ngraph::runtime::cpu::pass::CPUFusion::construct_gemm_pattern()
+void ngraph::runtime::cpu::pass::CPUFusion::construct_matmulbias_pattern()
+{
+    Shape shape_w{2, 4};
+    Shape shape_x{4, 1};
+    Shape shape_b{1};
+    auto W = std::make_shared<pattern::op::Label>(element::f32, shape_w);
+    auto x = std::make_shared<pattern::op::Label>(element::f32, shape_x);
+    auto b = std::make_shared<pattern::op::Label>(element::f32, shape_b);
+
+    auto pmmb = std::make_shared<op::MatmulBias>(
+        W, x, nullptr, W->get_shape(), x->get_shape(), false, false);
+    auto pbroadcast = std::make_shared<op::Broadcast>(b, pmmb->get_shape(), AxisSet{0});
+    auto padd = pmmb + pbroadcast;
+
+    ngraph::pattern::gr_callback_fn callback = [W, x](pattern::Matcher& m) {
+        NGRAPH_DEBUG << "In callback for construct_matmulbias_pattern against node = "
+                     << m.match_root()->get_name();
+
+        auto mpattern = m.match_root(); //add
+        auto m_matmul = mpattern->get_input_op(0);
+        auto m_broadcast = mpattern->get_input_op(1);
+        auto pattern_map = m.get_pattern_map();
+
+        return m_matmul->copy_with_new_args(
+            NodeVector{pattern_map[W], pattern_map[x], m_broadcast});
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(padd, callback);
+    this->add_matcher(m);
+}
+
+void ngraph::runtime::cpu::pass::CPUFusion::construct_matmul_pattern()
 {
     Shape shape_w{2, 4};
     Shape shape_x{4, 1};
@@ -126,30 +164,34 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_gemm_pattern()
     auto skip_x = std::make_shared<pattern::op::Any>(x, reshape_pred);
     auto pdot = std::make_shared<op::Dot>(skip_w, skip_x);
-    auto b = std::make_shared<pattern::op::Label>(element::f32, shape_b);
-    auto pbroadcast = std::make_shared<op::Broadcast>(b, shape_dot, AxisSet{0});
-    auto padd = pdot + pbroadcast;

-    ngraph::pattern::gr_callback_fn callback = [W, x, b](pattern::Matcher& m) {
-        NGRAPH_DEBUG << "In callback for construct_gemm_pattern against node = "
+    ngraph::pattern::gr_callback_fn callback = [W, x](pattern::Matcher& m) {
+        NGRAPH_DEBUG << "In callback for construct_matmul_pattern against node = "
                      << m.match_root()->get_name();
         auto pattern_map = m.get_pattern_map();
-        std::shared_ptr<Node> nn = nullptr;
+        std::shared_ptr<Node> nn;

         auto mpattern = m.match_root();
+        auto dot = m.match_root();

         if (mpattern->get_element_type() != element::f32)
         {
             NGRAPH_DEBUG << "mpattern = " << mpattern->get_name() << " type is not float!";
             return nn;
         }

-        auto dot = mpattern->get_input_op(0);
         if (dot->get_shape().size() != 2)
         {
             NGRAPH_DEBUG << "dot = " << dot->get_name() << " shape is not equal to 2!";
             return nn;
         }

+        if (shape_size(dot->get_shape()) == 0)
+        {
+            NGRAPH_DEBUG << "dot has a zero dimension";
+            return nn;
+        }
+
         bool transpose_w = false;
         Shape shape_arg0{pattern_map[W]->get_shape()};
         if (!init_cblas_arg(dot->get_input_op(0), pattern_map[W], transpose_w, shape_arg0))
@@ -166,7 +208,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_gemm_pattern()
         auto cg = std::shared_ptr<Node>(new op::MatmulBias(pattern_map[W],
                                                            pattern_map[x],
-                                                           mpattern->get_input_op(1),
+                                                           nullptr,
                                                            shape_arg0,
                                                            shape_arg1,
                                                            transpose_w,
@@ -174,7 +216,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_gemm_pattern()
         return cg;
     };

-    auto m = std::make_shared<ngraph::pattern::Matcher>(padd, callback);
+    auto m = std::make_shared<ngraph::pattern::Matcher>(pdot, callback);
     this->add_matcher(m);
 }
@@ -274,6 +316,205 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_fprop_bn()
     this->add_matcher(m);
 }

+static bool zero_padded_conv_consistency_check(
+    const std::shared_ptr<ngraph::Node>& match_root,
+    const std::shared_ptr<ngraph::op::Constant>& pad_value_op,
+    const std::shared_ptr<ngraph::Node>& pad_input,
+    const std::shared_ptr<ngraph::op::Pad>& matched_pad,
+    const std::shared_ptr<ngraph::op::Convolution>& matched_conv,
+    size_t batch_index,
+    size_t channel_index)
+{
+    // Only match float32 convolutions
+    if (match_root->get_element_type() != ngraph::element::f32)
+    {
+        return false;
+    }
+
+    // Only match zero padding
+    if (pad_value_op->get_vector<float>().at(0) != 0.0f)
+    {
+        return false;
+    }
+
+    // Only match 4D tensors
+    if (pad_input->get_shape().size() != 4)
+    {
+        return false;
+    }
+
+    // Only match no interior padding
+    if (matched_pad->get_padding_interior() != ngraph::Shape(pad_input->get_shape().size()))
+    {
+        return false;
+    }
+
+    // Only match convolutions with no padding specification
+    if (matched_conv->get_padding_below() != ngraph::CoordinateDiff(2) ||
+        matched_conv->get_padding_above() != ngraph::CoordinateDiff(2))
+    {
+        return false;
+    }
+
+    // Only match no padding in the batch dimension
+    if (matched_pad->get_padding_above().at(batch_index) != 0 ||
+        matched_pad->get_padding_below().at(batch_index) != 0)
+    {
+        return false;
+    }
+
+    // Only match no padding in the channel dimension
+    if (matched_pad->get_padding_above().at(channel_index) != 0 ||
+        matched_pad->get_padding_below().at(channel_index) != 0)
+    {
+        return false;
+    }
+
+    return true;
+}
+
+void ngraph::runtime::cpu::pass::CPUFusion::construct_zero_padded_reshaped_conv()
+{
+    auto pad_input = std::make_shared<pattern::op::Label>(element::f32, Shape{});
+    auto pad_value = std::make_shared<pattern::op::Label>(element::f32, Shape{});
+    auto pad = std::make_shared<op::Pad>(pad_input, pad_value, Shape{}, Shape{}, Shape{});
+    auto pad_label = std::make_shared<pattern::op::Label>(pad, nullptr, NodeVector{pad});
+
+    auto reshape = std::make_shared<op::Reshape>(pad_label, AxisVector{}, Shape{1, 1, 1, 1});
+    auto reshape_label =
+        std::make_shared<pattern::op::Label>(reshape, nullptr, NodeVector{reshape});
+
+    auto conv_filter = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
+
+    auto conv = std::make_shared<op::Convolution>(reshape_label,
+                                                  conv_filter,
+                                                  Strides{1, 1},
+                                                  Strides{1, 1},
+                                                  CoordinateDiff{1, 1},
+                                                  CoordinateDiff{1, 1},
+                                                  Strides{1, 1});
+    auto conv_label = std::make_shared<pattern::op::Label>(conv, nullptr, NodeVector{conv});
+
+    ngraph::pattern::gr_callback_fn callback =
+        [pad_input, pad_value, pad_label, reshape_label, conv_filter, conv_label](
+            pattern::Matcher& m) -> std::shared_ptr<Node> {
+        auto pattern_map = m.get_pattern_map();
+
+        auto pad_value_op = std::dynamic_pointer_cast<op::Constant>(pattern_map[pad_value]);
+
+        const auto& matched_conv =
+            std::dynamic_pointer_cast<op::Convolution>(pattern_map[conv_label]);
+        const auto& matched_pad = std::dynamic_pointer_cast<op::Pad>(pattern_map[pad_label]);
+        const auto& matched_reshape =
+            std::dynamic_pointer_cast<op::Reshape>(pattern_map[reshape_label]);
+
+        const auto& input_order = matched_reshape->get_input_order();
+        auto hoisted_reshape_output_shape =
+            apply_permutation<Shape::value_type>(pattern_map[pad_input]->get_shape(), input_order);
+
+        auto hoisted_reshape = std::make_shared<op::Reshape>(
+            pattern_map[pad_input],
+            input_order,
+            Shape(hoisted_reshape_output_shape.begin(), hoisted_reshape_output_shape.end()));
+
+        if (!zero_padded_conv_consistency_check(m.match_root(),
+                                                pad_value_op,
+                                                pattern_map[pad_input],
+                                                matched_pad,
+                                                matched_conv,
+                                                input_order[0],
+                                                input_order[1]))
+        {
+            return nullptr;
+        }
+
+        CoordinateDiff padding_below{static_cast<CoordinateDiff::value_type>(
+                                         matched_pad->get_padding_below().at(input_order[2])),
+                                     static_cast<CoordinateDiff::value_type>(
+                                         matched_pad->get_padding_below().at(input_order[3]))};
+        CoordinateDiff padding_above{static_cast<CoordinateDiff::value_type>(
+                                         matched_pad->get_padding_above().at(input_order[2])),
+                                     static_cast<CoordinateDiff::value_type>(
+                                         matched_pad->get_padding_above().at(input_order[3]))};
+
+        auto zero_padded_conv =
+            std::make_shared<op::Convolution>(hoisted_reshape,
+                                              pattern_map[conv_filter],
+                                              matched_conv->get_window_movement_strides(),
+                                              matched_conv->get_window_dilation_strides(),
+                                              padding_below,
+                                              padding_above,
+                                              matched_conv->get_data_dilation_strides());
+
+        return zero_padded_conv;
+    };
+
+    this->add_matcher(std::make_shared<ngraph::pattern::Matcher>(conv_label, callback));
+}
+
+void ngraph::runtime::cpu::pass::CPUFusion::construct_zero_padded_conv()
+{
+    auto pad_input = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
+    auto pad_value = std::make_shared<pattern::op::Label>(element::f32, Shape{});
+    auto pad = std::make_shared<op::Pad>(
+        pad_input, pad_value, Shape{0, 0, 0, 0}, Shape{0, 0, 0, 0}, Shape{0, 0, 0, 0});
+    auto pad_label = std::make_shared<pattern::op::Label>(pad, nullptr, NodeVector{pad});
+
+    auto conv_filter = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
+
+    auto conv = std::make_shared<op::Convolution>(pad_label,
+                                                  conv_filter,
+                                                  Strides{1, 1},
+                                                  Strides{1, 1},
+                                                  CoordinateDiff{1, 1},
+                                                  CoordinateDiff{1, 1},
+                                                  Strides{1, 1});
+    auto conv_label = std::make_shared<pattern::op::Label>(conv, nullptr, NodeVector{conv});
+
+    ngraph::pattern::gr_callback_fn callback =
+        [pad_input, pad_value, pad_label, conv_filter, conv_label](
+            pattern::Matcher& m) -> std::shared_ptr<Node> {
+        auto pattern_map = m.get_pattern_map();
+
+        auto pad_value_op = std::dynamic_pointer_cast<op::Constant>(pattern_map[pad_value]);
+
+        const auto& matched_conv =
+            std::dynamic_pointer_cast<op::Convolution>(pattern_map[conv_label]);
+        const auto& matched_pad = std::dynamic_pointer_cast<op::Pad>(pattern_map[pad_label]);
+
+        if (!zero_padded_conv_consistency_check(m.match_root(),
+                                                pad_value_op,
+                                                pattern_map[pad_input],
+                                                matched_pad,
+                                                matched_conv,
+                                                0,
+                                                1))
+        {
+            return nullptr;
+        }
+
+        CoordinateDiff padding_below{
+            static_cast<CoordinateDiff::value_type>(matched_pad->get_padding_below().at(2)),
+            static_cast<CoordinateDiff::value_type>(matched_pad->get_padding_below().at(3))};
+        CoordinateDiff padding_above{
+            static_cast<CoordinateDiff::value_type>(matched_pad->get_padding_above().at(2)),
+            static_cast<CoordinateDiff::value_type>(matched_pad->get_padding_above().at(3))};
+
+        auto zero_padded_conv =
+            std::make_shared<op::Convolution>(pattern_map[pad_input],
+                                              pattern_map[conv_filter],
+                                              matched_conv->get_window_movement_strides(),
+                                              matched_conv->get_window_dilation_strides(),
+                                              padding_below,
+                                              padding_above,
+                                              matched_conv->get_data_dilation_strides());
+
+        return zero_padded_conv;
+    };
+
+    this->add_matcher(std::make_shared<ngraph::pattern::Matcher>(conv_label, callback));
+}
+
 void ngraph::runtime::cpu::pass::CPUFusion::construct_conv_bias()
 {
     Shape shape{2, 2, 1, 1};
src/ngraph/runtime/cpu/pass/cpu_fusion.hpp  (View file @ dd5bd9ad)

@@ -38,13 +38,19 @@
 public:
     CPUFusion()
         : GraphRewrite()
     {
-        construct_gemm_pattern();
+        construct_matmul_pattern();
+        construct_matmulbias_pattern();
         construct_fprop_bn();
+        construct_zero_padded_reshaped_conv();
+        construct_zero_padded_conv();
         construct_conv_bias();
     }

 private:
-    void construct_gemm_pattern();
+    void construct_matmul_pattern();
+    void construct_matmulbias_pattern();
     void construct_fprop_bn();
     void construct_conv_bias();
+    void construct_zero_padded_reshaped_conv();
+    void construct_zero_padded_conv();
 };
src/ngraph/runtime/cpu/pass/cpu_layout.cpp  (View file @ dd5bd9ad)

@@ -31,6 +31,7 @@
 #include "ngraph/ops/convolution.hpp"
 #include "ngraph/ops/op.hpp"
 #include "ngraph/ops/relu.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
 #include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
 #include "ngraph/runtime/cpu/mkldnn_utils.hpp"
@@ -629,6 +630,16 @@ namespace ngraph
                 }
             }

+            template <>
+            void CPULayout::LAYOUT_DECL(ngraph::op::Result)
+            {
+                auto input_layout =
+                    runtime::cpu::mkldnn_utils::get_input_mkldnn_format(node.get(), 0);
+
+                vector<memory::format> prim_output_formats;
+                prim_output_formats.push_back(input_layout);
+                set_output_layouts(node, prim_output_formats);
+            }
+
             template <>
             void CPULayout::LAYOUT_DECL(ngraph::op::Relu)
             {
@@ -708,6 +719,7 @@ static const runtime::cpu::pass::LayoutOpMap s_dispatcher{
     {TI(ngraph::op::AvgPoolBackprop),
      &runtime::cpu::pass::CPULayout::layout<ngraph::op::AvgPoolBackprop>},
     {TI(ngraph::op::Relu), &runtime::cpu::pass::CPULayout::layout<ngraph::op::Relu>},
+    {TI(ngraph::op::Result), &runtime::cpu::pass::CPULayout::layout<ngraph::op::Result>},
     {TI(ngraph::op::ReluBackprop),
      &runtime::cpu::pass::CPULayout::layout<ngraph::op::ReluBackprop>},
 };
src/ngraph/runtime/interpreter/int_call_frame.cpp  (View file @ dd5bd9ad)

@@ -18,6 +18,7 @@
 #include <cstdlib>
 #include <iomanip>

+#include "ngraph/ops/result.hpp"
 #include "ngraph/runtime/host_tensor_view.hpp"
 #include "ngraph/runtime/interpreter/int_call_frame.hpp"
@@ -52,32 +53,17 @@ void runtime::interpreter::INT_CallFrame::call(
         tensor_map.insert({tv, input_tvs[arg_index++]});
     }
 }

-    std::vector<size_t> aliased_outputs;
-    for (size_t i = 0; i < output_tvs.size(); i++)
+    for (size_t i = 0; i < function->get_output_size(); i++)
     {
-        shared_ptr<Node> op = function->get_output_op(i);
-        descriptor::TensorView* tv = op->get_output_tensor_view(0).get();
-        string name = tv->get_tensor().get_name();
-        if (contains_key(tensor_map, tv))
-        {
-            if (op->description() == "Parameter")
-            {
-                // Here we handle the special case where an output is just a copy of an input
-                memcpy(output_tvs[i]->get_data_ptr(),
-                       tensor_map.at(tv)->get_data_ptr(),
-                       tv->get_tensor().size());
-            }
-            else
-            {
-                // This is a computed value returned more than once and will need to be copied at the end
-                aliased_outputs.push_back(i);
-            }
-        }
-        else
-        {
-            tensor_map.insert({tv, output_tvs[i]});
-        }
+        auto output_op = function->get_output_op(i);
+        if (!std::dynamic_pointer_cast<op::Result>(output_op))
+        {
+            throw ngraph_error("One of function's outputs isn't op::Result");
+        }
+        descriptor::TensorView* tv =
+            function->get_output_op(i)->get_output_tensor_view(0).get();
+        tensor_map.insert({tv, output_tvs[i]});
     }

     // Invoke computation
     for (shared_ptr<Node> op : function->get_ordered_ops())
@@ -163,29 +149,6 @@ void runtime::interpreter::INT_CallFrame::call(
             }
         }
     }
-
-    for (size_t i : aliased_outputs)
-    {
-        shared_ptr<Node> op = function->get_output_op(i);
-        size_t first_output;
-        for (first_output = 0; first_output <= i; ++first_output)
-        {
-            if (function->get_output_op(first_output) == op)
-            {
-                break;
-            }
-        }
-        if (first_output == i)
-        {
-            throw ngraph_error("Internal error: duplicate output missing");
-        }
-        descriptor::TensorView* tv = op->get_output_tensor_view(0).get();
-        string name = tv->get_tensor().get_name();
-        // Here we handle the special case where an output is just a copy of an input
-        memcpy(output_tvs[i]->get_data_ptr(),
-               output_tvs[first_output]->get_data_ptr(),
-               tv->get_tensor().size());
-    }
 }

 void runtime::interpreter::INT_CallFrame::generate_calls(
src/ngraph/runtime/interpreter/int_call_frame.hpp  (View file @ dd5bd9ad)

@@ -39,6 +39,7 @@
 #include "ngraph/ops/reduce_window.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
 #include "ngraph/ops/slice.hpp"
@@ -89,6 +90,7 @@
 #include "ngraph/runtime/kernel/relu.hpp"
 #include "ngraph/runtime/kernel/replace_slice.hpp"
 #include "ngraph/runtime/kernel/reshape.hpp"
+#include "ngraph/runtime/kernel/result.hpp"
 #include "ngraph/runtime/kernel/reverse.hpp"
 #include "ngraph/runtime/kernel/select.hpp"
 #include "ngraph/runtime/kernel/select_and_scatter.hpp"
@@ -720,6 +722,13 @@ private:
                            reshape->get_input_order(),
                            out[0]->get_shape());
     }
+    else if (node_op == "Result")
+    {
+        ngraph::op::Result* res = dynamic_cast<ngraph::op::Result*>(&node);
+        kernel::result(reinterpret_cast<T*>(args[0]->get_data_ptr()),
+                       reinterpret_cast<T*>(out[0]->get_data_ptr()),
+                       shape_size(res->get_shape()));
+    }
     else if (node_op == "Reverse")
     {
         ngraph::op::Reverse* reverse = dynamic_cast<ngraph::op::Reverse*>(&node);
src/ngraph/runtime/kernel/result.hpp  (new file, 0 → 100644)  (View file @ dd5bd9ad)

// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------

#pragma once

#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>

#include "ngraph/shape.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace kernel
        {
            template <typename T>
            void result(T* arg, T* out, size_t count)
            {
                memcpy(out, arg, sizeof(T) * count);
            }
        }
    }
}
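A hedged note on the kernel above: it is a plain element copy, so the emitted Result op just moves the already-computed tensor into the output buffer. A minimal call looks like this; the buffers and sizes are illustrative only.

    // Sketch only: copy four floats from an intermediate buffer into an output buffer.
    float src[4] = {1.f, 2.f, 3.f, 4.f};
    float dst[4];
    ngraph::runtime::kernel::result<float>(src, dst, 4);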
src/ngraph/serializer.cpp  (View file @ dd5bd9ad)

@@ -64,6 +64,7 @@
 #include "ngraph/ops/remainder.hpp"
 #include "ngraph/ops/replace_slice.hpp"
 #include "ngraph/ops/reshape.hpp"
+#include "ngraph/ops/result.hpp"
 #include "ngraph/ops/reverse.hpp"
 #include "ngraph/ops/select.hpp"
 #include "ngraph/ops/select_and_scatter.hpp"
@@ -667,6 +668,10 @@ static shared_ptr<ngraph::Function>
         auto output_shape = node_js.at("output_shape").get<vector<size_t>>();
         node = make_shared<op::Reshape>(args[0], input_order, output_shape);
     }
+    else if (node_op == "Result")
+    {
+        node = make_shared<op::Result>(args[0]);
+    }
     else if (node_op == "Reverse")
     {
         auto reversed_axes = node_js.at("reversed_axes").get<set<size_t>>();
@@ -1061,6 +1066,9 @@ static json write(const Node& n)
         node["input_order"] = tmp->get_input_order();
         node["output_shape"] = tmp->get_output_shape();
     }
+    else if (node_op == "Result")
+    {
+    }
     else if (node_op == "Reverse")
     {
         auto tmp = dynamic_cast<const op::Reverse*>(&n);
src/ngraph/util.cpp  (View file @ dd5bd9ad)

@@ -25,9 +25,12 @@
 #include "ngraph/graph_util.hpp"
 #include "ngraph/log.hpp"
 #include "ngraph/node.hpp"
+#include "ngraph/ops/result_vector.hpp"
 #include "ngraph/runtime/backend.hpp"
 #include "ngraph/util.hpp"

+#include <iostream>
+
 using namespace std;

 std::string ngraph::to_cplusplus_sourcecode_literal(bool val)
@@ -239,10 +242,21 @@ ngraph::FpropCache ngraph::cache_fprop(std::shared_ptr<ngraph::Function> fprop,
     }

     // create the new outputs for fprop and the new fprop function
-    NodeVector fprop_outputs{fprop->get_results()};
-    fprop_outputs.insert(fprop_outputs.end(),
-                         fprop_cache.fprop_output_nodes.begin(),
-                         fprop_cache.fprop_output_nodes.end());
+    ResultVector fprop_outputs;
+
+    for (auto fpr : fprop->get_results())
+    {
+        fprop_outputs.push_back(fpr);
+    }
+
+    for (auto fpir : fprop_cache.fprop_output_nodes)
+    {
+        if (std::dynamic_pointer_cast<op::Result>(fpir))
+        {
+            throw ngraph_error("Expected op::Result in fprop->get_results()");
+        }
+        fprop_outputs.push_back(std::make_shared<op::Result>(fpir));
+    }

     fprop_cache.fprop = std::make_shared<Function>(fprop_outputs, fprop->get_parameters());
@@ -251,10 +265,15 @@ ngraph::FpropCache ngraph::cache_fprop(std::shared_ptr<ngraph::Function> fprop,
     ngraph::clone_nodes(bprop->get_ops(), node_param_map);

     // get cloned bprop results
-    NodeVector cloned_results;
+    ResultVector cloned_results;
     for (auto node : bprop->get_results())
     {
-        cloned_results.push_back(node_param_map.get(node));
+        auto result = std::dynamic_pointer_cast<op::Result>(node_param_map.get(node));
+        if (!result)
+        {
+            throw ngraph_error("Expected op::Result values for op::Result keys in node_param_map");
+        }
+        cloned_results.push_back(result);
     }

     // get clone bprop parameters
test/build_graph.cpp  (View file @ dd5bd9ad)

@@ -40,7 +40,7 @@ TEST(build_graph, build_simple)
     auto cluster_0 = make_shared<Function>(dot, op::ParameterVector{arg0, arg1, arg2, arg3});

-    ASSERT_EQ(cluster_0->get_output_op(0), dot);
+    ASSERT_EQ(cluster_0->get_output_op(0)->get_input_op(0), dot);
 }

 // Check node comparisons
test/cpu_fusion.cpp
View file @
dd5bd9ad
...
...
@@ -134,6 +134,42 @@ TEST(cpu_fusion, gemm_cpu)
     ASSERT_TRUE(read_vector<float>(result) == expected);
 }
 
+TEST(cpu_fusion, gemm_cpu_no_bias)
+{
+    auto shapeA = Shape{3, 2};
+    auto shapeB = Shape{2, 3};
+    auto shapeC = Shape{2, 2};
+    auto A = make_shared<op::Parameter>(element::f32, shapeA);
+    auto B = make_shared<op::Parameter>(element::f32, shapeB);
+
+    auto reshape_w = make_shared<op::Reshape>(A, AxisVector{1, 0}, Shape{2, 3});
+    auto reshape_x = make_shared<op::Reshape>(B, AxisVector{1, 0}, Shape{3, 2});
+
+    auto cg = make_shared<op::MatmulBias>(
+        A, B, nullptr, A->get_shape(), B->get_shape(), true, true);
+
+    auto f = make_shared<Function>(cg, op::ParameterVector{A, B});
+
+    auto manager = runtime::Manager::get("CPU");
+    auto external = manager->compile(f);
+    auto backend = manager->allocate_backend();
+    auto cf = backend->make_call_frame(external);
+
+    shared_ptr<runtime::TensorView> a = backend->make_primary_tensor_view(element::f32, shapeA);
+    shared_ptr<runtime::TensorView> b = backend->make_primary_tensor_view(element::f32, shapeB);
+    shared_ptr<runtime::TensorView> result = backend->make_primary_tensor_view(element::f32, shapeC);
+
+    vector<float> dataA{1.0f, 4.0f, 1.0f, 4.0f, 1.0f, 4.0f};
+    vector<float> dataB{3.0f, 3.0f, 3.0f, 9.0f, 9.0f, 9.0f};
+
+    copy_data(a, dataA);
+    copy_data(b, dataB);
+
+    cf->call({a, b}, {result});
+    vector<float> expected{9, 27, 36, 108};
+    ASSERT_TRUE(read_vector<float>(result) == expected);
+}
 
 TEST(cpu_fusion, cpu_fusion_pass_basic)
 {
     Shape shape{};
...
...
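A quick sanity check on the expected values in gemm_cpu_no_bias may help here: with both transpose flags set, MatmulBias should compute transpose(A) * transpose(B). The standalone snippet below (plain C++, written for this review, independent of ngraph) reproduces the test's {9, 27, 36, 108} vector.

// Standalone check of the expected output for gemm_cpu_no_bias.
// A is 3x2 (row-major {1,4,1,4,1,4}), B is 2x3 (row-major {3,3,3,9,9,9});
// with both transpose flags set the op computes A^T (2x3) times B^T (3x2).
#include <cstdio>
#include <vector>

int main()
{
    std::vector<float> A{1, 4, 1, 4, 1, 4}; // 3x2
    std::vector<float> B{3, 3, 3, 9, 9, 9}; // 2x3
    float C[2][2] = {};
    for (int i = 0; i < 2; ++i)     // rows of A^T
        for (int j = 0; j < 2; ++j) // cols of B^T
            for (int k = 0; k < 3; ++k)
                C[i][j] += A[k * 2 + i] * B[j * 3 + k];
    // Prints 9 27 36 108, matching the test's expected vector.
    std::printf("%g %g %g %g\n", C[0][0], C[0][1], C[1][0], C[1][1]);
}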
@@ -155,6 +191,50 @@ TEST(cpu_fusion, cpu_fusion_pass_basic)
     ASSERT_NE(std::dynamic_pointer_cast<op::MatmulBias>(graph->get_input_op(0)), nullptr);
 }
 
+TEST(cpu_fusion, cpu_fusion_pass_matmul_bias)
+{
+    Shape shape_w{2, 4};
+    Shape shape_x{4, 1};
+    Shape shape_b{1};
+    auto W = make_shared<op::Parameter>(element::f32, shape_w);
+    auto x = make_shared<op::Parameter>(element::f32, shape_x);
+    auto b = make_shared<op::Parameter>(element::f32, shape_b);
+
+    auto mmb = std::make_shared<op::MatmulBias>(
+        W, x, nullptr, W->get_shape(), x->get_shape(), false, false);
+    auto broadcast = std::make_shared<op::Broadcast>(b, mmb->get_shape(), AxisSet{0});
+    auto add = mmb + broadcast;
+
+    auto graph = make_shared<op::Abs>(add);
+    pass::Manager pass_manager;
+    pass_manager.register_pass<runtime::cpu::pass::CPUFusion>();
+    auto func = make_shared<Function>(graph, op::ParameterVector{W, x, b});
+    pass_manager.run_passes(func);
+    auto gmm = graph->get_input_op(0);
+    ASSERT_TRUE(std::dynamic_pointer_cast<op::MatmulBias>(gmm));
+    ASSERT_EQ(gmm->get_input_op(2), broadcast);
+}
+
+TEST(cpu_fusion, cpu_fusion_pass_matmul_no_bias)
+{
+    Shape shape_w{4, 2};
+    Shape shape_x{1, 4};
+    auto W = make_shared<op::Parameter>(element::f32, shape_w);
+    auto x = make_shared<op::Parameter>(element::f32, shape_x);
+
+    auto reshape_w = std::make_shared<op::Reshape>(W, AxisVector{1, 0}, Shape{2, 4});
+    auto reshape_x = std::make_shared<op::Reshape>(x, AxisVector{1, 0}, Shape{4, 1});
+    auto re_dot = make_shared<op::Dot>(reshape_w, reshape_x);
+    auto graph = make_shared<op::Abs>(re_dot);
+
+    pass::Manager pass_manager;
+    pass_manager.register_pass<runtime::cpu::pass::CPUFusion>();
+    auto func = make_shared<Function>(graph, op::ParameterVector{W, x});
+    pass_manager.run_passes(func);
+    size_t mmb = count_ops_of_type<op::MatmulBias>(func);
+    ASSERT_EQ(mmb, 1);
+}
+
 TEST(cpu_fusion, gemm_mlp)
 {
     const string json_path = file_util::path_join(SERIALIZED_ZOO, "mxnet/mnist_mlp_forward.json");
...
...
@@ -164,8 +244,8 @@ TEST(cpu_fusion, gemm_mlp)
     pass::Manager pass_manager;
     pass_manager.register_pass<runtime::cpu::pass::CPUFusion>();
     pass_manager.run_passes(func);
-    size_t ccg = count_ops_of_type<op::MatmulBias>(func);
-    ASSERT_EQ(ccg, 3);
+    size_t mmb = count_ops_of_type<op::MatmulBias>(func);
+    ASSERT_EQ(mmb, 3);
 }
 
 //TODO: Move this test to backend_test.in.cpp once we have the INTERPRETER
...
...
@@ -403,6 +483,98 @@ TEST(cpu_fusion, bn_bprop_n4c3h2w2)
     vector<float> expected_dbeta{320.f, 320.f, 320.f};
     ASSERT_TRUE(ngraph::test::all_close(read_vector<float>(_dbeta), expected_dbeta, 1e-4f, 1e-8f));
 }
 
+TEST(cpu_fusion, zero_padded_reshaped_conv)
+{
+    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 1});
+    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
+
+    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
+
+    auto pad = make_shared<op::Pad>(
+        X, pad_value, Shape{0, 1, 0, 0}, Shape{0, 0, 1, 0}, Shape{0, 0, 0, 0});
+
+    auto reshape = make_shared<op::Reshape>(pad, AxisVector{0, 3, 1, 2}, Shape{1, 1, 3, 3});
+
+    auto conv = make_shared<op::Convolution>(reshape,
+                                             F,
+                                             Strides{1, 1},
+                                             Strides{1, 1},
+                                             CoordinateDiff{0, 0},
+                                             CoordinateDiff{0, 0},
+                                             Strides{1, 1});
+
+    auto func = make_shared<Function>(conv, op::ParameterVector{X, F});
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
+
+    auto manager = runtime::Manager::get("CPU");
+    auto external = manager->compile(func);
+    auto backend = manager->allocate_backend();
+    auto cf = backend->make_call_frame(external);
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
+}
+
+TEST(cpu_fusion, zero_padded_conv)
+{
+    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
+    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
+
+    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
+
+    auto pad = make_shared<op::Pad>(
+        X, pad_value, Shape{0, 0, 0, 1}, Shape{0, 0, 1, 0}, Shape{0, 0, 0, 0});
+
+    auto conv = make_shared<op::Convolution>(pad,
+                                             F,
+                                             Strides{1, 1},
+                                             Strides{1, 1},
+                                             CoordinateDiff{0, 0},
+                                             CoordinateDiff{0, 0},
+                                             Strides{1, 1});
+
+    auto func = make_shared<Function>(conv, op::ParameterVector{X, F});
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
+
+    auto manager = runtime::Manager::get("CPU");
+    auto external = manager->compile(func);
+    auto backend = manager->allocate_backend();
+    auto cf = backend->make_call_frame(external);
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
+}
+
+TEST(cpu_fusion, non_zero_padded_conv)
+{
+    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
+    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
+
+    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{1.0f});
+
+    auto pad = make_shared<op::Pad>(
+        X, pad_value, Shape{0, 0, 0, 1}, Shape{0, 0, 1, 0}, Shape{0, 0, 0, 0});
+
+    auto conv = make_shared<op::Convolution>(pad,
+                                             F,
+                                             Strides{1, 1},
+                                             Strides{1, 1},
+                                             CoordinateDiff{0, 0},
+                                             CoordinateDiff{0, 0},
+                                             Strides{1, 1});
+
+    auto func = make_shared<Function>(conv, op::ParameterVector{X, F});
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
+
+    auto manager = runtime::Manager::get("CPU");
+    auto external = manager->compile(func);
+    auto backend = manager->allocate_backend();
+    auto cf = backend->make_call_frame(external);
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
+}
+
 TEST(cpu_fusion, fuse_conv_bias)
 {
     pass::Manager pass_manager;
...
...
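For context on what these three tests exercise: the CPU backend's zero-padded-convolution fusion is expected to fold an op::Pad whose padding value is 0 into the convolution's own padding (so the Pad count drops to 0 after compilation), and to leave non-zero padding alone. The sketch below shows the fused form I would expect zero_padded_conv to be rewritten to; the CoordinateDiff values are my reading of the pad shapes over the spatial axes, not taken from the pass itself, and the usual ngraph test headers and using-directives are assumed.

// Hypothetical fused form of zero_padded_conv after CPU fusion (sketch only).
static shared_ptr<Function> expected_fused_zero_padded_conv()
{
    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
    // Pad below {0,0,0,1} / above {0,0,1,0} with value 0 folds into the
    // convolution's spatial padding: below (H,W) = {0,1}, above (H,W) = {1,0}.
    auto conv = make_shared<op::Convolution>(X,
                                             F,
                                             Strides{1, 1},
                                             Strides{1, 1},
                                             CoordinateDiff{0, 1},
                                             CoordinateDiff{1, 0},
                                             Strides{1, 1});
    return make_shared<Function>(conv, op::ParameterVector{X, F});
}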
test/graph_partition.cpp
...
...
@@ -218,11 +218,6 @@ public:
         {
             map_parameter_to_index[f->get_parameters().at(i)] = i;
         }
-        unordered_map<shared_ptr<Node>, size_t> map_result_to_index;
-        for (size_t i = 0; i < f->get_results().size(); ++i)
-        {
-            map_result_to_index[f->get_results().at(i)] = i;
-        }
 
         // Parameter's source is either itself, or the output node of the upstream function
         unordered_map<shared_ptr<op::Parameter>, shared_ptr<Node>> map_parameter_to_source_node;
...
...
@@ -231,6 +226,13 @@ public:
         vector<shared_ptr<Function>> funcs =
             split_function_by_placement(f, map_parameter_to_source_node);
         auto main_func = funcs.back();
 
+        unordered_map<shared_ptr<Node>, size_t> map_result_to_index;
+        for (size_t i = 0; i < main_func->get_results().size(); ++i)
+        {
+            map_result_to_index[main_func->get_results().at(i)] = i;
+        }
+
         // Make call frames
         vector<shared_ptr<runtime::CallFrame>> call_frames;
         for (auto func : funcs)
...
...
test/pass_liveness.cpp
...
...
@@ -47,14 +47,22 @@ TEST(liveness, constant)
     auto tmp = f->get_ordered_ops();
     vector<shared_ptr<Node>> sorted{tmp.begin(), tmp.end()};
-    ASSERT_EQ(2, sorted.size());
+    ASSERT_EQ(3, sorted.size());
     EXPECT_EQ(0, sorted[0]->liveness_live_list.size());
     EXPECT_EQ(0, sorted[0]->liveness_new_list.size());
     EXPECT_EQ(0, sorted[0]->liveness_free_list.size());
 
-    EXPECT_EQ(0, sorted[1]->liveness_live_list.size());
-    EXPECT_EQ(0, sorted[1]->liveness_new_list.size());
+    //op::Negative is live on output to op::Result
+    EXPECT_EQ(1, sorted[1]->liveness_live_list.size());
+    //op::Negative is new
+    EXPECT_EQ(1, sorted[1]->liveness_new_list.size());
     EXPECT_EQ(0, sorted[1]->liveness_free_list.size());
+
+    //op::Negative is live on input to op::Result
+    EXPECT_EQ(1, sorted[2]->liveness_live_list.size());
+    EXPECT_EQ(0, sorted[2]->liveness_new_list.size());
+    //op::Negative is freed
+    EXPECT_EQ(1, sorted[2]->liveness_free_list.size());
 }
 
 TEST(liveness, liveness)
...
...
test/pass_memory_layout.cpp
...
...
@@ -234,5 +234,5 @@ TEST(memory_layout, constant)
     pass_manager.run_passes(f);
     auto sorted = f->get_ordered_ops();
     size_t temporary_pool_size = f->get_temporary_pool_size();
-    EXPECT_EQ(0, temporary_pool_size);
+    EXPECT_EQ(4, temporary_pool_size);
 }
test/pattern.cpp
...
...
@@ -42,6 +42,7 @@
#include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
#include "ngraph/serializer.hpp"
#include "util/matcher.hpp"
#include "util/test_tools.hpp"
using
namespace
ngraph
;
using
namespace
std
;
...
...
@@ -89,9 +90,9 @@ bool sum_predicate(std::shared_ptr<Node> gn)
         return false;
     }
 
-    NGRAPH_DEBUG << "looking at function's result "
-                 << r->get_functions()[0]->get_result()->get_name();
-    if (auto sum = std::dynamic_pointer_cast<op::Add>(r->get_functions()[0]->get_result()))
+    auto result = r->get_functions()[0]->get_result()->get_input_op(0);
+    NGRAPH_DEBUG << "looking at function's result " << result->get_name();
+    if (auto sum = std::dynamic_pointer_cast<op::Add>(result))
     {
         auto parm1 = std::dynamic_pointer_cast<op::Parameter>(sum->get_input_op(0));
         auto parm2 = std::dynamic_pointer_cast<op::Parameter>(sum->get_input_op(1));
...
...
@@ -297,7 +298,7 @@ TEST(pattern, graph_rewrite)
         ASSERT_TRUE(graph_b->get_output_inputs(0).empty());
 
         auto expected = ngraph::NodeVector{a, b, a, c, b};
-        ASSERT_TRUE(f->get_results() == expected);
+        ASSERT_TRUE(count_ops_of_type<op::Add>(f) == 0);
     }
 
     {
...
...
test/reshape_elimination.cpp
...
...
@@ -82,3 +82,27 @@ TEST(reshape_elimination, bn_bprop_rewrite)
     size_t count_after = count_ops_of_type<op::Reshape>(func);
     ASSERT_TRUE(count_after < count_before);
 }
 
+TEST(reshape_elimination, dot_transpose_to_dot_w_transpose_args)
+{
+    Shape shape_w{2, 4};
+    Shape shape_x{4, 1};
+    auto W = make_shared<op::Parameter>(element::f32, shape_w);
+    auto x = make_shared<op::Parameter>(element::f32, shape_x);
+
+    auto dot = make_shared<op::Dot>(W, x);
+    auto reshape_dot = std::make_shared<op::Reshape>(dot, AxisVector{1, 0}, Shape{1, 2});
+    auto graph = make_shared<op::Abs>(reshape_dot);
+    pass::Manager pass_manager;
+    pass_manager.register_pass<pass::ReshapeElimination>();
+    auto func = make_shared<Function>(graph, op::ParameterVector{W, x});
+    pass_manager.run_passes(func);
+    auto gdot = graph->get_input_op(0);
+    ASSERT_TRUE(std::dynamic_pointer_cast<op::Dot>(gdot));
+    ASSERT_TRUE(std::dynamic_pointer_cast<op::Reshape>(gdot->get_input_op(0)));
+    ASSERT_TRUE(std::dynamic_pointer_cast<op::Reshape>(gdot->get_input_op(1)));
+    ASSERT_EQ(gdot->get_input_op(0)->get_input_op(0), x);
+    ASSERT_EQ(gdot->get_input_op(1)->get_input_op(0), W);
+    ASSERT_EQ(gdot->get_shape(), (Shape{1, 2}));
+}
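For reference, the rewrite this new test checks is just the transpose-of-a-product identity (my gloss, not text from the commit): $(W x)^{\top} = x^{\top} W^{\top}$, with $W \in \mathbb{R}^{2 \times 4}$ and $x \in \mathbb{R}^{4 \times 1}$, so the transposed product has shape $1 \times 2$. That is why the assertions expect gdot's first argument to be a reshape of x, its second a reshape of W, and its output shape to be Shape{1, 2}.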
test/util/test_tools.cpp
...
...
@@ -33,12 +33,13 @@ bool validate_list(const list<shared_ptr<Node>>& nodes)
         auto node_tmp = *it;
         auto dependencies_tmp = node_tmp->get_input_ops();
         vector<Node*> dependencies;
 
         for (shared_ptr<Node> n : dependencies_tmp)
         {
             dependencies.push_back(n.get());
         }
-        auto tmp = it++;
-        for (; tmp != nodes.rend(); tmp++)
+        auto tmp = it;
+        for (tmp++; tmp != nodes.rend(); tmp++)
         {
             auto dep_tmp = *tmp;
             auto found = find(dependencies.begin(), dependencies.end(), dep_tmp.get());
...
...
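The change in validate_list fixes a subtle iterator bug: `auto tmp = it++;` advances the outer reverse iterator as a side effect and leaves `tmp` pointing at the current node itself, so the inner dependency scan started one element too early and the outer loop skipped a node. The new form copies the iterator and advances only the copy. A standalone illustration (plain C++, written for this review, not from the repository):

// Demonstrates "auto tmp = it++" versus "auto tmp = it; tmp++"
// on a reverse iterator, mirroring the validate_list fix above.
#include <iostream>
#include <list>

int main()
{
    std::list<int> nodes{1, 2, 3, 4};

    // Old form: the outer iterator is advanced as a side effect and the
    // inner scan starts at the current element itself.
    auto it_old = nodes.rbegin(); // points at 4
    auto tmp_old = it_old++;      // tmp_old -> 4, it_old -> 3 (a node gets skipped)

    // Fixed form: copy first, then advance only the copy.
    auto it_new = nodes.rbegin(); // points at 4
    auto tmp_new = it_new;
    tmp_new++;                    // tmp_new -> 3, it_new still -> 4

    std::cout << *tmp_old << ' ' << *it_old << ' '
              << *tmp_new << ' ' << *it_new << '\n'; // prints "4 3 3 4"
}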