Commit ad6b0f07
Authored Jan 20, 2018 by Robert Kimball; committed by Scott Cyphers, Jan 20, 2018

using namespace flatten (#400)

* wip
* using namespace cleanup

Parent: 379300b7

Showing 18 changed files with 214 additions and 220 deletions (+214 −220):
  src/ngraph/codegen/compiler.cpp                             +27  −28
  src/ngraph/descriptor/layout/dense_tensor_view_layout.cpp   +5   −4
  src/ngraph/descriptor/layout/tensor_view_layout.cpp         +4   −4
  src/ngraph/descriptor/output.cpp                            +8   −9
  src/ngraph/descriptor/tensor.cpp                            +15  −15
  src/ngraph/descriptor/tensor_view.cpp                       +3   −2
  src/ngraph/ops/binary_elementwise_comparison.cpp            +3   −4
  src/ngraph/ops/not.cpp                                      +3   −3
  src/ngraph/pass/dump_sorted.cpp                             +5   −6
  src/ngraph/pass/liveness.cpp                                +18  −19
  src/ngraph/pass/memory_layout.cpp                           +8   −9
  src/ngraph/pass/memory_visualize.cpp                        +16  −15
  src/ngraph/runtime/cpu/cpu_kernel_emitters.cpp              +0   −0
  src/ngraph/runtime/cpu/cpu_kernel_utils.cpp                 +31  −33
  src/ngraph/runtime/manager.cpp                              +20  −19
  src/ngraph/runtime/tensor_view.cpp                          +11  −11
  test/builder.cpp                                            +25  −26
  test/tensor.cpp                                             +12  −13
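Every file below makes the same change: instead of pulling a nested namespace into file scope with a directive such as "using namespace ngraph::codegen;", each .cpp now imports only the top-level "using namespace ngraph;" and spells the sub-namespace out on each out-of-line definition. A self-contained sketch of the before/after pattern (the namespace layout mirrors the real one; the empty constructor is illustrative):

    namespace ngraph
    {
        namespace codegen
        {
            class Compiler
            {
            public:
                Compiler();
            };
        }
    }

    // Before this commit, files wrote:
    //     using namespace ngraph::codegen;
    //     Compiler::Compiler() {}
    // After it, only the top-level namespace is imported, and the definition
    // names its sub-namespace explicitly:
    using namespace ngraph;

    codegen::Compiler::Compiler()
    {
    }

    int main()
    {
        codegen::Compiler c; // unqualified "codegen" still resolves through ngraph
        return 0;
    }

The payoff is the usual one for narrowing a using-directive: fewer names are injected into file scope, so there are fewer chances for ambiguity, and each definition states which namespace owns it.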
src/ngraph/codegen/compiler.cpp

@@ -67,46 +67,45 @@
 using namespace clang;
 using namespace llvm;
 using namespace llvm::opt;
 using namespace std;
-using namespace ngraph::codegen;
+using namespace ngraph;

-static StaticCompiler s_static_compiler;
+static codegen::StaticCompiler s_static_compiler;
 static std::mutex m_mutex;

-ngraph::codegen::Module::Module(std::unique_ptr<llvm::Module> module)
+codegen::Module::Module(std::unique_ptr<llvm::Module> module)
     : m_module(move(module))
 {
 }

-ngraph::codegen::Module::~Module()
+codegen::Module::~Module()
 {
 }

-std::unique_ptr<llvm::Module> ngraph::codegen::Module::take_module()
+std::unique_ptr<llvm::Module> codegen::Module::take_module()
 {
     return move(m_module);
 }

-Compiler::Compiler()
+codegen::Compiler::Compiler()
 {
 }

-Compiler::~Compiler()
+codegen::Compiler::~Compiler()
 {
 }

-void Compiler::set_precompiled_header_source(const std::string& source)
+void codegen::Compiler::set_precompiled_header_source(const std::string& source)
 {
     s_static_compiler.set_precompiled_header_source(source);
 }

-void Compiler::add_header_search_path(const std::string& path)
+void codegen::Compiler::add_header_search_path(const std::string& path)
 {
     s_static_compiler.add_header_search_path(path);
 }

-std::unique_ptr<ngraph::codegen::Module> Compiler::compile(const std::string& source)
+std::unique_ptr<codegen::Module> codegen::Compiler::compile(const std::string& source)
 {
     lock_guard<mutex> lock(m_mutex);
     return s_static_compiler.compile(m_compiler_action, source);

@@ -120,7 +119,7 @@ static std::string GetExecutablePath(const char* Argv0)
     return llvm::sys::fs::getMainExecutable(Argv0, MainAddr);
 }

-StaticCompiler::StaticCompiler()
+codegen::StaticCompiler::StaticCompiler()
     : m_precompiled_header_valid(false)
     , m_debuginfo_enabled(false)
     , m_enable_diag_output((std::getenv("NGRAPH_COMPILER_DIAG_ENABLE") != nullptr))

@@ -129,7 +128,7 @@ StaticCompiler::StaticCompiler()
     initialize();
 }

-void StaticCompiler::initialize()
+void codegen::StaticCompiler::initialize()
 {
     m_extra_search_path_list.clear();
 #if NGCPU_DEBUGINFO

@@ -223,7 +222,7 @@ void StaticCompiler::initialize()
     TO.FeaturesAsWritten.emplace_back("+fma");
 }

-StaticCompiler::~StaticCompiler()
+codegen::StaticCompiler::~StaticCompiler()
 {
     // This is causing a segfault after program terminates
     // will address later

@@ -239,10 +238,10 @@ StaticCompiler::~StaticCompiler()
     // }
 }

-bool StaticCompiler::is_version_number(const string& path)
+bool codegen::StaticCompiler::is_version_number(const string& path)
 {
     bool rc = true;
-    vector<string> tokens = ngraph::split(path, '.');
+    vector<string> tokens = split(path, '.');
     for (string s : tokens)
     {
         for (char c : s)

@@ -256,7 +255,7 @@ bool StaticCompiler::is_version_number(const string& path)
     return rc;
 }

-void StaticCompiler::add_header_search_path(const string& path)
+void codegen::StaticCompiler::add_header_search_path(const string& path)
 {
     if (!contains(m_extra_search_path_list, path))
     {

@@ -266,9 +265,9 @@ void StaticCompiler::add_header_search_path(const string& path)
     }
 }

-std::unique_ptr<ngraph::codegen::Module>
-    StaticCompiler::compile(std::unique_ptr<clang::CodeGenAction>& m_compiler_action,
-                            const string& source)
+std::unique_ptr<codegen::Module>
+    codegen::StaticCompiler::compile(std::unique_ptr<clang::CodeGenAction>& m_compiler_action,
+                                     const string& source)
 {
     PreprocessorOptions& preprocessor_options = m_compiler->getInvocation().getPreprocessorOpts();
     if (!m_precompiled_header_valid && m_precomiled_header_source.empty() == false)

@@ -304,25 +303,25 @@ std::unique_ptr<ngraph::codegen::Module>
     preprocessor_options.RemappedFileBuffers.pop_back();

-    unique_ptr<ngraph::codegen::Module> result;
+    unique_ptr<codegen::Module> result;
     if (rc)
     {
-        result = move(unique_ptr<ngraph::codegen::Module>(new ngraph::codegen::Module(move(rc))));
+        result = move(unique_ptr<codegen::Module>(new codegen::Module(move(rc))));
     }
     else
     {
-        result = move(unique_ptr<ngraph::codegen::Module>(nullptr));
+        result = move(unique_ptr<codegen::Module>(nullptr));
     }
     if (reinitialize)
     {
-        StaticCompiler::initialize();
+        codegen::StaticCompiler::initialize();
     }
     return result;
 }

-void StaticCompiler::generate_pch(const string& source)
+void codegen::StaticCompiler::generate_pch(const string& source)
 {
     PreprocessorOptions& preprocessor_options = m_compiler->getInvocation().getPreprocessorOpts();
     m_pch_path = file_util::tmp_filename();

@@ -346,7 +345,7 @@ void StaticCompiler::generate_pch(const string& source)
     delete compilerAction;
 }

-void StaticCompiler::configure_search_path()
+void codegen::StaticCompiler::configure_search_path()
 {
 #ifdef USE_BUILTIN
     load_headers_from_resource();

@@ -400,7 +399,7 @@ void StaticCompiler::configure_search_path()
 #endif
 }

-void StaticCompiler::load_headers_from_resource()
+void codegen::StaticCompiler::load_headers_from_resource()
 {
     HeaderSearchOptions& hso = m_compiler->getInvocation().getHeaderSearchOpts();
     PreprocessorOptions& preprocessor_options = m_compiler->getInvocation().getPreprocessorOpts();

@@ -423,7 +422,7 @@ void StaticCompiler::load_headers_from_resource()
     }
 }

-void StaticCompiler::set_precompiled_header_source(const std::string& source)
+void codegen::StaticCompiler::set_precompiled_header_source(const std::string& source)
 {
     m_precomiled_header_source = source;
 }
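A note on the structure this file keeps throughout the rewrite: every Compiler method forwards to one file-local StaticCompiler, serialized by a file-local mutex, and the diff only re-spells those statics with an explicit codegen:: prefix. A reduced sketch of that facade-over-a-shared-singleton arrangement (signatures simplified; the real compile() takes a clang::CodeGenAction and returns a Module):

    #include <mutex>
    #include <string>

    namespace ngraph
    {
        namespace codegen
        {
            class StaticCompiler
            {
            public:
                void compile(const std::string& source) { /* expensive shared state */ }
            };

            class Compiler
            {
            public:
                void compile(const std::string& source);
            };
        }
    }

    using namespace ngraph;

    // With only "using namespace ngraph;" in effect, file-scope statics need
    // the codegen:: prefix, exactly as in the diff above.
    static codegen::StaticCompiler s_static_compiler;
    static std::mutex m_mutex;

    void codegen::Compiler::compile(const std::string& source)
    {
        std::lock_guard<std::mutex> lock(m_mutex); // serialize use of the shared instance
        s_static_compiler.compile(source);
    }

    int main()
    {
        codegen::Compiler().compile("int f() { return 0; }");
        return 0;
    }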
src/ngraph/descriptor/layout/dense_tensor_view_layout.cpp

@@ -18,12 +18,12 @@
 #include "ngraph/types/element_type.hpp"
 #include "ngraph/types/type.hpp"

-using namespace ngraph::descriptor::layout;
+using namespace ngraph;

 using ngraph::Shape;
 using ngraph::descriptor::TensorView;
 using ngraph::TensorViewType;

-DenseTensorViewLayout::DenseTensorViewLayout(const TensorView& tensor_view)
+descriptor::layout::DenseTensorViewLayout::DenseTensorViewLayout(const TensorView& tensor_view)
     : TensorViewLayout(tensor_view)
 {
     auto tensor_view_type = tensor_view.get_tensor_view_type();

@@ -32,7 +32,8 @@ DenseTensorViewLayout::DenseTensorViewLayout(const TensorView& tensor_view)
     m_strides = ngraph::row_major_strides(shape);
 }

-size_t DenseTensorViewLayout::get_index_offset(const std::vector<size_t>& indices)
+size_t descriptor::layout::DenseTensorViewLayout::get_index_offset(
+    const std::vector<size_t>& indices)
 {
     if (indices.size() != m_strides.size())
     {

@@ -46,7 +47,7 @@ size_t DenseTensorViewLayout::get_index_offset(const std::vector<size_t>& indice
     return result;
 }

-bool DenseTensorViewLayout::operator==(const TensorViewLayout& other) const
+bool descriptor::layout::DenseTensorViewLayout::operator==(const TensorViewLayout& other) const
 {
     const DenseTensorViewLayout* p_other = dynamic_cast<const DenseTensorViewLayout*>(&other);
     if (nullptr == p_other)
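The constructor above stores row-major strides (m_strides = ngraph::row_major_strides(shape)) and get_index_offset folds an index vector against them. As a worked check of that arithmetic (my own example, not part of the diff): for shape {2, 3, 4} the row-major strides are {12, 4, 1}, so index {1, 2, 3} maps to offset 1*12 + 2*4 + 3*1 = 23. A standalone sketch of both helpers under those assumptions:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Stand-ins for ngraph::row_major_strides and
    // DenseTensorViewLayout::get_index_offset (hypothetical, simplified).
    std::vector<size_t> row_major_strides(const std::vector<size_t>& shape)
    {
        std::vector<size_t> strides(shape.size());
        size_t stride = 1;
        for (size_t i = shape.size(); i-- > 0;)
        {
            strides[i] = stride; // the innermost axis varies fastest
            stride *= shape[i];
        }
        return strides;
    }

    size_t get_index_offset(const std::vector<size_t>& strides, const std::vector<size_t>& indices)
    {
        size_t result = 0;
        for (size_t i = 0; i < indices.size(); i++)
        {
            result += strides[i] * indices[i]; // dot product of indices and strides
        }
        return result;
    }

    int main()
    {
        auto strides = row_major_strides({2, 3, 4}); // {12, 4, 1}
        assert(get_index_offset(strides, {1, 2, 3}) == 23);
        return 0;
    }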
src/ngraph/descriptor/layout/tensor_view_layout.cpp

@@ -17,19 +17,19 @@
 #include "ngraph/types/element_type.hpp"
 #include "ngraph/types/type.hpp"

-using namespace ngraph::descriptor::layout;
+using namespace ngraph;

-TensorViewLayout::TensorViewLayout(const ngraph::descriptor::TensorView& tensor_view)
+descriptor::layout::TensorViewLayout::TensorViewLayout(const descriptor::TensorView& tensor_view)
     : m_tensor_view_type(tensor_view.get_tensor_view_type())
 {
 }

-const ngraph::element::Type& TensorViewLayout::get_element_type() const
+const element::Type& descriptor::layout::TensorViewLayout::get_element_type() const
 {
     return m_tensor_view_type->get_element_type();
 }

-const ngraph::Shape& TensorViewLayout::get_shape() const
+const Shape& descriptor::layout::TensorViewLayout::get_shape() const
 {
     return m_tensor_view_type->get_shape();
 }
src/ngraph/descriptor/output.cpp

@@ -18,9 +18,8 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

-Output::Output(Node* node, size_t index, const std::shared_ptr<TensorView>& tensor_view)
+descriptor::Output::Output(Node* node, size_t index, const shared_ptr<TensorView>& tensor_view)
     : m_node(node)
     , m_index(index)
     , m_tensor_view(tensor_view)

@@ -28,37 +27,37 @@ Output::Output(Node* node, size_t index, const std::shared_ptr<TensorView>& tens
 }

 // Add an input to the vector of inputs that use this output.
-void Output::add_input(Input* input)
+void descriptor::Output::add_input(Input* input)
 {
     m_inputs.insert(input);
 }

-void Output::remove_input(Input* input)
+void descriptor::Output::remove_input(Input* input)
 {
     m_inputs.erase(input);
 }

-std::shared_ptr<Node> Output::get_node() const
+shared_ptr<Node> descriptor::Output::get_node() const
 {
     return m_node->shared_from_this();
 }

-Tensor& Output::get_tensor() const
+descriptor::Tensor& descriptor::Output::get_tensor() const
 {
     return m_tensor_view->get_tensor();
 }

-std::shared_ptr<const TensorViewType> Output::get_tensor_view_type() const
+shared_ptr<const TensorViewType> descriptor::Output::get_tensor_view_type() const
 {
     return get_tensor_view()->get_tensor_view_type();
 }

-const Shape& Output::get_shape() const
+const Shape& descriptor::Output::get_shape() const
 {
     return get_tensor_view_type()->get_shape();
 }

-const element::Type& Output::get_element_type() const
+const element::Type& descriptor::Output::get_element_type() const
 {
     return get_tensor_view_type()->get_element_type();
 }
src/ngraph/descriptor/tensor.cpp

@@ -17,14 +17,14 @@
 #include "ngraph/node.hpp"

 using namespace ngraph;
-using namespace ngraph::descriptor;
+using namespace std;

-Tensor::Tensor(const element::Type& element_type,
-               PrimaryTensorView* primary_tensor_view,
-               const std::string& name,
-               bool is_output,
-               bool is_input,
-               bool is_constant)
+descriptor::Tensor::Tensor(const element::Type& element_type,
+                           PrimaryTensorView* primary_tensor_view,
+                           const string& name,
+                           bool is_output,
+                           bool is_input,
+                           bool is_constant)
     : m_element_type(element_type)
     , m_primary_tensor_view(primary_tensor_view)
     , m_is_output{is_output}

@@ -42,32 +42,32 @@ Tensor::Tensor(const element::Type& element_type,
     m_size = size * m_element_type.size();
 }

-std::string Tensor::make_tensor_name(const Node* node, size_t value_index)
+string descriptor::Tensor::make_tensor_name(const Node* node, size_t value_index)
 {
-    return node->get_node_id() + "_" + std::to_string(value_index);
+    return node->get_node_id() + "_" + to_string(value_index);
 }

-std::string Tensor::get_next_view_name()
+string descriptor::Tensor::get_next_view_name()
 {
-    return m_name + "_TV" + std::to_string(m_next_view_id++);
+    return m_name + "_TV" + to_string(m_next_view_id++);
 }

-size_t Tensor::size() const
+size_t descriptor::Tensor::size() const
 {
     return m_size;
 }

-void Tensor::set_pool_offset(size_t offset)
+void descriptor::Tensor::set_pool_offset(size_t offset)
 {
     m_pool_offset = offset;
 }

-size_t Tensor::get_pool_offset() const
+size_t descriptor::Tensor::get_pool_offset() const
 {
     return m_pool_offset;
 }

-std::ostream& operator<<(std::ostream& out, const Tensor& tensor)
+ostream& operator<<(ostream& out, const descriptor::Tensor& tensor)
 {
     out << "Tensor(" << tensor.get_name() << ", ";
     out << (tensor.is_persistent() ? "P" : "");
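The last hunk changes a free function rather than a member: operator<< keeps its unqualified spelling because both using-directives remain in scope. For a namespaced type, the conventional home for such an operator is the type's own namespace, so that argument-dependent lookup finds it at call sites. A sketch of that convention (my arrangement, condensed; ngraph's actual header may declare it differently):

    #include <iostream>
    #include <string>

    namespace ngraph
    {
        namespace descriptor
        {
            class Tensor
            {
            public:
                std::string get_name() const { return "t0"; }
            };

            // Declared beside the type so ADL can find it.
            std::ostream& operator<<(std::ostream& out, const Tensor& tensor);
        }
    }

    using namespace std;
    using namespace ngraph;

    // The qualified definition matches the declaration above while still using
    // the shortened spellings the commit favors.
    ostream& ngraph::descriptor::operator<<(ostream& out, const descriptor::Tensor& tensor)
    {
        out << "Tensor(" << tensor.get_name() << ")";
        return out;
    }

    int main()
    {
        descriptor::Tensor t;
        cout << t << "\n"; // found via ADL on descriptor::Tensor
        return 0;
    }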
src/ngraph/descriptor/tensor_view.cpp

@@ -15,9 +15,10 @@
 #include "ngraph/descriptor/tensor_view.hpp"
 #include "ngraph/types/type.hpp"

-using namespace ngraph::descriptor;
+using namespace ngraph;
+using namespace std;

-std::shared_ptr<const ngraph::ValueType> TensorView::get_value_type() const
+shared_ptr<const ngraph::ValueType> descriptor::TensorView::get_value_type() const
 {
     return m_tensor_view_type;
 }
src/ngraph/ops/binary_elementwise_comparison.cpp

@@ -16,11 +16,10 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph;

-op::BinaryElementwiseComparison::BinaryElementwiseComparison(const std::string& node_type,
-                                                             const std::shared_ptr<Node>& arg0,
-                                                             const std::shared_ptr<Node>& arg1)
+op::BinaryElementwiseComparison::BinaryElementwiseComparison(const string& node_type,
+                                                             const shared_ptr<Node>& arg0,
+                                                             const shared_ptr<Node>& arg1)
     : BinaryElementwise(node_type, element::boolean, arg0, arg1)
 {
     if (arg0->get_element_type() != arg1->get_element_type())
src/ngraph/ops/not.cpp

@@ -16,10 +16,10 @@
 #include "ngraph/ops/op.hpp"

 using namespace ngraph;
-using namespace ngraph::op;
+using namespace std;

-op::Not::Not(const std::shared_ptr<Node>& arg)
-    : UnaryElementwise("Not", arg->get_element_type(), arg)
+op::Not::Not(const shared_ptr<Node>& arg)
+    : op::UnaryElementwise("Not", arg->get_element_type(), arg)
 {
 }
src/ngraph/pass/dump_sorted.cpp

@@ -21,14 +21,13 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

 pass::DumpSorted::DumpSorted(const string& output_file)
     : m_output_file{output_file}
 {
 }

-bool pass::DumpSorted::run_on_module(vector<shared_ptr<ngraph::Function>>& functions)
+bool pass::DumpSorted::run_on_module(vector<shared_ptr<Function>>& functions)
 {
     ofstream out{m_output_file};
     if (out)

@@ -42,7 +41,7 @@ bool pass::DumpSorted::run_on_module(vector<shared_ptr<ngraph::Function>>& funct
         {
             out << node->get_name() << "(";
             vector<string> inputs;
-            for (const Input& input : node->get_inputs())
+            for (const descriptor::Input& input : node->get_inputs())
             {
                 inputs.push_back(input.get_tensor().get_name());
             }

@@ -57,15 +56,15 @@ bool pass::DumpSorted::run_on_module(vector<shared_ptr<ngraph::Function>>& funct
             out << join(outputs);
             out << "\n";
-            for (const Tensor* tensor : node->liveness_live_list)
+            for (const descriptor::Tensor* tensor : node->liveness_live_list)
             {
                 out << " L " << tensor->get_name() << "\n";
             }
-            for (const Tensor* tensor : node->liveness_new_list)
+            for (const descriptor::Tensor* tensor : node->liveness_new_list)
             {
                 out << " N " << tensor->get_name() << "\n";
             }
-            for (const Tensor* tensor : node->liveness_free_list)
+            for (const descriptor::Tensor* tensor : node->liveness_free_list)
             {
                 out << " F " << tensor->get_name() << "\n";
             }
src/ngraph/pass/liveness.cpp

@@ -26,11 +26,10 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

 bool pass::Liveness::run_on_call_graph(const list<shared_ptr<Node>>& ops)
 {
-    unordered_set<Tensor*> currently_live;
+    unordered_set<descriptor::Tensor*> currently_live;
     for (auto it = ops.rbegin(); it != ops.rend(); it++)
     {

@@ -38,32 +37,32 @@ bool pass::Liveness::run_on_call_graph(const list<shared_ptr<Node>>& ops)
         node->liveness_live_list.clear();
         node->liveness_new_list.clear();
         node->liveness_free_list.clear();

-        unordered_set<Tensor*> input_tensor_decls;
-        for (Input& input_decl : node->get_inputs())
+        unordered_set<descriptor::Tensor*> input_tensor_decls;
+        for (descriptor::Input& input_decl : node->get_inputs())
         {
-            Tensor& tensor = input_decl.get_tensor();
+            descriptor::Tensor& tensor = input_decl.get_tensor();
             if (is_temporary(tensor))
             {
                 input_tensor_decls.insert(&tensor);
             }
         }

-        unordered_set<Tensor*> output_tensor_decls;
+        unordered_set<descriptor::Tensor*> output_tensor_decls;
         for (size_t i = 0; i < node->get_output_size(); ++i)
         {
-            Tensor& tensor = node->get_output_tensor(i);
+            descriptor::Tensor& tensor = node->get_output_tensor(i);
             if (is_temporary(tensor))
             {
                 output_tensor_decls.insert(&tensor);
             }
         }

-        unordered_set<Tensor*> free_tensor_decls;
-        unordered_set<Tensor*> new_tensor_decls;
-        unordered_set<Tensor*> all_tensor_decls = input_tensor_decls;
+        unordered_set<descriptor::Tensor*> free_tensor_decls;
+        unordered_set<descriptor::Tensor*> new_tensor_decls;
+        unordered_set<descriptor::Tensor*> all_tensor_decls = input_tensor_decls;
         all_tensor_decls.insert(output_tensor_decls.begin(), output_tensor_decls.end());

-        for (Tensor* tensor_decl : all_tensor_decls)
+        for (descriptor::Tensor* tensor_decl : all_tensor_decls)
         {
             if (!contains(currently_live, tensor_decl))
             {

@@ -75,7 +74,7 @@ bool pass::Liveness::run_on_call_graph(const list<shared_ptr<Node>>& ops)
         }
         node->liveness_live_list = currently_live;

-        for (Tensor* output_decl : output_tensor_decls)
+        for (descriptor::Tensor* output_decl : output_tensor_decls)
         {
             if (contains(currently_live, output_decl))
             {

@@ -89,18 +88,18 @@ bool pass::Liveness::run_on_call_graph(const list<shared_ptr<Node>>& ops)
     // Anything marked as output must remain live for the remainder of the graph
     // Add outputs to live_list and remove from free_list
-    unordered_set<Tensor*> outputs;
-    unordered_set<Tensor*> seen;
+    unordered_set<descriptor::Tensor*> outputs;
+    unordered_set<descriptor::Tensor*> seen;
     for (shared_ptr<Node> node : ops)
     {
-        for (Tensor* tensor : node->liveness_live_list)
+        for (descriptor::Tensor* tensor : node->liveness_live_list)
         {
             if (tensor->is_output())
             {
                 outputs.insert(tensor);
             }
         }
-        for (Tensor* tensor : outputs)
+        for (descriptor::Tensor* tensor : outputs)
         {
             node->liveness_live_list.insert(tensor);
             node->liveness_free_list.erase(tensor);

@@ -123,7 +122,7 @@ bool pass::Liveness::run_on_call_graph(const list<shared_ptr<Node>>& ops)
     return false;
 }

-bool pass::Liveness::is_temporary(const Tensor& tensor)
+bool pass::Liveness::is_temporary(const descriptor::Tensor& tensor)
 {
     return tensor.is_persistent() == false && tensor.is_input() == false &&
            tensor.is_output() == false && tensor.is_constant() == false;

@@ -132,13 +131,13 @@ bool pass::Liveness::is_temporary(const Tensor& tensor)
 void pass::Liveness::validate_liveness(const list<Node*>& ops)
 {
-    unordered_set<Tensor*> dead_tensors;
+    unordered_set<descriptor::Tensor*> dead_tensors;
     for (const Node* node : ops)
     {
         auto active = node->liveness_live_list;
         active.insert(node->liveness_new_list.begin(), node->liveness_new_list.end());
         active.insert(node->liveness_free_list.begin(), node->liveness_free_list.end());

-        for (const Tensor* tensor : active)
+        for (const descriptor::Tensor* tensor : active)
         {
             if (contains(dead_tensors, tensor))
             {
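The pass walks the schedule from ops.rbegin() to ops.rend(), so the first backward sighting of a temporary tensor is, in forward terms, its last use. A toy backward walk over integer "tensor ids" showing how the live set grows on first sighting; this is a simplification of the pass's idea, not its API:

    #include <cstdio>
    #include <set>
    #include <vector>

    int main()
    {
        // ops[i] = ids of the tensors the i-th op reads or writes, in schedule order.
        std::vector<std::set<int>> ops = {{0}, {0, 1}, {1, 2}, {2}};

        std::set<int> currently_live;
        for (auto it = ops.rbegin(); it != ops.rend(); ++it)
        {
            size_t op_index = ops.rend() - it - 1;
            for (int t : *it)
            {
                // First backward sighting == last forward use: the tensor can be
                // released after this op (cf. liveness_free_list above).
                if (currently_live.insert(t).second)
                {
                    std::printf("tensor %d dies after op %zu\n", t, op_index);
                }
            }
        }
        return 0;
    }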
src/ngraph/pass/memory_layout.cpp

@@ -24,24 +24,23 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

 pass::MemoryLayout::MemoryLayout(size_t alignment)
     : m_alignment(alignment)
 {
 }

-bool pass::MemoryLayout::run_on_function(std::shared_ptr<ngraph::Function> function)
+bool pass::MemoryLayout::run_on_function(shared_ptr<ngraph::Function> function)
 {
     MemoryManager mm(m_alignment);
     for (shared_ptr<Node> node : function->get_ordered_ops())
     {
-        for (Tensor* tensor : node->liveness_new_list)
+        for (descriptor::Tensor* tensor : node->liveness_new_list)
         {
             size_t offset = mm.allocate(tensor->size());
             tensor->set_pool_offset(offset);
         }
-        for (const Tensor* tensor : node->liveness_free_list)
+        for (const descriptor::Tensor* tensor : node->liveness_free_list)
         {
             mm.free(tensor->get_pool_offset());
         }

@@ -114,7 +113,7 @@ size_t pass::MemoryManager::best_fit(size_t size)
         m_node_list.insert(best_fit, node{size, block_state::ALLOCATED});
         best_fit->m_size -= size;
     }
-    m_max_allocated = std::max(m_max_allocated, best_offset + size);
+    m_max_allocated = max(m_max_allocated, best_offset + size);
     return best_offset;
 }

@@ -148,7 +147,7 @@ size_t pass::MemoryManager::first_fit(size_t size)
     {
         throw bad_alloc();
     }
-    m_max_allocated = std::max(m_max_allocated, offset + size);
+    m_max_allocated = max(m_max_allocated, offset + size);
     return offset;
 }

@@ -161,7 +160,7 @@ void pass::MemoryManager::free(size_t offset)
         if (offset == search_offset)
         {
-            list<node>::iterator it_next = std::next(it);
+            list<node>::iterator it_next = next(it);
             if (it == m_node_list.begin())
             {
                 // free the first node in the list

@@ -170,7 +169,7 @@ void pass::MemoryManager::free(size_t offset)
         else
         {
             // node has predecessor
-            list<node>::iterator it_prev = std::prev(it);
+            list<node>::iterator it_prev = prev(it);
             if (it_prev->m_state == block_state::FREE)
             {
                 it->m_size += it_prev->m_size;

@@ -195,7 +194,7 @@ void pass::MemoryManager::free(size_t offset)
     }
 }

-void pass::MemoryManager::dump(std::ostream& out)
+void pass::MemoryManager::dump(ostream& out)
 {
     for (const node& n : m_node_list)
     {
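The pass replays the liveness lists through a MemoryManager: tensors born at a node receive a pool offset from allocate(), and tensors dying at a node hand their block back through free(), which is what lets tensors with disjoint lifetimes share pool bytes. A toy replay of that allocate/free protocol (a deliberately naive planner standing in for pass::MemoryManager, which really keeps a best-fit/first-fit free list):

    #include <cstddef>
    #include <cstdio>

    // Naive bump allocator standing in for pass::MemoryManager.
    class PoolPlanner
    {
    public:
        size_t allocate(size_t size)
        {
            size_t offset = m_next; // no free-list reuse in this sketch
            m_next += size;
            return offset;
        }
        void free(size_t /*offset*/) { /* a real planner would recycle the block */ }

    private:
        size_t m_next = 0;
    };

    int main()
    {
        PoolPlanner mm;
        // A node's "new list": tensors born here get offsets into one shared pool.
        size_t a = mm.allocate(64);
        size_t b = mm.allocate(128);
        std::printf("a at %zu, b at %zu\n", a, b); // a at 0, b at 64
        // A later node's "free list": their bytes become reusable.
        mm.free(a);
        mm.free(b);
        return 0;
    }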
src/ngraph/pass/memory_visualize.cpp

@@ -26,7 +26,6 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

 pass::MemoryVisualize::MemoryVisualize(const string& filename)
     : m_filename{filename}

@@ -104,7 +103,7 @@ shared_ptr<Node> pass::MemoryVisualize::find_largest_op(const list<shared_ptr<No
     for (shared_ptr<Node> exop : nodes)
     {
         size_t size = 0;
-        for (const Tensor* tensor : exop->liveness_live_list)
+        for (const descriptor::Tensor* tensor : exop->liveness_live_list)
         {
             size += tensor->size();
         }

@@ -123,15 +122,15 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<shared_
     if (largest_op)
     {
-        unordered_set<Tensor*> largest_live;
-        for (Tensor* tensor : largest_op->liveness_live_list)
+        unordered_set<descriptor::Tensor*> largest_live;
+        for (descriptor::Tensor* tensor : largest_op->liveness_live_list)
         {
             largest_live.insert(tensor);
         }

-        unordered_map<const Tensor*, size_t> age_list;
-        vector<const Tensor*> tensor_set;
-        unordered_map<const Tensor*, shared_ptr<Node>> generator_op;
+        unordered_map<const descriptor::Tensor*, size_t> age_list;
+        vector<const descriptor::Tensor*> tensor_set;
+        unordered_map<const descriptor::Tensor*, shared_ptr<Node>> generator_op;
         file << "<table>\n";
         file << " <tr>";
         file << "<th align=\"left\">tensor</th>";

@@ -142,12 +141,12 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<shared_
         size_t i = 0;
         for (shared_ptr<Node> exop : nodes)
         {
-            for (const Tensor* tensor : exop->liveness_new_list)
+            for (const descriptor::Tensor* tensor : exop->liveness_new_list)
             {
                 age_list[tensor] = i;
                 generator_op[tensor] = exop;
             }
-            for (const Tensor* tensor : exop->liveness_free_list)
+            for (const descriptor::Tensor* tensor : exop->liveness_free_list)
             {
                 size_t start = age_list[tensor];
                 age_list[tensor] = (i - start);

@@ -155,10 +154,12 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<shared_
             }
             i++;
         }
-        sort(tensor_set.begin(), tensor_set.end(), [](const Tensor* t1, const Tensor* t2) {
-            return t1->size() < t2->size();
-        });
-        for (const Tensor* tensor : tensor_set)
+        sort(tensor_set.begin(),
+             tensor_set.end(),
+             [](const descriptor::Tensor* t1, const descriptor::Tensor* t2) {
+                 return t1->size() < t2->size();
+             });
+        for (const descriptor::Tensor* tensor : tensor_set)
         {
             int generator_weight = compute_op_weight(generator_op[tensor]);
             if (contains(largest_live, tensor))

@@ -249,14 +250,14 @@ int pass::MemoryVisualize::compute_op_weight(const shared_ptr<Node> exop)
     //     tensor = output_decl.tensor
     //     if tensor.is_persistent is False:
     //         mass -= tensor->size()
-    for (const Tensor* tensor : exop->liveness_new_list)
+    for (const descriptor::Tensor* tensor : exop->liveness_new_list)
     {
         if (tensor->is_persistent() == false)
        {
             mass += tensor->size();
         }
     }
-    for (const Tensor* tensor : exop->liveness_free_list)
+    for (const descriptor::Tensor* tensor : exop->liveness_free_list)
     {
         if (tensor->is_persistent() == false)
         {
src/ngraph/runtime/cpu/cpu_kernel_emitters.cpp

(This file's diff is collapsed in the source view and not reproduced here.)
src/ngraph/runtime/cpu/cpu_kernel_utils.cpp

@@ -19,7 +19,7 @@
 #include "ngraph/util.hpp"

 using namespace ngraph;
-using namespace ngraph::runtime::cpu::kernel;
+using namespace std;

 //
 // Given a coordinate transform and a vector of index expressions relative to

@@ -37,18 +37,17 @@ using namespace ngraph::runtime::cpu::kernel;
 //     {"((k) * 2 + 5)", "((i) * 2 + 3)", "((j) * 2 + 4)"}
 //
 //
-std::vector<std::string>
-    ngraph::runtime::cpu::kernel::emit_multi_indices(CoordinateTransform& trans,
-                                                     const std::vector<std::string>& index_vars)
+vector<string> ngraph::runtime::cpu::kernel::emit_multi_indices(CoordinateTransform& trans,
+                                                                const vector<string>& index_vars)
 {
-    std::vector<std::string> result;
+    vector<string> result;
     for (size_t i = 0; i < index_vars.size(); i++)
     {
-        std::string index_var = index_vars[trans.get_source_axis_order()[i]];
+        string index_var = index_vars[trans.get_source_axis_order()[i]];
         size_t source_stride = trans.get_source_strides()[i];
         size_t source_start = trans.get_source_start_corner()[i];
-        std::stringstream ss;
+        stringstream ss;
         if (source_stride == 1 && source_start == 0)
         {

@@ -90,11 +89,10 @@ std::vector<std::string>
 // "((4 * ((k) * 2 + 5)) + (2 * ((i) * 2 + 3)) + ((j) * 2 + 4))"
 //
 //
-std::string
-    ngraph::runtime::cpu::kernel::emit_linear_index(CoordinateTransform& trans,
-                                                    const std::vector<std::string>& index_vars)
+string ngraph::runtime::cpu::kernel::emit_linear_index(CoordinateTransform& trans,
+                                                       const vector<string>& index_vars)
 {
-    std::vector<std::string> multi_indices = emit_multi_indices(trans, index_vars);
+    vector<string> multi_indices = emit_multi_indices(trans, index_vars);
     size_t stride = 1;

@@ -103,7 +101,7 @@ std::string
         // No need to do this (multiply by stride) if it's 1, though it wouldn't hurt anything.
         if (stride != 1)
         {
-            std::stringstream ss;
+            stringstream ss;
             ss << "(" << stride << " * " << multi_indices[i] << ")";
             multi_indices[i] = ss.str();
         }

@@ -111,7 +109,7 @@ std::string
         stride *= trans.get_source_shape()[i];
     }

-    std::stringstream ss;
+    stringstream ss;
     ss << "(" << join(multi_indices, " + ") << ")";
     return ss.str();

@@ -123,12 +121,12 @@ std::string
 //
 // Optionally emits an OpenMP parallel pragma, if "omp" is true.
 //
-std::string ngraph::runtime::cpu::kernel::start_index_loop(const std::string& index_var,
-                                                           size_t start,
-                                                           size_t end,
-                                                           bool omp)
+string ngraph::runtime::cpu::kernel::start_index_loop(const string& index_var,
+                                                      size_t start,
+                                                      size_t end,
+                                                      bool omp)
 {
-    std::stringstream ss;
+    stringstream ss;
     if (omp)
     {

@@ -145,18 +143,18 @@ std::string ngraph::runtime::cpu::kernel::start_index_loop(const std::string& in
 //
 // Ends an indexing loop on the index variable [index_var].
 //
-std::string ngraph::runtime::cpu::kernel::end_index_loop(const std::string& index_var)
+string ngraph::runtime::cpu::kernel::end_index_loop(const string& index_var)
 {
-    std::stringstream ss;
+    stringstream ss;
     ss << "} // end for(" << index_var << ")\n";
     return ss.str();
 }

-std::string ngraph::runtime::cpu::kernel::emit_nd_sizes(CoordinateTransform& trans)
+string ngraph::runtime::cpu::kernel::emit_nd_sizes(CoordinateTransform& trans)
 {
-    std::stringstream ss;
+    stringstream ss;
     for (size_t s : trans.get_source_shape())
     {

@@ -166,12 +164,12 @@ std::string ngraph::runtime::cpu::kernel::emit_nd_sizes(CoordinateTransform& tra
     return ss.str();
 }

-std::string ngraph::runtime::cpu::kernel::emit_nd_index(CoordinateTransform& trans,
-                                                        const std::vector<std::string>& index_vars)
+string ngraph::runtime::cpu::kernel::emit_nd_index(CoordinateTransform& trans,
+                                                   const vector<string>& index_vars)
 {
-    std::stringstream ss;
-    for (std::string index : emit_multi_indices(trans, index_vars))
+    stringstream ss;
+    for (string index : emit_multi_indices(trans, index_vars))
     {
         ss << "[" << index << "]";
     }

@@ -184,21 +182,21 @@ std::string ngraph::runtime::cpu::kernel::emit_nd_index(CoordinateTransform& tra
 // dest_buffer mediated by dest_trans.
 //
 void ngraph::runtime::cpu::kernel::emit_pointwise_copy(codegen::CodeWriter& writer,
-                                                       const std::string& element_type,
-                                                       const std::string& source_buffer,
-                                                       const std::string& dest_buffer,
+                                                       const string& element_type,
+                                                       const string& source_buffer,
+                                                       const string& dest_buffer,
                                                        CoordinateTransform& source_trans,
                                                        CoordinateTransform& dest_trans)
 {
-    std::vector<std::string> index_vars;
+    vector<string> index_vars;

     Shape source_start_corner = source_trans.get_source_start_corner();
     Shape source_end_corner = source_trans.get_source_end_corner();

     size_t n_axes = source_start_corner.size();

-    std::string source_nd_name = writer.generate_temporary_name("source_nd");
-    std::string dest_nd_name = writer.generate_temporary_name("dest_nd");
+    string source_nd_name = writer.generate_temporary_name("source_nd");
+    string dest_nd_name = writer.generate_temporary_name("dest_nd");

     writer << element_type << "(&" << source_nd_name << ")" << emit_nd_sizes(source_trans)
            << " = *reinterpret_cast<" << element_type << "(*)" << emit_nd_sizes(source_trans)

@@ -209,7 +207,7 @@ void ngraph::runtime::cpu::kernel::emit_pointwise_copy(codegen::CodeWriter& writ
     for (size_t i = 0; i < n_axes; i++)
     {
-        std::string index_var = writer.generate_temporary_name("i");
+        string index_var = writer.generate_temporary_name("i");
         writer << start_index_loop(index_var, source_start_corner[i], source_end_corner[i], i == 0);
         writer.indent++;
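These helpers build C++ source text rather than loops that run here: per the comments in the diff, emit_multi_indices produces per-axis index strings such as "((k) * 2 + 5)", emit_linear_index combines them into one strided expression, and start_index_loop/end_index_loop bracket a generated for-loop (with an optional OpenMP pragma). A minimal stand-in showing the shape of the generated text (simplified signatures; the closing comment string matches the diff, the opening loop text is my assumption):

    #include <iostream>
    #include <sstream>
    #include <string>

    std::string start_index_loop(const std::string& index_var, size_t start, size_t end)
    {
        std::stringstream ss;
        ss << "for (size_t " << index_var << " = " << start << "; " << index_var << " < "
           << end << "; " << index_var << "++)\n{\n";
        return ss.str();
    }

    std::string end_index_loop(const std::string& index_var)
    {
        std::stringstream ss;
        ss << "} // end for(" << index_var << ")\n"; // same closing text as the diff
        return ss.str();
    }

    int main()
    {
        // The CPU backend concatenates fragments like these into a kernel's source.
        std::cout << start_index_loop("i0", 0, 8) << "    // ...loop body...\n"
                  << end_index_loop("i0");
        return 0;
    }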
src/ngraph/runtime/manager.cpp

@@ -22,24 +22,25 @@
 #include "ngraph/runtime/manager.hpp"
 #include "ngraph/util.hpp"

-using namespace ngraph::runtime;
+using namespace ngraph;
 using namespace std;

-static std::mutex load_plugins_mutex;
-static std::mutex close_plugins_mutex;
+static mutex load_plugins_mutex;
+static mutex close_plugins_mutex;

-bool Manager::m_is_factory_map_initialized = false;
-std::vector<void*> Manager::m_plugin_handles = {};
+bool runtime::Manager::m_is_factory_map_initialized = false;
+vector<void*> runtime::Manager::m_plugin_handles = {};

-void Manager::load_plugins(const std::string& runtime_plugin_libs)
+void runtime::Manager::load_plugins(const string& runtime_plugin_libs)
 {
-    std::lock_guard<std::mutex> lock(load_plugins_mutex);
+    lock_guard<mutex> lock(load_plugins_mutex);
-    if (Manager::m_is_factory_map_initialized)
+    if (m_is_factory_map_initialized)
     {
         return;
     }
-    std::vector<std::string> plugin_paths = ngraph::split(runtime_plugin_libs, ':', false);
+    vector<string> plugin_paths = ngraph::split(runtime_plugin_libs, ':', false);
     for (auto plugin_path : plugin_paths)
     {
         if (plugin_path.size() > 0)

@@ -52,7 +53,7 @@ void Manager::load_plugins(const std::string& runtime_plugin_libs)
             if (register_plugin != NULL)
             {
                 register_plugin();
-                Manager::m_plugin_handles.push_back(plugin_handle);
+                m_plugin_handles.push_back(plugin_handle);
             }
             else
             {

@@ -66,31 +67,31 @@ void Manager::load_plugins(const std::string& runtime_plugin_libs)
         }
     }
-    Manager::m_is_factory_map_initialized = true;
+    m_is_factory_map_initialized = true;
 }

 // TODO: Should call this function after plugin is not needed anymore.
-void Manager::close_plugins()
+void runtime::Manager::close_plugins()
 {
-    std::lock_guard<std::mutex> lock(close_plugins_mutex);
+    lock_guard<mutex> lock(close_plugins_mutex);
-    for (auto plugin_handle : Manager::m_plugin_handles)
+    for (auto plugin_handle : m_plugin_handles)
     {
         dlclose(plugin_handle);
     }
-    Manager::m_plugin_handles.clear();
+    m_plugin_handles.clear();
 }

-Manager::FactoryMap& Manager::get_factory_map()
+runtime::Manager::FactoryMap& runtime::Manager::get_factory_map()
 {
     // Stores Manager Factories
     static FactoryMap factory_map;
     return factory_map;
 }

-std::shared_ptr<Manager> Manager::get(const std::string& name)
+shared_ptr<runtime::Manager> runtime::Manager::get(const string& name)
 {
-    Manager::load_plugins(RUNTIME_PLUGIN_LIBS);
+    load_plugins(RUNTIME_PLUGIN_LIBS);
     auto iter = get_factory_map().find(name);

@@ -103,7 +104,7 @@ std::shared_ptr<Manager> Manager::get(const std::string& name)
     return f(name);
 }

-Manager::Factory Manager::register_factory(const std::string& name, Factory factory)
+runtime::Manager::Factory runtime::Manager::register_factory(const string& name, Factory factory)
 {
     get_factory_map()[name] = factory;
     return factory;
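This file also shows the rule for static data members: the out-of-class definitions must name the class fully (runtime::Manager::m_plugin_handles), but inside a member function the class scope is already in effect, so the old Manager:: prefixes on member accesses were redundant; dropping them is most of this diff. A compact sketch:

    #include <vector>

    namespace ngraph
    {
        namespace runtime
        {
            class Manager
            {
            public:
                static void load_plugins();

            private:
                static bool m_is_factory_map_initialized;
                static std::vector<void*> m_plugin_handles;
            };
        }
    }

    using namespace ngraph;

    // Out-of-class definitions of statics must be qualified...
    bool runtime::Manager::m_is_factory_map_initialized = false;
    std::vector<void*> runtime::Manager::m_plugin_handles = {};

    void runtime::Manager::load_plugins()
    {
        // ...but in a member body the members are directly in scope, so no
        // "Manager::" prefix is needed.
        if (m_is_factory_map_initialized)
        {
            return;
        }
        m_is_factory_map_initialized = true;
    }

    int main()
    {
        ngraph::runtime::Manager::load_plugins();
        return 0;
    }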
src/ngraph/runtime/tensor_view.cpp

@@ -18,41 +18,41 @@
 #include "ngraph/types/element_type.hpp"
 #include "ngraph/types/type.hpp"

-using namespace ngraph::runtime;
+using namespace ngraph;
+using namespace std;

-std::shared_ptr<const ngraph::descriptor::TensorView> TensorView::get_tensor_view_descriptor() const
+shared_ptr<const descriptor::TensorView> runtime::TensorView::get_tensor_view_descriptor() const
 {
     return m_descriptor;
 }

-std::shared_ptr<ngraph::descriptor::TensorView> TensorView::get_descriptor() const
+shared_ptr<descriptor::TensorView> runtime::TensorView::get_descriptor() const
 {
     return m_descriptor;
 }

-void TensorView::collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
-                                      const std::shared_ptr<TensorView>& value) const
+void runtime::TensorView::collect_tensor_views(vector<shared_ptr<TensorView>>& views,
+                                               const shared_ptr<TensorView>& value) const
 {
     views.push_back(value);
 }

-const ngraph::Shape& TensorView::get_shape() const
+const Shape& runtime::TensorView::get_shape() const
 {
     return m_descriptor->get_tensor_view_type()->get_shape();
 }

-const ngraph::Strides& TensorView::get_strides() const
+const Strides& runtime::TensorView::get_strides() const
 {
     return m_descriptor->get_tensor_view_layout()->get_strides();
 }

-std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout> TensorView::get_tensor_view_layout() const
+shared_ptr<descriptor::layout::TensorViewLayout> runtime::TensorView::get_tensor_view_layout() const
 {
     return m_descriptor->get_tensor_view_layout();
 }

-size_t TensorView::get_element_count() const
+size_t runtime::TensorView::get_element_count() const
 {
     size_t rc = 1;
     for (size_t s : get_shape())

@@ -62,7 +62,7 @@ size_t TensorView::get_element_count() const
     return rc;
 }

-const ngraph::descriptor::Tensor& TensorView::get_tensor() const
+const descriptor::Tensor& runtime::TensorView::get_tensor() const
 {
     return get_tensor_view_descriptor()->get_tensor();
 }
test/builder.cpp

@@ -18,11 +18,10 @@
 #include "util/test_tools.hpp"

 using namespace ngraph;
-using namespace ngraph::test;
+using namespace std;

-std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result(
-    std::function<std::shared_ptr<Node>(const std::shared_ptr<Node>&, const AxisSet&)> func)
+shared_ptr<runtime::TensorView>
+    make_reduce_result(function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&)> func)
 {
     auto shape_a = Shape{3, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);

@@ -41,8 +40,8 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result(
     return result;
 }

-std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_true(
-    std::function<std::shared_ptr<Node>(const std::shared_ptr<Node>&, const AxisSet&, bool)> func)
+shared_ptr<runtime::TensorView> make_reduce_result_true(
+    function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&, bool)> func)
 {
     auto shape_a = Shape{3, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);

@@ -61,8 +60,8 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_true(
     return result;
 }

-std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_false(
-    std::function<std::shared_ptr<Node>(const std::shared_ptr<Node>&, const AxisSet&, bool)> func)
+shared_ptr<runtime::TensorView> make_reduce_result_false(
+    function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&, bool)> func)
 {
     auto shape_a = Shape{3, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);

@@ -84,58 +83,58 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_false(
 TEST(builder, l2_norm)
 {
     auto result = make_reduce_result(builder::l2_norm);
-    ASSERT_TRUE(all_close((vector<float>{5.9160797831f, 7.48331477355f}), read_vector<float>(result)));
+    ASSERT_TRUE(
+        test::all_close((vector<float>{5.9160797831f, 7.48331477355f}), read_vector<float>(result)));
 }

 TEST(builder, mean)
 {
     auto result = make_reduce_result(builder::mean);
-    ASSERT_TRUE(all_close((vector<float>{3, 4}), read_vector<float>(result)));
+    ASSERT_TRUE(test::all_close((vector<float>{3, 4}), read_vector<float>(result)));
 }

 TEST(builder, std_dev)
 {
     auto result = make_reduce_result_false(builder::std_dev);
-    ASSERT_TRUE(all_close((vector<float>{1.63299316186f, 1.63299316186f}), read_vector<float>(result)));
+    ASSERT_TRUE(
+        test::all_close((vector<float>{1.63299316186f, 1.63299316186f}), read_vector<float>(result)));
     result = make_reduce_result_true(builder::std_dev);
-    ASSERT_TRUE(all_close((vector<float>{2, 2}), read_vector<float>(result)));
+    ASSERT_TRUE(test::all_close((vector<float>{2, 2}), read_vector<float>(result)));
 }

 TEST(builder, variance)
 {
     auto result = make_reduce_result_false(builder::variance);
-    ASSERT_TRUE(all_close((vector<float>{2.66666666666f, 2.66666666666f}), read_vector<float>(result)));
+    ASSERT_TRUE(
+        test::all_close((vector<float>{2.66666666666f, 2.66666666666f}), read_vector<float>(result)));
     result = make_reduce_result_true(builder::variance);
-    ASSERT_TRUE(all_close((vector<float>{4, 4}), read_vector<float>(result)));
+    ASSERT_TRUE(test::all_close((vector<float>{4, 4}), read_vector<float>(result)));
 }

 TEST(builder, numpy_transpose)
 {
     // 2D Transpose
     Shape shape{2, 4};
-    auto param = std::make_shared<op::Parameter>(ngraph::element::f32, shape);
-    auto transposed = std::dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param));
+    auto param = make_shared<op::Parameter>(element::f32, shape);
+    auto transposed = dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param));
     EXPECT_EQ(Shape({4, 2}), transposed->get_output_shape());

     // Multidimensional Transpose
     shape = Shape{2, 4, 8};
-    param = std::make_shared<op::Parameter>(ngraph::element::f32, shape);
-    transposed = std::dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param));
+    param = make_shared<op::Parameter>(element::f32, shape);
+    transposed = dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param));
     EXPECT_EQ(Shape({8, 4, 2}), transposed->get_output_shape());

     // Dimshuffle
     shape = Shape{2, 4, 8};
-    param = std::make_shared<op::Parameter>(ngraph::element::f32, shape);
-    transposed = std::dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param, AxisVector{2, 0, 1}));
+    param = make_shared<op::Parameter>(element::f32, shape);
+    transposed = dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param, AxisVector{2, 0, 1}));
     EXPECT_EQ(Shape({8, 2, 4}), transposed->get_output_shape());

     // Bad Orders
-    EXPECT_ANY_THROW(std::dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param, AxisVector{2})));
-    EXPECT_ANY_THROW(std::dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param, AxisVector{2, 2, 1})));
+    EXPECT_ANY_THROW(dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param, AxisVector{2})));
+    EXPECT_ANY_THROW(
+        dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param, AxisVector{2, 2, 1})));
 }
test/tensor.cpp

@@ -27,7 +27,6 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

 TEST(tensor, size)
 {

@@ -44,7 +43,7 @@ TEST(tensor, size)
         auto& outputs = arg0->get_outputs();
         ASSERT_EQ(1, outputs.size());
-        Tensor& output = outputs[0].get_tensor();
+        descriptor::Tensor& output = outputs[0].get_tensor();
         EXPECT_EQ(2 * 3 * 4, output.size());
     }

@@ -57,7 +56,7 @@ TEST(tensor, size)
         auto& outputs = arg0->get_outputs();
         ASSERT_EQ(1, outputs.size());
-        Tensor& output = outputs[0].get_tensor();
+        descriptor::Tensor& output = outputs[0].get_tensor();
         EXPECT_EQ(1 * 4, output.size());
     }

@@ -70,33 +69,33 @@ TEST(tensor, size)
         auto& outputs = arg0->get_outputs();
         ASSERT_EQ(1, outputs.size());
-        Tensor& output = outputs[0].get_tensor();
+        descriptor::Tensor& output = outputs[0].get_tensor();
         EXPECT_EQ(1 * 4, output.size());
     }
 }

 template <typename T>
-void test_read_write(const std::vector<T>& x)
+void test_read_write(const vector<T>& x)
 {
-    auto manager = ngraph::runtime::Manager::get("INTERPRETER");
+    auto manager = runtime::Manager::get("INTERPRETER");
     auto backend = manager->allocate_backend();

     auto a = backend->make_primary_tensor_view(element::from<T>(), Shape{2, x.size()});

-    std::vector<T> result(2 * x.size());
+    vector<T> result(2 * x.size());

     a->write(&x[0], 0, x.size() * sizeof(T));
-    std::copy(x.begin(), x.end(), result.begin());
+    copy(x.begin(), x.end(), result.begin());
     a->write(&x[0], x.size() * sizeof(T), x.size() * sizeof(T));
-    std::copy(x.begin(), x.end(), result.begin() + x.size());
+    copy(x.begin(), x.end(), result.begin() + x.size());

-    std::vector<T> af_vector(2 * x.size());
+    vector<T> af_vector(2 * x.size());
     a->read(af_vector.data(), 0, af_vector.size() * sizeof(T));
     ASSERT_EQ(af_vector, result);

-    std::vector<T> result1(x.size());
-    std::vector<T> result2(x.size());
-    std::copy(result.begin() + 1, result.begin() + 1 + x.size(), result1.begin());
+    vector<T> result1(x.size());
+    vector<T> result2(x.size());
+    copy(result.begin() + 1, result.begin() + 1 + x.size(), result1.begin());
     a->read(&result2[0], sizeof(T), sizeof(T) * x.size());
     ASSERT_EQ(result1, result2);
 }
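test_read_write exercises the byte-offset form of write/read: the second write lands at byte offset x.size() * sizeof(T), and the final read pulls x.size() elements starting one element in. The layout it asserts, restated over a plain byte buffer (a stand-in for the tensor view's storage, not the real API):

    #include <cassert>
    #include <cstring>
    #include <vector>

    int main()
    {
        std::vector<int> x = {1, 2, 3};
        std::vector<int> buffer(2 * x.size());

        // write(&x[0], 0, nbytes) followed by write(&x[0], nbytes, nbytes):
        std::memcpy(buffer.data(), x.data(), x.size() * sizeof(int));
        std::memcpy(buffer.data() + x.size(), x.data(), x.size() * sizeof(int));
        assert((buffer == std::vector<int>{1, 2, 3, 1, 2, 3}));

        // read(&out[0], sizeof(int), 3 * sizeof(int)) starts one element in:
        std::vector<int> out(x.size());
        std::memcpy(out.data(),
                    reinterpret_cast<char*>(buffer.data()) + sizeof(int),
                    x.size() * sizeof(int));
        assert((out == std::vector<int>{2, 3, 1}));
        return 0;
    }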