ngraph commit 75fd8b8d, authored Feb 02, 2019 by Robert Kimball

merge master

Parents: afd8e51a, c9a9c154

Showing 2 changed files with 43 additions and 47 deletions:

    src/ngraph/runtime/interpreter/int_backend.cpp    +38  -41
    src/ngraph/runtime/interpreter/int_backend.hpp     +5   -6
src/ngraph/runtime/interpreter/int_backend.cpp (+38, -41)
@@ -81,12 +81,8 @@ runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr<Function>& f
     pass_manager.register_pass<pass::LikeReplacement>();
     pass_manager.register_pass<pass::AssignLayout<DenseTensorLayout>>();
-    pass_manager.register_pass<pass::Liveness>();
-    pass_manager.register_pass<pass::MemoryLayout>(get_alignment());
     pass_manager.run_passes(function);
-    size_t memory_pool_size = function->get_temporary_pool_size();
-    instance.m_temporary_memory.reset(new AlignedBuffer(memory_pool_size, get_alignment()));
     for (const shared_ptr<Node>& node : function->get_ordered_ops())
     {
         instance.m_wrapped_nodes.emplace_back(node);
@@ -101,29 +97,27 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
     FunctionInstance& instance = m_function_instance;

     // convert inputs to HostTensor
-    vector<void*> func_inputs;
-    vector<shared_ptr<runtime::HostTensor>> htv_inputs;
+    vector<shared_ptr<HostTensor>> func_inputs;
     for (auto tensor : inputs)
     {
         auto host_tensor = static_pointer_cast<runtime::HostTensor>(tensor);
-        func_inputs.push_back(static_cast<void*>(host_tensor->get_data_ptr()));
-        htv_inputs.push_back(host_tensor);
+        func_inputs.push_back(host_tensor);
     }
     if (instance.m_nan_check_enabled)
     {
-        perform_nan_check(htv_inputs);
+        perform_nan_check(func_inputs);
     }

     // convert outputs to HostTensor
-    vector<void*> func_outputs;
+    vector<shared_ptr<HostTensor>> func_outputs;
     for (auto tensor : outputs)
     {
         auto host_tensor = static_pointer_cast<runtime::HostTensor>(tensor);
-        func_outputs.push_back(static_cast<void*>(host_tensor->get_data_ptr()));
+        func_outputs.push_back(host_tensor);
     }

     // map function params -> HostTensor
-    unordered_map<descriptor::Tensor*, void*> tensor_map;
+    unordered_map<descriptor::Tensor*, shared_ptr<HostTensor>> tensor_map;
     size_t input_count = 0;
     for (auto param : get_parameters())
     {
@@ -155,15 +149,9 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
         {
             continue;
         }
-        if (type_id == OP_TYPEID::Constant)
-        {
-            const op::Constant* c = static_cast<const op::Constant*>(op);
-            descriptor::Tensor* tensor = op->get_output_tensor_ptr(0).get();
-            tensor_map.insert({tensor, const_cast<void*>(c->get_data_ptr())});
-            continue;
-        }

         // get op inputs from map
-        vector<const void*> op_inputs;
+        vector<shared_ptr<HostTensor>> op_inputs;
         for (const descriptor::Input& input : op->get_inputs())
         {
             descriptor::Tensor* tensor = input.get_output().get_tensor_ptr().get();
@@ -171,17 +159,18 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
         }

         // get op outputs from map or create
-        vector<void*> op_outputs;
-        vector<shared_ptr<runtime::HostTensor>> htv_outputs;
+        vector<shared_ptr<HostTensor>> op_outputs;
         for (size_t i = 0; i < op->get_output_size(); ++i)
         {
             descriptor::Tensor* tensor = op->get_output_tensor_ptr(i).get();
-            void* host_tensor = nullptr;
+            shared_ptr<HostTensor> host_tensor;
             auto it = tensor_map.find(tensor);
             if (it == tensor_map.end())
             {
-                auto offset = op->get_output_tensor(i).get_pool_offset();
-                host_tensor = instance.get_temporary_pointer(offset);
+                const Shape& shape = op->get_output_shape(i);
+                const element::Type& type = op->get_output_element_type(i);
+                string name = op->get_output_tensor(i).get_name();
+                host_tensor = make_shared<runtime::HostTensor>(type, shape, name);
+                tensor_map.insert({tensor, host_tensor});
             }
             else
@@ -189,8 +178,6 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
                 host_tensor = it->second;
             }
             op_outputs.push_back(host_tensor);
-            htv_outputs.push_back(make_shared<runtime::HostTensor>(
-                tensor->get_element_type(), tensor->get_shape(), host_tensor));
         }

         // get op type
@@ -231,7 +218,7 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
         }
         if (instance.m_nan_check_enabled)
        {
-            perform_nan_check(htv_outputs, op);
+            perform_nan_check(op_outputs, op);
         }
     }
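Note on the call() hunks above: the change replaces raw void* buffers carved out of a preallocated temporary pool (get_pool_offset / get_temporary_pointer) with shared_ptr<HostTensor> objects that are created on demand and cached in tensor_map, keyed by descriptor::Tensor*. A minimal standalone sketch of that lookup-or-create pattern follows; HostTensorStub and TensorDesc are placeholder types for illustration, not the real nGraph classes.

#include <memory>
#include <string>
#include <unordered_map>

// Placeholder for ngraph::descriptor::Tensor (used only as a map key here).
struct TensorDesc
{
    std::string name;
};

// Placeholder for ngraph::runtime::HostTensor.
struct HostTensorStub
{
    explicit HostTensorStub(const std::string& n) : name(n) {}
    std::string name;
};

using TensorMap = std::unordered_map<TensorDesc*, std::shared_ptr<HostTensorStub>>;

// Return the tensor cached for 'key'; allocate and cache a new one the first
// time the tensor is seen. This mirrors the role tensor_map plays in call().
std::shared_ptr<HostTensorStub> lookup_or_create(TensorMap& tensor_map, TensorDesc* key)
{
    auto it = tensor_map.find(key);
    if (it != tensor_map.end())
    {
        return it->second;
    }
    auto host_tensor = std::make_shared<HostTensorStub>(key->name);
    tensor_map.insert({key, host_tensor});
    return host_tensor;
}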
@@ -240,24 +227,34 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
 void runtime::interpreter::INTExecutable::generate_calls(const element::Type& type,
                                                          const NodeWrapper& op,
-                                                         const vector<void*>& outputs,
-                                                         const vector<const void*>& inputs,
+                                                         const vector<shared_ptr<HostTensor>>& outputs,
+                                                         const vector<shared_ptr<HostTensor>>& inputs,
                                                          FunctionInstance& instance)
 {
+    vector<void*> out;
+    vector<const void*> in;
+    for (auto t : outputs)
+    {
+        out.push_back(t->get_data_ptr());
+    }
+    for (auto t : inputs)
+    {
+        in.push_back(t->get_data_ptr());
+    }
     stringstream ss;
     switch (type.get_type_enum())
     {
-    case element::Type_t::boolean: op_engine<char>(op, outputs, inputs, instance); break;
-    case element::Type_t::f32: op_engine<float>(op, outputs, inputs, instance); break;
-    case element::Type_t::f64: op_engine<double>(op, outputs, inputs, instance); break;
-    case element::Type_t::i8: op_engine<int8_t>(op, outputs, inputs, instance); break;
-    case element::Type_t::i16: op_engine<int16_t>(op, outputs, inputs, instance); break;
-    case element::Type_t::i32: op_engine<int32_t>(op, outputs, inputs, instance); break;
-    case element::Type_t::i64: op_engine<int64_t>(op, outputs, inputs, instance); break;
-    case element::Type_t::u8: op_engine<uint8_t>(op, outputs, inputs, instance); break;
-    case element::Type_t::u16: op_engine<uint16_t>(op, outputs, inputs, instance); break;
-    case element::Type_t::u32: op_engine<uint32_t>(op, outputs, inputs, instance); break;
-    case element::Type_t::u64: op_engine<uint64_t>(op, outputs, inputs, instance); break;
+    case element::Type_t::boolean: op_engine<char>(op, out, in, instance); break;
+    case element::Type_t::f32: op_engine<float>(op, out, in, instance); break;
+    case element::Type_t::f64: op_engine<double>(op, out, in, instance); break;
+    case element::Type_t::i8: op_engine<int8_t>(op, out, in, instance); break;
+    case element::Type_t::i16: op_engine<int16_t>(op, out, in, instance); break;
+    case element::Type_t::i32: op_engine<int32_t>(op, out, in, instance); break;
+    case element::Type_t::i64: op_engine<int64_t>(op, out, in, instance); break;
+    case element::Type_t::u8: op_engine<uint8_t>(op, out, in, instance); break;
+    case element::Type_t::u16: op_engine<uint16_t>(op, out, in, instance); break;
+    case element::Type_t::u32: op_engine<uint32_t>(op, out, in, instance); break;
+    case element::Type_t::u64: op_engine<uint64_t>(op, out, in, instance); break;
     case element::Type_t::undefined:
     case element::Type_t::dynamic:
     case element::Type_t::bf16:
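Note on the generate_calls hunk above: the executable now passes tensors as shared_ptr<HostTensor> all the way down and only flattens them to raw data pointers at the dispatch boundary, immediately before the typed op_engine<T> call. A self-contained sketch of that unwrapping step, again using a placeholder tensor type rather than the real ngraph::runtime::HostTensor:

#include <cstddef>
#include <memory>
#include <vector>

// Placeholder for ngraph::runtime::HostTensor; only the data pointer matters here.
struct HostTensorStub
{
    explicit HostTensorStub(size_t byte_size) : data(byte_size) {}
    void* get_data_ptr() { return data.data(); }
    std::vector<char> data;
};

// Flatten shared_ptr-managed tensors into the raw pointer vectors expected by
// the typed kernels, exactly at the dispatch boundary.
void dispatch_sketch(const std::vector<std::shared_ptr<HostTensorStub>>& outputs,
                     const std::vector<std::shared_ptr<HostTensorStub>>& inputs)
{
    std::vector<void*> out;
    std::vector<const void*> in;
    for (const auto& t : outputs)
    {
        out.push_back(t->get_data_ptr());
    }
    for (const auto& t : inputs)
    {
        in.push_back(t->get_data_ptr());
    }
    // In the real code, op_engine<T>(op, out, in, instance) is invoked here,
    // with T selected by switching on the element type.
    (void)out;
    (void)in;
}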
src/ngraph/runtime/interpreter/int_backend.hpp (+5, -6)
@@ -195,9 +195,6 @@ private:
         std::unordered_map<const Node*, stopwatch> m_timer_map;
         std::vector<NodeWrapper> m_wrapped_nodes;
         std::unordered_map<const Node*, std::shared_ptr<RNGState>> m_states;
-        std::shared_ptr<AlignedBuffer> m_temporary_memory;
-        void* get_temporary_pointer(size_t offset) { return m_temporary_memory->get_ptr(offset); }
     } m_function_instance;

     std::set<std::string> m_unsupported_op_name_list;
@@ -206,8 +203,8 @@ private:
     void generate_calls(const element::Type& type,
                         const NodeWrapper& op,
-                        const std::vector<void*>& outputs,
-                        const std::vector<const void*>& inputs,
+                        const std::vector<std::shared_ptr<HostTensor>>& outputs,
+                        const std::vector<std::shared_ptr<HostTensor>>& inputs,
                         FunctionInstance& instance);

     template <typename T>
@@ -495,7 +492,9 @@ private:
         }
         case OP_TYPEID::Constant:
         {
-            // Constant is handled in the main loop
+            const op::Constant* c = static_cast<const op::Constant*>(&node);
+            size_t element_count = shape_size(node.get_output_shape(0));
+            reference::constant<T>(c->get_data_ptr<T>(), static_cast<T*>(out[0]), element_count);
             break;
         }
         case OP_TYPEID::ScalarConstantLike: break;
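Note on the Constant case above: with the Constant short-circuit removed from the main loop in int_backend.cpp, constants are now materialized inside op_engine via reference::constant, which as used here copies the node's data into the output buffer. A hedged sketch of that copy follows; constant_copy is a stand-in name for illustration, not the nGraph reference kernel.

#include <cstddef>

// Stand-in for the effect of reference::constant<T> as called above: copy
// element_count values from the Constant node's data into the output buffer.
template <typename T>
void constant_copy(const T* arg, T* out, size_t element_count)
{
    for (size_t i = 0; i < element_count; ++i)
    {
        out[i] = arg[i];
    }
}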