ngraph — Commit f2a93568
authored Jul 09, 2019 by Robert Kimball
revert API
parent 567bc822
Showing 12 changed files with 33 additions and 411 deletions
src/ngraph/runtime/backend.cpp                       +0   -161
src/ngraph/runtime/backend.hpp                       +0   -74
src/ngraph/runtime/executable.cpp                    +0   -19
src/ngraph/runtime/executable.hpp                    +1   -15
src/ngraph/runtime/host_tensor.cpp                   +21  -45
src/ngraph/runtime/host_tensor.hpp                   +0   -12
src/ngraph/runtime/interpreter/int_backend.cpp       +3   -3
src/ngraph/runtime/interpreter/int_backend.hpp       +1   -2
src/ngraph/runtime/interpreter/int_executable.cpp    +2   -4
src/ngraph/runtime/interpreter/int_executable.hpp    +1   -3
src/ngraph/runtime/tensor.cpp                        +1   -41
src/ngraph/runtime/tensor.hpp                        +3   -32
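Taken together, the deletions below remove the experimental asynchronous execution API: Executable::begin_execute, Tensor::begin_read/begin_write, and the Backend::AsyncEvent queue behind them, along with the back-pointers (enable_shared_from_this, backend constructor parameters) that wired tensors and executables to their backend. The following is a hedged caller-side sketch, reconstructed from the removed declarations and assuming the pre-revert headers, of what that API looked like in use; it is not part of this commit, and the surrounding setup (backend creation, tensor allocation) is assumed rather than shown.

// Hedged sketch of the reverted async API as a caller would have used it.
// Assumes the pre-revert ngraph headers; not nGraph source code.
#include <cstddef>
#include <future>
#include <memory>
#include <vector>
#include "ngraph/runtime/executable.hpp"
#include "ngraph/runtime/tensor.hpp"

void run_one_iteration(const std::shared_ptr<ngraph::runtime::Executable>& exec,
                       const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& outputs,
                       const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& inputs,
                       void* result_buffer,
                       size_t result_bytes)
{
    // Kick off one iteration; the returned future tracks completion.
    std::future<void> ran = exec->begin_execute(outputs, inputs);
    ran.wait();

    // Fetch the result asynchronously; buffer_number 0 selects the first buffer of the
    // (removed) double-buffering scheme. result_buffer must stay live until the wait returns.
    std::future<void> read_done = outputs[0]->begin_read(result_buffer, result_bytes, 0);
    read_done.wait();
}

After this commit, callers fall back to the blocking call()/call_with_validate() and read()/write() entry points that remain in these headers.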
src/ngraph/runtime/backend.cpp

@@ -20,20 +20,13 @@
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/backend_manager.hpp"
#include "ngraph/runtime/dynamic/dynamic_backend.hpp"
#include "ngraph/runtime/executable.hpp"
#include "ngraph/util.hpp"

using namespace std;
using namespace ngraph;

runtime::Backend::Backend()
{
    async_thread_start();
}

runtime::Backend::~Backend()
{
    async_thread_stop();
}

std::shared_ptr<ngraph::Node> runtime::Backend::get_backend_op(const std::string& op_name, ...)

@@ -105,160 +98,6 @@ std::shared_ptr<runtime::Executable> runtime::Backend::load(istream& input_strea
    throw runtime_error("load opertion unimplemented.");
}

runtime::Backend::AsyncEvent::AsyncEvent(Type type,
                                         const shared_ptr<Tensor>& tensor,
                                         void* p,
                                         size_t size_in_bytes,
                                         size_t buffer_number)
    : m_type{type}
    , m_buffer_number{buffer_number}
    , m_data{p}
    , m_size_in_bytes{size_in_bytes}
    , m_executable{nullptr}
    , m_tensor{tensor}
    , m_outputs{nullptr}
    , m_inputs{nullptr}
{
}

runtime::Backend::AsyncEvent::AsyncEvent(const shared_ptr<Executable>& executable,
                                         const vector<shared_ptr<runtime::Tensor>>& outputs,
                                         const vector<shared_ptr<runtime::Tensor>>& inputs)
    : m_type{Type::EXECUTE}
    , m_buffer_number{0}
    , m_data{nullptr}
    , m_size_in_bytes{0}
    , m_executable{executable}
    , m_tensor{nullptr}
    , m_outputs{outputs}
    , m_inputs{inputs}
{
}

future<void> runtime::Backend::post_async_read_event(const shared_ptr<Tensor>& tensor,
                                                     void* p,
                                                     size_t size_in_bytes,
                                                     size_t buffer_number)
{
    auto event =
        make_shared<AsyncEvent>(AsyncEvent::Type::READ, tensor, p, size_in_bytes, buffer_number);
    unique_lock<std::mutex> lock(m_event_queue_mutex);
    m_event_queue.push_back(event);
    m_event_queue_condition.notify_all();
    return event->get_future();
}

future<void> runtime::Backend::post_async_write_event(const shared_ptr<Tensor>& tensor,
                                                      const void* p,
                                                      size_t size_in_bytes,
                                                      size_t buffer_number)
{
    auto event = make_shared<AsyncEvent>(
        AsyncEvent::Type::WRITE, tensor, const_cast<void*>(p), size_in_bytes, buffer_number);
    unique_lock<std::mutex> lock(m_event_queue_mutex);
    m_event_queue.push_back(event);
    m_event_queue_condition.notify_all();
    return event->get_future();
}

future<void> runtime::Backend::post_async_execute_event(
    const std::shared_ptr<Executable>& executable,
    const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
    const std::vector<std::shared_ptr<runtime::Tensor>>& inputs)
{
    auto event = make_shared<AsyncEvent>(executable, outputs, inputs);
    unique_lock<std::mutex> lock(m_event_queue_mutex);
    m_event_queue.push_back(event);
    m_event_queue_condition.notify_all();
    return event->get_future();
}

void runtime::Backend::async_thread_start()
{
    if (!m_event_queue_active)
    {
        m_event_queue_active = true;
        m_event_queue_thread =
            unique_ptr<thread>(new thread(&runtime::Backend::async_thread_entry, this));
    }
}

void runtime::Backend::async_thread_stop()
{
    if (m_event_queue_active)
    {
        {
            unique_lock<std::mutex> lock(m_event_queue_mutex);
            m_event_queue_active = false;
            m_event_queue_condition.notify_all();
        }
        m_event_queue_thread->join();
    }
}

static void local_thread_entry(shared_ptr<runtime::Backend::AsyncEvent> event)
{
    event->get_executable()->call(event->get_outputs(), event->get_inputs());
    event->signal_result();
};

void runtime::Backend::async_thread_process(const shared_ptr<AsyncEvent>& event)
{
    switch (event->get_type())
    {
    case AsyncEvent::Type::READ:
        event->get_tensor()->read(event->get_data(), event->get_size_in_bytes());
        event->signal_result();
        break;
    case AsyncEvent::Type::WRITE:
        event->get_tensor()->write(event->get_data(), event->get_size_in_bytes());
        event->signal_result();
        break;
    case AsyncEvent::Type::EXECUTE:
    {
        std::thread(local_thread_entry, event).detach();
        break;
    }
    }
}

void runtime::Backend::async_thread_entry()
{
    unique_lock<std::mutex> lock(m_event_queue_mutex);
    while (m_event_queue_active)
    {
        m_event_queue_condition.wait(lock);
        while (!m_event_queue.empty())
        {
            async_thread_process(m_event_queue.front());
            m_event_queue.pop_front();
        }
    }
}

namespace ngraph
{
    namespace runtime
    {
        ostream& operator<<(ostream& out, const ngraph::runtime::Backend::AsyncEvent& event)
        {
            out << "Async{";
            switch (event.get_type())
            {
            case runtime::Backend::AsyncEvent::Type::READ:
                out << "READ " << locale_string(event.get_size_in_bytes());
                break;
            case runtime::Backend::AsyncEvent::Type::WRITE:
                out << "WRITE " << locale_string(event.get_size_in_bytes());
                break;
            case runtime::Backend::AsyncEvent::Type::EXECUTE: out << "EXECUTE"; break;
            }
            out << "}";
            return out;
        }
    }
}

bool runtime::Backend::set_config(const map<string, string>& config, string& error)
{
    error = "set_config not supported";
...
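The deleted backend.cpp code above is a single-worker event queue: producers append an AsyncEvent under a mutex, signal a condition variable, and hand back the event's future, while async_thread_entry drains the deque on a dedicated thread. Below is a standalone sketch of that pattern; it is not nGraph code, and it uses a predicate wait (rather than the bare wait() above) to avoid missed wakeups.

// Standalone sketch of the producer/consumer pattern the removed code implemented.
#include <condition_variable>
#include <deque>
#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <thread>

// One worker thread services a queue of posted tasks; each post() returns a
// std::future<void> that is fulfilled when the worker finishes that task.
class EventQueue
{
public:
    EventQueue()
        : m_active{true}
        , m_worker{&EventQueue::run, this}
    {
    }

    ~EventQueue()
    {
        {
            std::unique_lock<std::mutex> lock(m_mutex);
            m_active = false;
        }
        m_condition.notify_all();
        m_worker.join();
    }

    std::future<void> post(std::function<void()> work)
    {
        auto task = std::make_shared<std::packaged_task<void()>>(std::move(work));
        std::future<void> result = task->get_future();
        {
            std::unique_lock<std::mutex> lock(m_mutex);
            m_queue.push_back(task);
        }
        m_condition.notify_all();
        return result;
    }

private:
    void run()
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        for (;;)
        {
            // Predicate form avoids losing a notification sent before the worker waits.
            m_condition.wait(lock, [this] { return !m_active || !m_queue.empty(); });
            while (!m_queue.empty())
            {
                auto task = m_queue.front();
                m_queue.pop_front();
                lock.unlock(); // run the task without holding the queue lock
                (*task)();
                lock.lock();
            }
            if (!m_active)
            {
                return;
            }
        }
    }

    std::deque<std::shared_ptr<std::packaged_task<void()>>> m_queue;
    std::mutex m_mutex;
    std::condition_variable m_condition;
    bool m_active;
    std::thread m_worker;
};

int main()
{
    EventQueue queue;
    std::future<void> done = queue.post([] { std::cout << "event processed\n"; });
    done.wait();
}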
src/ngraph/runtime/backend.hpp

@@ -16,7 +16,6 @@
#pragma once

#include <future>
#include <memory>

#include "ngraph/function.hpp"

@@ -43,7 +42,6 @@ namespace ngraph
class ngraph::runtime::Backend
{
public:
    Backend();
    virtual ~Backend();
    /// \brief Create a new Backend object
    /// \param type The name of a registered backend, such as "CPU" or "GPU".

@@ -169,76 +167,4 @@ public:
    /// \returns true if the configuration is supported, false otherwise. On false the error
    /// parameter value is valid.
    virtual bool set_config(const std::map<std::string, std::string>& config, std::string& error);

    friend class ngraph::runtime::Tensor;
    friend class ngraph::runtime::Executable;

    class AsyncEvent
    {
    public:
        enum class Type
        {
            READ,
            WRITE,
            EXECUTE
        };
        AsyncEvent(Type type,
                   const std::shared_ptr<Tensor>& tensor,
                   void* p,
                   size_t size_in_bytes,
                   size_t buffer_number);
        AsyncEvent(const std::shared_ptr<Executable>& m_executable,
                   const std::vector<std::shared_ptr<runtime::Tensor>>& m_outputs,
                   const std::vector<std::shared_ptr<runtime::Tensor>>& m_inputs);
        void* get_data() const { return m_data; }
        size_t get_size_in_bytes() const { return m_size_in_bytes; }
        Type get_type() const { return m_type; }
        size_t get_buffer_number() const { return m_buffer_number; }
        std::shared_ptr<Executable> get_executable() const { return m_executable; }
        std::shared_ptr<Tensor> get_tensor() const { return m_tensor; }
        const std::vector<std::shared_ptr<runtime::Tensor>>& get_outputs() const
        {
            return m_outputs;
        }
        const std::vector<std::shared_ptr<runtime::Tensor>>& get_inputs() const
        {
            return m_inputs;
        }
        std::future<void> get_future() { return m_promise.get_future(); }
        void signal_result() { m_promise.set_value(); }
        friend std::ostream& operator<<(std::ostream& out, const AsyncEvent& event);

    private:
        const Type m_type;
        size_t m_buffer_number;
        void* m_data;
        const size_t m_size_in_bytes;
        std::shared_ptr<Executable> m_executable;
        std::shared_ptr<Tensor> m_tensor;
        std::vector<std::shared_ptr<runtime::Tensor>> m_outputs;
        std::vector<std::shared_ptr<runtime::Tensor>> m_inputs;
        std::promise<void> m_promise;
    };

protected:
    std::future<void> post_async_read_event(const std::shared_ptr<Tensor>& tensor,
                                            void* p,
                                            size_t size_in_bytes,
                                            size_t buffer_number);
    std::future<void> post_async_write_event(const std::shared_ptr<Tensor>& tensor,
                                             const void* p,
                                             size_t size_in_bytes,
                                             size_t buffer_number);
    std::future<void> post_async_execute_event(
        const std::shared_ptr<Executable>& executable,
        const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
        const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);
    void async_thread_start();
    void async_thread_stop();
    void async_thread_process(const std::shared_ptr<AsyncEvent>& event);
    void async_thread_entry();
    std::deque<std::shared_ptr<AsyncEvent>> m_event_queue;
    std::mutex m_event_queue_mutex;
    std::condition_variable m_event_queue_condition;
    std::unique_ptr<std::thread> m_event_queue_thread;
    bool m_event_queue_active = false;
};
src/ngraph/runtime/executable.cpp

@@ -17,7 +17,6 @@
#include <sstream>

#include "ngraph/file_util.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/executable.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/util.hpp"

@@ -29,11 +28,6 @@ runtime::Executable::Executable()
{
}

runtime::Executable::Executable(const shared_ptr<Backend>& backend)
    : m_backend{backend}
{
}

runtime::Executable::~Executable()
{
}

@@ -129,16 +123,3 @@ void runtime::Executable::save(std::ostream& output_stream)
{
    throw runtime_error("save opertion unimplemented.");
}

future<void> runtime::Executable::begin_execute(const vector<shared_ptr<runtime::Tensor>>& outputs,
                                                const vector<shared_ptr<runtime::Tensor>>& inputs)
{
    if (m_backend)
    {
        return m_backend->post_async_execute_event(shared_from_this(), outputs, inputs);
    }
    else
    {
        throw runtime_error("Async operations not supported for this backend");
    }
}
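The removed begin_execute above hands shared_from_this() to the backend's event queue, which is why Executable carried a Backend back-pointer and, in executable.hpp below, derived from enable_shared_from_this. A minimal standalone illustration of that ownership pattern follows; the Job class is hypothetical and not nGraph code.

// Sketch of why an object posting itself to an async queue needs shared ownership:
// the queued work must keep the object alive until it runs.
#include <future>
#include <iostream>
#include <memory>

class Job : public std::enable_shared_from_this<Job>
{
public:
    std::future<void> begin()
    {
        auto self = shared_from_this(); // keeps *this alive while the worker runs
        return std::async(std::launch::async, [self] { self->run(); });
    }

private:
    void run() { std::cout << "executed\n"; }
};

int main()
{
    auto job = std::make_shared<Job>(); // shared_from_this requires shared ownership
    std::future<void> done = job->begin();
    done.wait();
}

Reverting begin_execute removes the need for that plumbing, which is what the executable.hpp and interpreter hunks below undo.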
src/ngraph/runtime/executable.hpp

@@ -16,7 +16,6 @@
#pragma once

#include <future>
#include <memory>

#include "ngraph/function.hpp"

@@ -30,15 +29,13 @@ namespace ngraph
    {
        class Tensor;
        class Executable;
        class Backend;
    }
}

-class ngraph::runtime::Executable : public std::enable_shared_from_this<Executable>
+class ngraph::runtime::Executable
{
public:
    Executable();
    Executable(const std::shared_ptr<Backend>& backend);
    virtual ~Executable();
    /// \param outputs vector of runtime::Tensor used as outputs

@@ -54,16 +51,6 @@ public:
    bool call_with_validate(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
                            const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);

    /// \brief Asynchronously executes a single iteration of the Function. The `future` is used
    /// to monitor the execution.
    /// \param outputs vector of runtime::Tensor used as outputs
    /// \param inputs vector of runtime::Tensor used as inputs
    /// \returns a valid std::future to monitor the execution. Use future.get() to get the results
    /// or future.wait*() to wait for completion.
    virtual std::future<void> begin_execute(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
                                            const std::vector<std::shared_ptr<runtime::Tensor>>& inputs);

    /// \brief Collect performance information gathered on a Function.
    /// \returns Vector of PerformanceCounter information.
    virtual std::vector<PerformanceCounter> get_performance_data() const;

@@ -95,5 +82,4 @@ protected:
private:
    ngraph::ParameterVector m_parameters;
    ngraph::ResultVector m_results;
    std::shared_ptr<Backend> m_backend;
};
src/ngraph/runtime/host_tensor.cpp

@@ -30,53 +30,10 @@ runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
                                const Shape& shape,
                                void* memory_pointer,
                                const string& name)
    : HostTensor(nullptr, element_type, shape, memory_pointer, name)
{
}

runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
                                const Shape& shape,
                                const string& name)
    : HostTensor(nullptr, element_type, shape, nullptr, name)
{
}

runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, const Shape& shape)
    : HostTensor(nullptr, element_type, shape, nullptr, "")
{
}

runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
                                const Shape& shape,
                                void* memory_pointer)
    : HostTensor(nullptr, element_type, shape, memory_pointer, "")
{
}

runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
                                const ngraph::element::Type& element_type,
                                const Shape& shape)
    : HostTensor(backend, element_type, shape, nullptr, "")
{
}

runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
                                const ngraph::element::Type& element_type,
                                const Shape& shape,
                                void* memory_pointer)
    : HostTensor(backend, element_type, shape, memory_pointer, "")
{
}

runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
                                const ngraph::element::Type& element_type,
                                const Shape& shape,
                                void* memory_pointer,
                                const std::string& name)
-    : runtime::Tensor(backend, std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name))
+    : runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, name))
    , m_allocated_buffer_pool(nullptr)
    , m_aligned_buffer_pool(nullptr)
{
    m_descriptor->set_tensor_layout(
        std::make_shared<ngraph::descriptor::layout::DenseTensorLayout>(*m_descriptor));

@@ -100,6 +57,25 @@ runtime::HostTensor::HostTensor(const std::shared_ptr<ngraph::runtime::Backend>&
    }
}

runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
                                const Shape& shape,
                                const string& name)
    : HostTensor(element_type, shape, nullptr, name)
{
}

runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, const Shape& shape)
    : HostTensor(element_type, shape, nullptr, "")
{
}

runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type,
                                const Shape& shape,
                                void* memory_pointer)
    : HostTensor(element_type, shape, memory_pointer, "")
{
}

runtime::HostTensor::~HostTensor()
{
    if (m_allocated_buffer_pool != nullptr)
...
src/ngraph/runtime/host_tensor.hpp

@@ -42,18 +42,6 @@ public:
               const std::string& name);
    HostTensor(const ngraph::element::Type& element_type, const Shape& shape);
    HostTensor(const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer);
    HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
               const ngraph::element::Type& element_type,
               const Shape& shape);
    HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
               const ngraph::element::Type& element_type,
               const Shape& shape,
               void* memory_pointer);
    HostTensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
               const ngraph::element::Type& element_type,
               const Shape& shape,
               void* memory_pointer,
               const std::string& name);
    virtual ~HostTensor() override;
    char* get_data_ptr();
...
src/ngraph/runtime/interpreter/int_backend.cpp

@@ -54,20 +54,20 @@ runtime::interpreter::INTBackend::INTBackend(const vector<string>& unsupported_o
shared_ptr<runtime::Tensor>
    runtime::interpreter::INTBackend::create_tensor(const element::Type& type, const Shape& shape)
{
-    return make_shared<runtime::HostTensor>(shared_from_this(), type, shape);
+    return make_shared<runtime::HostTensor>(type, shape);
}

shared_ptr<runtime::Tensor> runtime::interpreter::INTBackend::create_tensor(
    const element::Type& type, const Shape& shape, void* memory_pointer)
{
-    return make_shared<runtime::HostTensor>(shared_from_this(), type, shape, memory_pointer);
+    return make_shared<runtime::HostTensor>(type, shape, memory_pointer);
}

shared_ptr<runtime::Executable>
    runtime::interpreter::INTBackend::compile(shared_ptr<Function> function,
                                              bool enable_performance_collection)
{
-    return make_shared<INTExecutable>(shared_from_this(), function, enable_performance_collection);
+    return make_shared<INTExecutable>(function, enable_performance_collection);
}

bool runtime::interpreter::INTBackend::is_supported(const Node& node) const
...
src/ngraph/runtime/interpreter/int_backend.hpp

@@ -38,8 +38,7 @@ namespace ngraph
    }
}

-class ngraph::runtime::interpreter::INTBackend : public Backend,
-                                                 public std::enable_shared_from_this<INTBackend>
+class ngraph::runtime::interpreter::INTBackend : public Backend
{
public:
    INTBackend();
...
src/ngraph/runtime/interpreter/int_executable.cpp

@@ -38,11 +38,9 @@ using namespace ngraph;
using descriptor::layout::DenseTensorLayout;

-runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr<runtime::Backend>& backend,
-                                                   const shared_ptr<Function>& function,
+runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr<Function>& function,
                                                    bool enable_performance_collection)
-    : Executable{backend}
-    , m_is_compiled{true}
+    : m_is_compiled{true}
    , m_performance_counters_enabled{enable_performance_collection}
{
    m_function = clone_function(*function);
...
src/ngraph/runtime/interpreter/int_executable.hpp

@@ -69,7 +69,6 @@
#include "ngraph/op/topk.hpp"
#include "ngraph/runtime/aligned_buffer.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/executable.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#ifdef INTERPRETER_USE_HYBRID
#include "ngraph/runtime/hybrid/op/function_call.hpp"

@@ -175,8 +174,7 @@ class ngraph::runtime::interpreter::INTExecutable : public Executable
    friend class INTBackend;

public:
-    INTExecutable(const std::shared_ptr<runtime::Backend>& backend,
-                  const std::shared_ptr<Function>& function,
+    INTExecutable(const std::shared_ptr<Function>& function,
                  bool enable_performance_collection = false);

    bool call(const std::vector<std::shared_ptr<Tensor>>& outputs,
...
src/ngraph/runtime/tensor.cpp

@@ -14,12 +14,10 @@
// limitations under the License.
//*****************************************************************************

#include <functional>

#include "ngraph/runtime/tensor.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/aligned_buffer.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/type/element_type.hpp"

using namespace ngraph;

@@ -97,41 +95,3 @@ void runtime::Tensor::copy_from(const ngraph::runtime::Tensor& source)
    source.read(buffer.get_ptr(), size);
    write(buffer.get_ptr(), size);
}

future<void> runtime::Tensor::begin_write(const void* p, size_t size_in_bytes, size_t buffer_number)
{
    if (m_backend)
    {
        // auto f = m_promise.get_future();
        return m_backend->post_async_write_event(shared_from_this(), p, size_in_bytes, buffer_number);
    }
    else
    {
        throw runtime_error("Async operations not supported for this backend");
    }
    // using namespace std::placeholders;
    // auto f = m_promise.get_future();
    // auto bound_f = bind(&Tensor::write, this, _1, _2, _3);
    // async(bound_f, p, 0, n);
    // return f;
}

future<void> runtime::Tensor::begin_read(void* p, size_t size_in_bytes, size_t buffer_number)
{
    if (m_backend)
    {
        // auto f = m_promise.get_future();
        return m_backend->post_async_read_event(shared_from_this(), p, size_in_bytes, buffer_number);
    }
    else
    {
        throw runtime_error("Async operations not supported for this backend");
    }
    // using namespace std::placeholders;
    // auto f = m_promise.get_future();
    // auto bound_f = bind(&Tensor::read, this, _1, _2, _3);
    // async(bound_f, p, 0, n);
    // return f;
}
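The removed begin_write/begin_read above simply forward to the backend's event queue; their buffer_number argument (documented in the tensor.hpp hunks below) was intended for double-buffering. The following is a hedged sketch of how a caller might have overlapped staging the next input batch with execution, assuming the pre-revert headers; the ping-pong schedule and batch layout are illustrative assumptions, not nGraph code or part of this commit.

// Hedged double-buffering sketch built on the reverted begin_write/begin_execute API.
#include <cstddef>
#include <future>
#include <memory>
#include <vector>
#include "ngraph/runtime/executable.hpp"
#include "ngraph/runtime/tensor.hpp"

using ngraph::runtime::Executable;
using ngraph::runtime::Tensor;

void pipelined_run(const std::shared_ptr<Executable>& exec,
                   const std::vector<std::shared_ptr<Tensor>>& outputs,
                   const std::vector<std::shared_ptr<Tensor>>& inputs,
                   const std::vector<const void*>& batches,
                   size_t batch_bytes)
{
    // Stage the first batch into buffer 0.
    std::future<void> staged = inputs[0]->begin_write(batches[0], batch_bytes, 0);
    for (size_t i = 0; i < batches.size(); ++i)
    {
        staged.wait(); // batch i is resident
        std::future<void> ran = exec->begin_execute(outputs, inputs);
        if (i + 1 < batches.size())
        {
            // Stage the next batch into the other buffer while batch i executes.
            staged = inputs[0]->begin_write(batches[i + 1], batch_bytes, (i + 1) % 2);
        }
        ran.wait(); // results for batch i are ready in outputs
    }
}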
src/ngraph/runtime/tensor.hpp

@@ -16,7 +16,6 @@
#pragma once

#include <future>
#include <memory>
#include <vector>

@@ -36,10 +35,8 @@ namespace ngraph
    namespace runtime
    {
-        class Tensor : public std::enable_shared_from_this<Tensor>
+        class Tensor
        {
            friend class Executable;

        protected:
            Tensor(const std::shared_ptr<ngraph::descriptor::Tensor>& descriptor)
                : m_descriptor(descriptor)

@@ -47,14 +44,6 @@ namespace ngraph
            {
            }

            Tensor(const std::shared_ptr<ngraph::runtime::Backend>& backend,
                   const std::shared_ptr<ngraph::descriptor::Tensor>& descriptor)
                : m_descriptor(descriptor)
                , m_stale(true)
                , m_backend{backend}
            {
            }

        public:
            virtual ~Tensor() {}
            Tensor& operator=(const Tensor&) = default;

@@ -114,24 +103,6 @@ namespace ngraph
            /// \param n Number of bytes to read, must be integral number of elements.
            virtual void read(void* p, size_t n) const = 0;

            /// \brief Write bytes into the tensor. The data buffer pointed to by `p` must
            /// be kept live until after the future is signaled complete
            /// \param p Pointer to source of data
            /// \param size_in_bytes Number of bytes to write, must be integral number of elements.
            /// \param buffer_number For double-buffering, which buffer to write.
            /// \return std::future to track the operation
            virtual std::future<void> begin_write(const void* p, size_t size_in_bytes, size_t buffer_number);

            /// \brief Read bytes from the tensor. The data buffer pointed to by `p` must
            /// be kept live until after the future is signaled complete
            /// \param p Pointer to destination for data
            /// \param size_in_bytes Number of bytes to read, must be integral number of elements.
            /// \param buffer_number For double-buffering, which buffer to read.
            /// \return std::future to track the operation
            virtual std::future<void> begin_read(void* p, size_t size_in_bytes, size_t buffer_number);

            /// \brief copy bytes directly from source to this tensor
            /// \param source The source tensor
            virtual void copy_from(const ngraph::runtime::Tensor& source);

@@ -161,8 +132,8 @@ namespace ngraph
        protected:
            std::shared_ptr<ngraph::descriptor::Tensor> m_descriptor;
            bool m_stale;
            std::promise<void> m_promise;
            std::shared_ptr<ngraph::runtime::Backend> m_backend;
        };

        using TensorViewPtrs = std::vector<std::shared_ptr<Tensor>>;
    }
}