Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in / Register
Toggle navigation
N
ngraph
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Packages
Packages
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
submodule
ngraph
Commits
b0de2d3e
Commit
b0de2d3e
authored
Nov 08, 2017
by
Robert Kimball
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
input tensors defined
parent
073adbfb
Expand all
Show whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
330 additions
and
135 deletions
+330
-135
CMakeLists.txt
src/ngraph/CMakeLists.txt
+1
-0
tensor.hpp
src/ngraph/descriptor/tensor.hpp
+1
-1
memory_layout.cpp
src/ngraph/pass/memory_layout.cpp
+6
-1
memory_layout.hpp
src/ngraph/pass/memory_layout.hpp
+2
-0
backend.cpp
src/ngraph/runtime/backend.cpp
+1
-0
backend.hpp
src/ngraph/runtime/backend.hpp
+3
-0
call_frame.cpp
src/ngraph/runtime/cpu/call_frame.cpp
+35
-20
call_frame.hpp
src/ngraph/runtime/cpu/call_frame.hpp
+14
-22
cpu_backend.cpp
src/ngraph/runtime/cpu/cpu_backend.cpp
+16
-3
cpu_backend.hpp
src/ngraph/runtime/cpu/cpu_backend.hpp
+10
-3
eigen_utils.hpp
src/ngraph/runtime/cpu/eigen_utils.hpp
+24
-32
emitter.cpp
src/ngraph/runtime/cpu/emitter.cpp
+35
-45
external_function.cpp
src/ngraph/runtime/cpu/external_function.cpp
+0
-0
external_function.hpp
src/ngraph/runtime/cpu/external_function.hpp
+2
-6
tensor_view.cpp
src/ngraph/runtime/cpu/tensor_view.cpp
+81
-0
tensor_view.hpp
src/ngraph/runtime/cpu/tensor_view.hpp
+58
-0
ndarray.hpp
src/ngraph/runtime/ndarray.hpp
+2
-0
tensor_view.hpp
src/ngraph/runtime/tensor_view.hpp
+26
-1
tensor_view_info.hpp
src/ngraph/runtime/tensor_view_info.hpp
+13
-1
backend_test.in.cpp
test/backend_test.in.cpp
+0
-0
No files found.
src/ngraph/CMakeLists.txt
View file @
b0de2d3e
...
@@ -112,6 +112,7 @@ if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND
...
@@ -112,6 +112,7 @@ if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND
runtime/cpu/cpu_kernels.cpp
runtime/cpu/cpu_kernels.cpp
runtime/cpu/emitter.cpp
runtime/cpu/emitter.cpp
runtime/cpu/external_function.cpp
runtime/cpu/external_function.cpp
runtime/cpu/tensor_view.cpp
)
)
# LLVM binary builds are typically built without RTTI
# LLVM binary builds are typically built without RTTI
# The built-in headers are in a version-specific directory
# The built-in headers are in a version-specific directory
...
...
src/ngraph/descriptor/tensor.hpp
View file @
b0de2d3e
...
@@ -59,7 +59,7 @@ public:
...
@@ -59,7 +59,7 @@ public:
size_t
size
()
const
;
size_t
size
()
const
;
void
set_pool_offset
(
size_t
);
void
set_pool_offset
(
size_t
);
size_t
get_pool_offset
()
const
;
size_t
get_pool_offset
()
const
;
const
element
::
Type
&
get_element_type
()
const
{
return
m_element_type
;
}
static
std
::
string
make_tensor_name
(
const
Node
*
node
,
size_t
value_index
);
static
std
::
string
make_tensor_name
(
const
Node
*
node
,
size_t
value_index
);
protected
:
protected
:
...
...
src/ngraph/pass/memory_layout.cpp
View file @
b0de2d3e
...
@@ -26,9 +26,14 @@ using namespace std;
...
@@ -26,9 +26,14 @@ using namespace std;
using
namespace
ngraph
;
using
namespace
ngraph
;
using
namespace
ngraph
::
descriptor
;
using
namespace
ngraph
::
descriptor
;
pass
::
MemoryLayout
::
MemoryLayout
(
size_t
alignment
)
:
m_alignment
(
alignment
)
{
}
bool
pass
::
MemoryLayout
::
run_on_call_graph
(
std
::
list
<
std
::
shared_ptr
<
Node
>>&
node_list
)
bool
pass
::
MemoryLayout
::
run_on_call_graph
(
std
::
list
<
std
::
shared_ptr
<
Node
>>&
node_list
)
{
{
MemoryManager
mm
;
MemoryManager
mm
(
m_alignment
)
;
for
(
shared_ptr
<
Node
>
node
:
node_list
)
for
(
shared_ptr
<
Node
>
node
:
node_list
)
{
{
for
(
Tensor
*
tensor
:
node
->
liveness_new_list
)
for
(
Tensor
*
tensor
:
node
->
liveness_new_list
)
...
...
src/ngraph/pass/memory_layout.hpp
View file @
b0de2d3e
...
@@ -33,9 +33,11 @@ namespace ngraph
...
@@ -33,9 +33,11 @@ namespace ngraph
class
ngraph
::
pass
::
MemoryLayout
:
public
CallGraphPass
class
ngraph
::
pass
::
MemoryLayout
:
public
CallGraphPass
{
{
public
:
public
:
MemoryLayout
(
size_t
alignment
=
1
);
virtual
bool
run_on_call_graph
(
std
::
list
<
std
::
shared_ptr
<
Node
>>&
)
override
;
virtual
bool
run_on_call_graph
(
std
::
list
<
std
::
shared_ptr
<
Node
>>&
)
override
;
private
:
private
:
size_t
m_alignment
;
};
};
class
ngraph
::
pass
::
MemoryManager
class
ngraph
::
pass
::
MemoryManager
...
...
src/ngraph/runtime/backend.cpp
View file @
b0de2d3e
...
@@ -24,6 +24,7 @@ using namespace ngraph::runtime;
...
@@ -24,6 +24,7 @@ using namespace ngraph::runtime;
std
::
shared_ptr
<
TensorView
>
std
::
shared_ptr
<
TensorView
>
Backend
::
make_primary_tensor_view
(
const
ngraph
::
element
::
Type
&
element_type
,
const
Shape
&
shape
)
Backend
::
make_primary_tensor_view
(
const
ngraph
::
element
::
Type
&
element_type
,
const
Shape
&
shape
)
{
{
NGRAPH_INFO
;
return
element_type
.
make_primary_tensor_view
(
shape
);
return
element_type
.
make_primary_tensor_view
(
shape
);
}
}
...
...
src/ngraph/runtime/backend.hpp
View file @
b0de2d3e
...
@@ -17,6 +17,7 @@
...
@@ -17,6 +17,7 @@
#include <memory>
#include <memory>
#include "ngraph/common.hpp"
#include "ngraph/common.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/ndarray.hpp"
#include "ngraph/runtime/ndarray.hpp"
namespace
ngraph
namespace
ngraph
...
@@ -59,6 +60,7 @@ namespace ngraph
...
@@ -59,6 +60,7 @@ namespace ngraph
std
::
shared_ptr
<
ngraph
::
runtime
::
ParameterizedTensorView
<
ET
>>
std
::
shared_ptr
<
ngraph
::
runtime
::
ParameterizedTensorView
<
ET
>>
make_parameterized_tensor_view
(
const
Shape
&
shape
)
make_parameterized_tensor_view
(
const
Shape
&
shape
)
{
{
NGRAPH_INFO
;
return
std
::
dynamic_pointer_cast
<
ngraph
::
runtime
::
ParameterizedTensorView
<
ET
>>
(
return
std
::
dynamic_pointer_cast
<
ngraph
::
runtime
::
ParameterizedTensorView
<
ET
>>
(
make_primary_tensor_view
(
ET
::
element_type
(),
shape
));
make_primary_tensor_view
(
ET
::
element_type
(),
shape
));
}
}
...
@@ -67,6 +69,7 @@ namespace ngraph
...
@@ -67,6 +69,7 @@ namespace ngraph
std
::
shared_ptr
<
ngraph
::
runtime
::
ParameterizedTensorView
<
ET
>>
std
::
shared_ptr
<
ngraph
::
runtime
::
ParameterizedTensorView
<
ET
>>
make_parameterized_tensor_view
(
const
NDArrayBase
<
typename
ET
::
type
>&
ndarray
)
make_parameterized_tensor_view
(
const
NDArrayBase
<
typename
ET
::
type
>&
ndarray
)
{
{
NGRAPH_INFO
;
auto
result
=
auto
result
=
std
::
dynamic_pointer_cast
<
ngraph
::
runtime
::
ParameterizedTensorView
<
ET
>>
(
std
::
dynamic_pointer_cast
<
ngraph
::
runtime
::
ParameterizedTensorView
<
ET
>>
(
make_primary_tensor_view
(
ET
::
element_type
(),
ndarray
.
get_shape
()));
make_primary_tensor_view
(
ET
::
element_type
(),
ndarray
.
get_shape
()));
...
...
src/ngraph/runtime/cpu/call_frame.cpp
View file @
b0de2d3e
...
@@ -15,53 +15,68 @@
...
@@ -15,53 +15,68 @@
#include <algorithm>
#include <algorithm>
#include "call_frame.hpp"
#include "call_frame.hpp"
#include "ngraph/runtime/cpu/tensor_view.hpp"
using
namespace
std
;
using
namespace
std
;
using
namespace
ngraph
::
runtime
::
cpu
;
using
namespace
ngraph
::
runtime
::
cpu
;
CallFrame
::
CallFrame
(
EntryPoint
compiled_function
,
CallFrame
::
CallFrame
(
EntryPoint
compiled_function
,
size_t
n_outputs
,
size_t
n_inputs
,
const
TensorViewPtrs
&
temps
,
const
std
::
vector
<
std
::
shared_ptr
<
CallFrame
>>&
callees
)
const
std
::
vector
<
std
::
shared_ptr
<
CallFrame
>>&
callees
)
:
m_n_outputs
(
n_outputs
)
:
m_compiled_function
(
compiled_function
)
,
m_n_inputs
(
n_inputs
)
,
m_tensor_views
(
n_outputs
+
n_inputs
+
temps
.
size
())
,
m_compiled_function
(
compiled_function
)
,
m_callees
(
callees
)
,
m_callees
(
callees
)
{
{
copy
(
temps
.
begin
(),
temps
.
end
(),
m_tensor_views
.
begin
()
+
m_n_outputs
+
m_n_inputs
);
}
}
void
CallFrame
::
tensor_call
(
void
CallFrame
::
tensor_call
(
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
TensorView
>>&
inputs
,
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
TensorView
>>&
input
_tv
s
,
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
TensorView
>>&
outputs
)
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
TensorView
>>&
output
_tv
s
)
{
{
copy
(
outputs
.
begin
(),
outputs
.
end
(),
m_tensor_views
.
begin
());
m_inputs
.
clear
();
copy
(
inputs
.
begin
(),
inputs
.
end
(),
m_tensor_views
.
begin
()
+
m_n_outputs
);
m_outputs
.
clear
();
for
(
size_t
i
=
0
;
i
<
input_tvs
.
size
();
i
++
)
{
shared_ptr
<
runtime
::
cpu
::
CPUTensorView
>
tv
=
static_pointer_cast
<
runtime
::
cpu
::
CPUTensorView
>
(
input_tvs
[
i
]);
m_inputs
.
push_back
(
tv
->
get_data_ptr
());
}
for
(
size_t
i
=
0
;
i
<
output_tvs
.
size
();
i
++
)
{
shared_ptr
<
runtime
::
cpu
::
CPUTensorView
>
tv
=
static_pointer_cast
<
runtime
::
cpu
::
CPUTensorView
>
(
output_tvs
[
i
]);
m_outputs
.
push_back
(
tv
->
get_data_ptr
());
}
// Invoke compiled computation
// Invoke compiled computation
m_compiled_function
(
this
,
m_tensor_views
,
m_callees
);
m_compiled_function
(
this
);
// Don't hold onto inputs/outputs
fill_n
(
m_tensor_views
.
begin
(),
m_n_outputs
+
m_n_inputs
,
nullptr
);
}
}
void
CallFrame
::
operator
()(
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
Value
>>&
arguments
,
void
CallFrame
::
operator
()(
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
Value
>>&
arguments
,
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
Value
>>&
results
)
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
Value
>>&
results
)
{
{
// TODO: Check types of args and result
// TODO: Check types of args and result
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
TensorView
>>
inputs
;
vector
<
shared_ptr
<
ngraph
::
runtime
::
TensorView
>>
inputs
;
for
(
auto
argument
:
arguments
)
for
(
shared_ptr
<
ngraph
::
runtime
::
Value
>
argument
:
arguments
)
{
{
argument
->
collect_tensor_views
(
inputs
,
argument
);
argument
->
collect_tensor_views
(
inputs
,
argument
);
}
}
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
TensorView
>>
outputs
;
vector
<
shared_ptr
<
ngraph
::
runtime
::
TensorView
>>
outputs
;
for
(
auto
result
:
results
)
for
(
shared_ptr
<
ngraph
::
runtime
::
Value
>
result
:
results
)
{
{
result
->
collect_tensor_views
(
outputs
,
result
);
result
->
collect_tensor_views
(
outputs
,
result
);
}
}
tensor_call
(
inputs
,
outputs
);
tensor_call
(
inputs
,
outputs
);
}
}
void
*
CallFrame
::
get_input_data
(
size_t
index
)
{
void
*
rc
=
m_inputs
.
at
(
index
);
return
rc
;
}
void
*
CallFrame
::
get_output_data
(
size_t
index
)
{
void
*
rc
=
m_outputs
.
at
(
index
);
return
rc
;
}
src/ngraph/runtime/cpu/call_frame.hpp
View file @
b0de2d3e
...
@@ -32,18 +32,15 @@ namespace ngraph
...
@@ -32,18 +32,15 @@ namespace ngraph
{
{
class
CallFrame
;
class
CallFrame
;
using
EntryPoint
=
std
::
function
<
void
(
ngraph
::
runtime
::
cpu
::
CallFrame
*
,
using
EntryPoint
_t
=
void
(
ngraph
::
runtime
::
cpu
::
CallFrame
*
call_frame
);
ngraph
::
runtime
::
TensorViewPtrs
&
,
const
std
::
vector
<
std
::
shared_ptr
<
CallFrame
>>&
)
>
;
using
EntryPoint
=
std
::
function
<
EntryPoint_t
>
;
// Compile and execute graphs
// Compile and execute graphs
class
CallFrame
:
public
ngraph
::
runtime
::
CallFrame
class
CallFrame
:
public
ngraph
::
runtime
::
CallFrame
{
{
public
:
public
:
CallFrame
(
EntryPoint
compiled_function
,
CallFrame
(
EntryPoint
compiled_function
,
size_t
n_outputs
,
size_t
n_inputs
,
const
TensorViewPtrs
&
temps
,
const
std
::
vector
<
std
::
shared_ptr
<
CallFrame
>>&
callees
);
const
std
::
vector
<
std
::
shared_ptr
<
CallFrame
>>&
callees
);
/// @brief Invoke the function with values matching the signature of the function.
/// @brief Invoke the function with values matching the signature of the function.
...
@@ -53,30 +50,25 @@ namespace ngraph
...
@@ -53,30 +50,25 @@ namespace ngraph
operator
()(
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
Value
>>&
inputs
,
operator
()(
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
Value
>>&
inputs
,
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
Value
>>&
outputs
);
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
Value
>>&
outputs
);
/// @brief Invoke the function with tuples pre-expanded to their underlying tensor views.
/// @brief Invoke the function with tuples pre-expanded to their underlying
void
tensor_call
(
const
TensorViewPtrs
&
inputs
,
const
TensorViewPtrs
&
outputs
);
/// tensor views.
void
tensor_call
(
const
std
::
vector
<
std
::
shared_ptr
<
TensorView
>>&
inputs
,
const
std
::
vector
<
std
::
shared_ptr
<
TensorView
>>&
outputs
);
void
set_return
()
{
m_return
=
true
;
}
void
set_return
()
{
m_return
=
true
;
}
std
::
shared_ptr
<
TensorView
>
get_tensor_view
(
size_t
i
)
{
return
m_tensor_views
[
i
];
}
// const std::vector<std::shared_ptr<ngraph::runtime::Value>>& get_inputs();
template
<
typename
ET
>
// const std::vector<std::shared_ptr<ngraph::runtime::Value>>& get_outputs();
ParameterizedTensorView
<
ET
>*
get_parameterized_tensor_view
(
size_t
i
)
{
return
m_tensor_views
[
i
]
->
get_parameterized_tensor_view
<
ET
>
();
}
template
<
typename
ET
>
void
*
get_input_data
(
size_t
index
);
typename
ET
::
type
*
get_tensor_view_data
(
size_t
i
)
void
*
get_output_data
(
size_t
index
);
{
return
&
get_parameterized_tensor_view
<
ET
>
(
i
)
->
get_vector
()[
0
];
}
protected
:
protected
:
size_t
m_n_outputs
;
size_t
m_n_inputs
;
TensorViewPtrs
m_tensor_views
;
bool
m_return
;
bool
m_return
;
EntryPoint
m_compiled_function
;
EntryPoint
m_compiled_function
;
std
::
vector
<
std
::
shared_ptr
<
CallFrame
>>
m_callees
;
std
::
vector
<
std
::
shared_ptr
<
CallFrame
>>
m_callees
;
std
::
vector
<
void
*>
m_inputs
;
std
::
vector
<
void
*>
m_outputs
;
};
};
}
}
}
}
...
...
src/ngraph/runtime/cpu/cpu_backend.cpp
View file @
b0de2d3e
...
@@ -13,12 +13,25 @@
...
@@ -13,12 +13,25 @@
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
#include "ngraph/runtime/cpu/cpu_backend.hpp"
#include "ngraph/runtime/cpu/cpu_backend.hpp"
#include "ngraph/runtime/cpu/tensor_view.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/external_function.hpp"
using
namespace
ngraph
::
runtime
::
cpu
;
using
namespace
ngraph
;
using
namespace
std
;
std
::
shared_ptr
<
ngraph
::
runtime
::
CallFrame
>
extern
"C"
void
CPUBackend
::
make_call_frame
(
const
std
::
shared_ptr
<
ExternalFunction
>&
external_function
)
allocate_aligned_buffer
(
size_t
size
,
size_t
alignment
,
char
**
allocated
,
char
**
aligned_ptr
);
std
::
shared_ptr
<
ngraph
::
runtime
::
CallFrame
>
runtime
::
cpu
::
CPUBackend
::
make_call_frame
(
const
std
::
shared_ptr
<
ExternalFunction
>&
external_function
)
{
{
return
external_function
->
make_call_frame
();
return
external_function
->
make_call_frame
();
}
}
std
::
shared_ptr
<
ngraph
::
runtime
::
TensorView
>
runtime
::
cpu
::
CPUBackend
::
make_primary_tensor_view
(
const
ngraph
::
element
::
Type
&
element_type
,
const
Shape
&
shape
)
{
auto
rc
=
make_shared
<
runtime
::
cpu
::
CPUTensorView
>
(
element_type
,
shape
);
return
dynamic_pointer_cast
<
runtime
::
TensorView
>
(
rc
);
}
src/ngraph/runtime/cpu/cpu_backend.hpp
View file @
b0de2d3e
...
@@ -22,11 +22,18 @@ namespace ngraph
...
@@ -22,11 +22,18 @@ namespace ngraph
{
{
namespace
cpu
namespace
cpu
{
{
class
CPUBackend
:
public
Backend
static
size_t
alignment
=
64
;
class
CPUBackend
:
public
runtime
::
Backend
{
{
public
:
public
:
virtual
std
::
shared_ptr
<
ngraph
::
runtime
::
CallFrame
>
make_call_frame
(
std
::
shared_ptr
<
ngraph
::
runtime
::
CallFrame
>
make_call_frame
(
const
std
::
shared_ptr
<
ngraph
::
runtime
::
ExternalFunction
>&
external_function
);
const
std
::
shared_ptr
<
ngraph
::
runtime
::
ExternalFunction
>&
external_function
)
override
;
std
::
shared_ptr
<
ngraph
::
runtime
::
TensorView
>
make_primary_tensor_view
(
const
ngraph
::
element
::
Type
&
element_type
,
const
Shape
&
shape
)
override
;
};
};
}
}
}
}
...
...
src/ngraph/runtime/cpu/eigen_utils.hpp
View file @
b0de2d3e
...
@@ -37,25 +37,24 @@ namespace ngraph
...
@@ -37,25 +37,24 @@ namespace ngraph
using
DynamicStrides
=
Eigen
::
Stride
<
Eigen
::
Dynamic
,
Eigen
::
Dynamic
>
;
using
DynamicStrides
=
Eigen
::
Stride
<
Eigen
::
Dynamic
,
Eigen
::
Dynamic
>
;
using
VectorStrides
=
Eigen
::
Stride
<
Eigen
::
Dynamic
,
1
>
;
using
VectorStrides
=
Eigen
::
Stride
<
Eigen
::
Dynamic
,
1
>
;
template
<
typename
ET
>
template
<
typename
T
>
using
DynamicArray
=
using
DynamicArray
=
Eigen
::
Array
<
T
,
Eigen
::
Dynamic
,
Eigen
::
Dynamic
>
;
Eigen
::
Array
<
typename
ET
::
type
,
Eigen
::
Dynamic
,
Eigen
::
Dynamic
>
;
template
<
typename
E
T
>
template
<
typename
T
>
using
EigenArrayBase
=
Eigen
::
Map
<
DynamicArray
<
E
T
>
,
0
,
DynamicStrides
>
;
using
EigenArrayBase
=
Eigen
::
Map
<
DynamicArray
<
T
>
,
0
,
DynamicStrides
>
;
template
<
typename
E
T
>
template
<
typename
T
>
using
DynamicMatrix
=
Eigen
::
using
DynamicMatrix
=
Matrix
<
typename
ET
::
type
,
Eigen
::
Dynamic
,
Eigen
::
Dynamic
,
Eigen
::
RowMajor
>
;
Eigen
::
Matrix
<
T
,
Eigen
::
Dynamic
,
Eigen
::
Dynamic
,
Eigen
::
RowMajor
>
;
template
<
typename
E
T
>
template
<
typename
T
>
using
EigenMatrixBase
=
Eigen
::
Map
<
DynamicMatrix
<
E
T
>
,
0
,
DynamicStrides
>
;
using
EigenMatrixBase
=
Eigen
::
Map
<
DynamicMatrix
<
T
>
,
0
,
DynamicStrides
>
;
template
<
typename
E
T
>
template
<
typename
T
>
using
DynamicVector
=
Eigen
::
Matrix
<
typename
ET
::
type
,
Eigen
::
Dynamic
,
1
>
;
using
DynamicVector
=
Eigen
::
Matrix
<
T
,
Eigen
::
Dynamic
,
1
>
;
template
<
typename
E
T
>
template
<
typename
T
>
using
EigenVectorBase
=
Eigen
::
Map
<
DynamicVector
<
E
T
>
,
0
,
VectorStrides
>
;
using
EigenVectorBase
=
Eigen
::
Map
<
DynamicVector
<
T
>
,
0
,
VectorStrides
>
;
namespace
fmt
namespace
fmt
{
{
...
@@ -117,7 +116,7 @@ namespace ngraph
...
@@ -117,7 +116,7 @@ namespace ngraph
// ET element type
// ET element type
// FMT array format (fmt::V for vector, etc.)
// FMT array format (fmt::V for vector, etc.)
// BASE select array/matrix
// BASE select array/matrix
template
<
typename
E
T
,
template
<
typename
T
,
typename
FMT
,
typename
FMT
,
typename
BASE
,
typename
BASE
,
typename
STRIDES
=
DynamicStrides
>
typename
STRIDES
=
DynamicStrides
>
...
@@ -126,26 +125,19 @@ namespace ngraph
...
@@ -126,26 +125,19 @@ namespace ngraph
using
base
=
BASE
;
using
base
=
BASE
;
public
:
public
:
EigenWrapper
(
typename
ET
::
type
*
t
,
const
FMT
&
fmt
)
EigenWrapper
(
T
*
t
,
const
FMT
&
fmt
)
:
base
(
t
,
fmt
.
l0
,
fmt
.
l1
,
STRIDES
(
fmt
.
s0
,
fmt
.
s1
))
:
base
(
t
,
fmt
.
l0
,
fmt
.
l1
,
STRIDES
(
fmt
.
s0
,
fmt
.
s1
))
{
{
}
}
EigenWrapper
(
EigenWrapper
(
typename
ET
::
type
*
t
,
T
*
t
,
const
std
::
shared_ptr
<
ngraph
::
descriptor
::
layout
::
DenseTensorViewLayout
>&
const
std
::
shared_ptr
<
ngraph
::
descriptor
::
layout
::
DenseTensorViewLayout
>&
layout
)
layout
)
:
base
(
t
,
layout
->
get_size
(),
1
,
DynamicStrides
(
1
,
1
))
:
base
(
t
,
layout
->
get_size
(),
1
,
DynamicStrides
(
1
,
1
))
{
{
}
}
EigenWrapper
(
CallFrame
*
call_frame
,
const
TensorViewInfo
&
tensor_view_info
)
:
EigenWrapper
(
call_frame
->
get_tensor_view_data
<
ET
>
(
tensor_view_info
.
get_index
()),
FMT
(
tensor_view_info
))
{
}
template
<
typename
U
>
template
<
typename
U
>
EigenWrapper
&
operator
=
(
const
U
&
other
)
EigenWrapper
&
operator
=
(
const
U
&
other
)
{
{
...
@@ -154,17 +146,17 @@ namespace ngraph
...
@@ -154,17 +146,17 @@ namespace ngraph
}
}
};
};
template
<
typename
E
T
,
typename
FMT
=
fmt
::
V
>
template
<
typename
T
,
typename
FMT
=
fmt
::
V
>
using
EigenArray1d
=
EigenWrapper
<
ET
,
FMT
,
EigenArrayBase
<
E
T
>>
;
using
EigenArray1d
=
EigenWrapper
<
T
,
FMT
,
EigenArrayBase
<
T
>>
;
template
<
typename
E
T
,
typename
FMT
=
fmt
::
M
>
template
<
typename
T
,
typename
FMT
=
fmt
::
M
>
using
EigenArray2d
=
EigenWrapper
<
ET
,
FMT
,
EigenArrayBase
<
E
T
>>
;
using
EigenArray2d
=
EigenWrapper
<
T
,
FMT
,
EigenArrayBase
<
T
>>
;
template
<
typename
E
T
,
typename
FMT
=
fmt
::
M
>
template
<
typename
T
,
typename
FMT
=
fmt
::
M
>
using
EigenMatrix
=
EigenWrapper
<
ET
,
FMT
,
EigenMatrixBase
<
E
T
>>
;
using
EigenMatrix
=
EigenWrapper
<
T
,
FMT
,
EigenMatrixBase
<
T
>>
;
template
<
typename
E
T
,
typename
FMT
=
fmt
::
V
>
template
<
typename
T
,
typename
FMT
=
fmt
::
V
>
using
EigenVector
=
EigenWrapper
<
ET
,
FMT
,
EigenVectorBase
<
E
T
>
,
VectorStrides
>
;
using
EigenVector
=
EigenWrapper
<
T
,
FMT
,
EigenVectorBase
<
T
>
,
VectorStrides
>
;
}
}
}
}
}
}
...
...
src/ngraph/runtime/cpu/emitter.cpp
View file @
b0de2d3e
...
@@ -34,8 +34,10 @@
...
@@ -34,8 +34,10 @@
#include "ngraph/runtime/cpu/emitter.hpp"
#include "ngraph/runtime/cpu/emitter.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
#include "ngraph/util.hpp"
using
namespace
std
;
using
namespace
std
;
using
namespace
ngraph
;
using
namespace
ngraph
::
runtime
::
cpu
;
using
namespace
ngraph
::
runtime
::
cpu
;
using
ngraph
::
descriptor
::
layout
::
DenseTensorViewLayout
;
using
ngraph
::
descriptor
::
layout
::
DenseTensorViewLayout
;
...
@@ -54,34 +56,18 @@ static unordered_map<type_index, string> element_type_names = {
...
@@ -54,34 +56,18 @@ static unordered_map<type_index, string> element_type_names = {
#define EIGEN_VECTOR_FORMAT(x) "fmt::V{" + to_string(x) + "}"
#define EIGEN_VECTOR_FORMAT(x) "fmt::V{" + to_string(x) + "}"
string
eigen_vector_format
(
const
runtime
::
TensorViewInfo
&
info
)
{
stringstream
ss
;
ss
<<
"fmt::V{"
<<
info
.
get_layout
<
DenseTensorViewLayout
>
()
->
get_size
()
<<
"}"
;
return
ss
.
str
();
}
static
std
::
string
EIGEN_MATRIX_FORMAT
(
const
ngraph
::
Shape
&
shape
,
const
ngraph
::
Strides
&
strides
)
static
std
::
string
EIGEN_MATRIX_FORMAT
(
const
ngraph
::
Shape
&
shape
,
const
ngraph
::
Strides
&
strides
)
{
{
std
::
string
I
;
stringstream
ss
;
for
(
size_t
i
=
0
;
i
<
shape
.
size
();
i
++
)
ss
<<
"fmt::M{{"
<<
join
(
shape
)
<<
"}, {"
<<
join
(
strides
)
<<
"}}"
;
{
return
ss
.
str
();
if
(
!
i
)
{
I
+=
"fmt::M{{"
+
to_string
(
shape
[
i
]);
}
else
{
I
+=
", "
+
to_string
(
shape
[
i
]);
}
}
I
+=
"}, "
;
for
(
size_t
i
=
0
;
i
<
strides
.
size
();
i
++
)
{
if
(
!
i
)
{
I
+=
"{"
+
to_string
(
strides
[
i
]);
}
else
{
I
+=
", "
+
to_string
(
strides
[
i
]);
}
}
I
+=
"}}"
;
return
I
;
}
}
void
Emitter
::
EMITTER_DECL
(
EmitNop
)
void
Emitter
::
EMITTER_DECL
(
EmitNop
)
...
@@ -93,18 +79,21 @@ void Emitter::EMITTER_DECL(EmitAdd)
...
@@ -93,18 +79,21 @@ void Emitter::EMITTER_DECL(EmitAdd)
const
element
::
Type
&
et
=
const
element
::
Type
&
et
=
(
dynamic_pointer_cast
<
const
TensorViewType
>
(
n
->
get_arguments
().
at
(
0
)
->
get_value_type
()))
(
dynamic_pointer_cast
<
const
TensorViewType
>
(
n
->
get_arguments
().
at
(
0
)
->
get_value_type
()))
->
get_element_type
();
->
get_element_type
();
string
type
=
et
.
c_type_string
();
TU
+=
" {
\n
"
" auto arg0 = call_frame->get_tensor_view_data<"
+
element_type_names
[
TI
(
et
)]
+
">("
+
to_string
(
inputs
[
0
].
get_index
())
+
");
\n
"
TU
.
indent
++
;
" auto arg1 = call_frame->get_tensor_view_data<"
+
element_type_names
[
TI
(
et
)]
+
">("
+
to_string
(
inputs
[
1
].
get_index
())
+
");
\n
"
TU
<<
"{ // "
<<
n
->
get_name
()
<<
"
\n
"
;
" auto out = call_frame->get_tensor_view_data<"
+
element_type_names
[
TI
(
et
)]
+
">("
+
to_string
(
outputs
[
0
].
get_index
())
+
");
\n
"
TU
.
indent
++
;
" EigenArray1d<"
+
element_type_names
[
TI
(
et
)]
+
">(out, "
TU
<<
"EigenArray1d<"
<<
type
<<
">("
<<
outputs
[
0
].
get_tensor
().
get_name
()
<<
", "
EIGEN_VECTOR_FORMAT
(
outputs
[
0
].
get_layout
<
DenseTensorViewLayout
>
()
->
get_size
())
") =
\n
"
<<
eigen_vector_format
(
outputs
[
0
])
<<
") =
\n
"
;
" EigenArray1d<"
+
element_type_names
[
TI
(
et
)]
+
">(arg0, "
TU
.
indent
++
;
EIGEN_VECTOR_FORMAT
(
inputs
[
0
].
get_layout
<
DenseTensorViewLayout
>
()
->
get_size
())
") +
\n
"
TU
<<
"EigenArray1d<"
<<
type
<<
">("
<<
inputs
[
0
].
get_tensor
().
get_name
()
<<
", "
" EigenArray1d<"
+
element_type_names
[
TI
(
et
)]
+
">(arg1, "
<<
eigen_vector_format
(
inputs
[
0
])
<<
") +
\n
"
;
EIGEN_VECTOR_FORMAT
(
inputs
[
1
].
get_layout
<
DenseTensorViewLayout
>
()
->
get_size
())
");
\n
"
TU
<<
"EigenArray1d<"
<<
type
<<
">("
<<
inputs
[
1
].
get_tensor
().
get_name
()
<<
", "
" }
\n
"
;
<<
eigen_vector_format
(
inputs
[
1
])
<<
");
\n
"
;
TU
.
indent
-=
2
;
TU
<<
"}
\n
"
;
TU
.
indent
--
;
}
}
void
Emitter
::
EMITTER_DECL
(
EmitDot
)
void
Emitter
::
EMITTER_DECL
(
EmitDot
)
...
@@ -248,16 +237,14 @@ void Emitter::EMITTER_DECL(EmitMultiply)
...
@@ -248,16 +237,14 @@ void Emitter::EMITTER_DECL(EmitMultiply)
const
element
::
Type
&
et
=
const
element
::
Type
&
et
=
(
dynamic_pointer_cast
<
const
TensorViewType
>
(
n
->
get_arguments
().
at
(
0
)
->
get_value_type
()))
(
dynamic_pointer_cast
<
const
TensorViewType
>
(
n
->
get_arguments
().
at
(
0
)
->
get_value_type
()))
->
get_element_type
();
->
get_element_type
();
string
type
=
et
.
c_type_string
();
TU
+=
" {
\n
"
TU
+=
" { // "
+
n
->
get_name
()
+
"
\n
"
" auto arg0 = call_frame->get_tensor_view_data<"
+
element_type_names
[
TI
(
et
)]
+
">("
+
to_string
(
inputs
[
0
].
get_index
())
+
");
\n
"
" EigenArray1d<"
+
type
+
">("
+
outputs
[
0
].
get_tensor
().
get_name
()
+
", "
" auto arg1 = call_frame->get_tensor_view_data<"
+
element_type_names
[
TI
(
et
)]
+
">("
+
to_string
(
inputs
[
1
].
get_index
())
+
");
\n
"
" auto out = call_frame->get_tensor_view_data<"
+
element_type_names
[
TI
(
et
)]
+
">("
+
to_string
(
outputs
[
0
].
get_index
())
+
");
\n
"
" EigenArray1d<"
+
element_type_names
[
TI
(
et
)]
+
">(out, "
EIGEN_VECTOR_FORMAT
(
outputs
[
0
].
get_layout
<
DenseTensorViewLayout
>
()
->
get_size
())
") =
\n
"
EIGEN_VECTOR_FORMAT
(
outputs
[
0
].
get_layout
<
DenseTensorViewLayout
>
()
->
get_size
())
") =
\n
"
"
EigenArray1d<"
+
element_type_names
[
TI
(
et
)]
+
">(arg0
, "
"
EigenArray1d<"
+
type
+
">("
+
inputs
[
0
].
get_tensor
().
get_name
()
+
"
, "
EIGEN_VECTOR_FORMAT
(
inputs
[
0
].
get_layout
<
DenseTensorViewLayout
>
()
->
get_size
())
") *
\n
"
EIGEN_VECTOR_FORMAT
(
inputs
[
0
].
get_layout
<
DenseTensorViewLayout
>
()
->
get_size
())
") *
\n
"
"
EigenArray1d<"
+
element_type_names
[
TI
(
et
)]
+
">(arg1
, "
"
EigenArray1d<"
+
type
+
">("
+
inputs
[
1
].
get_tensor
().
get_name
()
+
"
, "
EIGEN_VECTOR_FORMAT
(
inputs
[
1
].
get_layout
<
DenseTensorViewLayout
>
()
->
get_size
())
");
\n
"
EIGEN_VECTOR_FORMAT
(
inputs
[
1
].
get_layout
<
DenseTensorViewLayout
>
()
->
get_size
())
");
\n
"
" }
\n
"
;
" }
\n
"
;
}
}
...
@@ -1058,6 +1045,7 @@ void Emitter::EMITTER_DECL(EmitReshape)
...
@@ -1058,6 +1045,7 @@ void Emitter::EMITTER_DECL(EmitReshape)
void
Emitter
::
EMITTER_DECL
(
EmitFunctionCall
)
void
Emitter
::
EMITTER_DECL
(
EmitFunctionCall
)
{
{
NGRAPH_INFO
;
auto
function_call
=
static_cast
<
const
op
::
FunctionCall
*>
(
n
);
auto
function_call
=
static_cast
<
const
op
::
FunctionCall
*>
(
n
);
auto
function
=
function_call
->
get_function
();
auto
function
=
function_call
->
get_function
();
...
@@ -1073,6 +1061,7 @@ void Emitter::EMITTER_DECL(EmitFunctionCall)
...
@@ -1073,6 +1061,7 @@ void Emitter::EMITTER_DECL(EmitFunctionCall)
function_map
.
insert
({
function
,
external
});
function_map
.
insert
({
function
,
external
});
}
}
NGRAPH_INFO
;
std
::
shared_ptr
<
CallFrame
>
cf
=
std
::
shared_ptr
<
CallFrame
>
cf
=
std
::
dynamic_pointer_cast
<
CallFrame
>
(
external
->
make_call_frame
());
std
::
dynamic_pointer_cast
<
CallFrame
>
(
external
->
make_call_frame
());
...
@@ -1110,6 +1099,7 @@ void Emitter::EMITTER_DECL(EmitFunctionCall)
...
@@ -1110,6 +1099,7 @@ void Emitter::EMITTER_DECL(EmitFunctionCall)
void
Emitter
::
EMITTER_DECL
(
EmitReduce
)
void
Emitter
::
EMITTER_DECL
(
EmitReduce
)
{
{
NGRAPH_INFO
;
auto
reduce
=
static_cast
<
const
op
::
Reduce
*>
(
n
);
auto
reduce
=
static_cast
<
const
op
::
Reduce
*>
(
n
);
auto
reduction_function
=
reduce
->
get_reduction_function
();
auto
reduction_function
=
reduce
->
get_reduction_function
();
...
...
src/ngraph/runtime/cpu/external_function.cpp
View file @
b0de2d3e
This diff is collapsed.
Click to expand it.
src/ngraph/runtime/cpu/external_function.hpp
View file @
b0de2d3e
...
@@ -22,6 +22,7 @@
...
@@ -22,6 +22,7 @@
#include "ngraph/codegen/compiler.hpp"
#include "ngraph/codegen/compiler.hpp"
#include "ngraph/function.hpp"
#include "ngraph/function.hpp"
#include "ngraph/runtime/cpu/call_frame.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
...
@@ -47,11 +48,6 @@ namespace ngraph
...
@@ -47,11 +48,6 @@ namespace ngraph
using
OpMap
=
std
::
unordered_map
<
std
::
type_index
,
OpFunction
>
;
using
OpMap
=
std
::
unordered_map
<
std
::
type_index
,
OpFunction
>
;
using
EntryPoint
=
std
::
function
<
void
(
ngraph
::
runtime
::
cpu
::
CallFrame
*
,
ngraph
::
runtime
::
TensorViewPtrs
&
,
const
std
::
vector
<
std
::
shared_ptr
<
ngraph
::
runtime
::
cpu
::
CallFrame
>>&
)
>
;
class
ExternalFunction
:
public
ngraph
::
runtime
::
ExternalFunction
class
ExternalFunction
:
public
ngraph
::
runtime
::
ExternalFunction
{
{
public
:
public
:
...
@@ -65,7 +61,7 @@ namespace ngraph
...
@@ -65,7 +61,7 @@ namespace ngraph
size_t
m_n_inputs
;
size_t
m_n_inputs
;
size_t
m_n_outputs
;
size_t
m_n_outputs
;
ngraph
::
descriptor
::
TensorViewPtrs
m_temp_views
;
ngraph
::
descriptor
::
TensorViewPtrs
m_temp_views
;
EntryPoint
compiled_function
;
EntryPoint
m_
compiled_function
;
std
::
vector
<
std
::
shared_ptr
<
CallFrame
>>
callees
;
std
::
vector
<
std
::
shared_ptr
<
CallFrame
>>
callees
;
};
};
}
}
...
...
src/ngraph/runtime/cpu/tensor_view.cpp
0 → 100644
View file @
b0de2d3e
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include "cpu_backend.hpp"
#include "tensor_view.hpp"
using
namespace
ngraph
;
using
namespace
std
;
// C-linkage buffer helpers implemented by the CPU backend runtime library.
// allocate_aligned_buffer returns two pointers: `allocated` is the raw
// allocation (the value that must later be handed to free_aligned_buffer),
// and `aligned_ptr` is the alignment-adjusted pointer into it that callers
// actually use for data.
extern "C" void
    allocate_aligned_buffer(size_t size, size_t alignment, char** allocated, char** aligned_ptr);
// Releases a buffer previously obtained from allocate_aligned_buffer.
// Must be called with the `allocated` out-value, not the aligned pointer.
extern "C" void free_aligned_buffer(void* allocated);
// Construct an externally-visible ("external", input and output capable)
// primary tensor view for `element_type`/`shape`, backed by an aligned
// heap buffer sized from a dense (row-major, unpadded) layout.
runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_type,
                                           const Shape& shape)
    : runtime::TensorView(std::make_shared<ngraph::descriptor::PrimaryTensorView>(
          std::make_shared<ngraph::TensorViewType>(element_type, shape), "external", true, true))
{
    // The dense layout fixes the element count; byte size follows from it.
    m_descriptor->set_tensor_view_layout(
        std::make_shared<ngraph::descriptor::layout::DenseTensorViewLayout>(*m_descriptor));

    m_buffer_size = m_descriptor->get_tensor_view_layout()->get_size() * element_type.size();

    // m_allocated keeps the raw allocation for the destructor; m_buffer is
    // the alignment-adjusted pointer used for all data access.
    allocate_aligned_buffer(m_buffer_size, runtime::cpu::alignment, &m_allocated, &m_buffer);
}
// Release the backing storage. Note: free the raw allocation (m_allocated),
// not the aligned pointer (m_buffer).
runtime::cpu::CPUTensorView::~CPUTensorView()
{
    free_aligned_buffer(m_allocated);
}
// Mutable access to the aligned storage backing this tensor.
char* runtime::cpu::CPUTensorView::get_data_ptr()
{
    char* data = m_buffer;
    return data;
}
// Read-only access to the aligned storage backing this tensor.
const char* runtime::cpu::CPUTensorView::get_data_ptr() const
{
    const char* data = m_buffer;
    return data;
}
// Copy `n` bytes from `source` into the tensor, starting at byte offset
// `tensor_offset` within the backing buffer.
// Throws std::out_of_range if the write would run past the end of the buffer.
void runtime::cpu::CPUTensorView::write(const void* source, size_t tensor_offset, size_t n)
{
    // Overflow-safe bounds check: `tensor_offset + n` could wrap around
    // size_t, so compare against the remaining capacity instead.
    if (tensor_offset > m_buffer_size || n > m_buffer_size - tensor_offset)
    {
        throw out_of_range("write access past end of tensor");
    }
    char* target = get_data_ptr();
    memcpy(&target[tensor_offset], source, n);
}
// Copy `n` bytes out of the tensor into `target`, starting at byte offset
// `tensor_offset` within the backing buffer.
// Throws std::out_of_range if the read would run past the end of the buffer.
void runtime::cpu::CPUTensorView::read(void* target, size_t tensor_offset, size_t n) const
{
    // Overflow-safe bounds check: `tensor_offset + n` could wrap around
    // size_t, so compare against the remaining capacity instead.
    if (tensor_offset > m_buffer_size || n > m_buffer_size - tensor_offset)
    {
        throw out_of_range("read access past end of tensor");
    }
    const char* source = get_data_ptr();
    memcpy(target, &source[tensor_offset], n);
}
src/ngraph/runtime/cpu/tensor_view.hpp
0 → 100644
View file @
b0de2d3e
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/types/element_type.hpp"
namespace
ngraph
{
namespace
runtime
{
namespace
cpu
{
class
CPUTensorView
;
}
}
}
/// @brief CPU-backend tensor view backed by an aligned heap buffer.
///
/// Owns its storage: the constructor allocates an aligned buffer and the
/// destructor frees it. Copying is deleted — a compiler-generated copy
/// would duplicate the raw pointers and double-free in ~CPUTensorView().
class ngraph::runtime::cpu::CPUTensorView : public ngraph::runtime::TensorView
{
public:
    CPUTensorView(const ngraph::element::Type& element_type, const Shape& shape);
    virtual ~CPUTensorView();

    CPUTensorView(const CPUTensorView&) = delete;
    CPUTensorView& operator=(const CPUTensorView&) = delete;

    /// Aligned pointer to the tensor's storage (mutable / read-only).
    char* get_data_ptr();
    const char* get_data_ptr() const;

    /// @brief Write bytes directly into the tensor
    /// @param p Pointer to source of data
    /// @param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
    /// @param n Number of bytes to write, must be integral number of elements.
    void write(const void* p, size_t tensor_offset, size_t n) override;

    /// @brief Read bytes directly from the tensor
    /// @param p Pointer to destination for data
    /// @param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
    /// @param n Number of bytes to read, must be integral number of elements.
    void read(void* p, size_t tensor_offset, size_t n) const override;

private:
    char* m_allocated = nullptr;  // raw allocation; passed to free_aligned_buffer
    char* m_buffer = nullptr;     // alignment-adjusted pointer into m_allocated
    size_t m_buffer_size = 0;     // usable buffer size in bytes
};
src/ngraph/runtime/ndarray.hpp
View file @
b0de2d3e
...
@@ -179,6 +179,8 @@ namespace ngraph
...
@@ -179,6 +179,8 @@ namespace ngraph
const
vtype
get_vector
()
const
{
return
m_elements
;
}
const
vtype
get_vector
()
const
{
return
m_elements
;
}
operator
const
vtype
()
const
{
return
m_elements
;
}
operator
const
vtype
()
const
{
return
m_elements
;
}
operator
vtype
()
{
return
m_elements
;
}
operator
vtype
()
{
return
m_elements
;
}
void
*
data
()
{
return
m_elements
.
data
();
}
const
void
*
data
()
const
{
return
m_elements
.
data
();
}
bool
operator
==
(
const
NDArrayBase
<
T
>&
other
)
const
bool
operator
==
(
const
NDArrayBase
<
T
>&
other
)
const
{
{
return
m_shape
==
other
.
m_shape
&&
m_elements
==
other
.
m_elements
;
return
m_shape
==
other
.
m_shape
&&
m_elements
==
other
.
m_elements
;
...
...
src/ngraph/runtime/tensor_view.hpp
View file @
b0de2d3e
...
@@ -18,8 +18,11 @@
...
@@ -18,8 +18,11 @@
#include <vector>
#include <vector>
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/ndarray.hpp"
#include "ngraph/runtime/value.hpp"
#include "ngraph/runtime/value.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/util.hpp"
namespace
ngraph
namespace
ngraph
{
{
...
@@ -42,7 +45,6 @@ namespace ngraph
...
@@ -42,7 +45,6 @@ namespace ngraph
}
}
public
:
public
:
TensorView
()
{}
virtual
~
TensorView
()
{}
virtual
~
TensorView
()
{}
template
<
typename
ET
>
template
<
typename
ET
>
ParameterizedTensorView
<
ET
>*
get_parameterized_tensor_view
()
ParameterizedTensorView
<
ET
>*
get_parameterized_tensor_view
()
...
@@ -75,6 +77,29 @@ namespace ngraph
...
@@ -75,6 +77,29 @@ namespace ngraph
/// @param n Number of bytes to read, must be integral number of elements.
/// @param n Number of bytes to read, must be integral number of elements.
virtual
void
read
(
void
*
p
,
size_t
tensor_offset
,
size_t
n
)
const
=
0
;
virtual
void
read
(
void
*
p
,
size_t
tensor_offset
,
size_t
n
)
const
=
0
;
// This is for unit test only
template
<
typename
T
>
bool
operator
==
(
const
NDArrayBase
<
T
>&
ndarray
)
const
{
bool
rc
=
false
;
if
(
get_shape
()
==
ndarray
.
get_shape
())
{
std
::
vector
<
T
>
lhs
(
ndarray
.
get_vector
().
size
());
read
(
lhs
.
data
(),
0
,
ndarray
.
get_vector
().
size
()
*
sizeof
(
T
));
rc
=
(
lhs
==
ndarray
.
get_vector
());
}
return
rc
;
}
template
<
typename
T
>
std
::
vector
<
T
>
get_vector
()
{
size_t
element_count
=
shape_size
(
get_shape
());
size_t
size
=
element_count
*
sizeof
(
T
);
std
::
vector
<
T
>
rc
(
element_count
);
read
(
rc
.
data
(),
0
,
size
);
return
rc
;
}
protected
:
protected
:
std
::
shared_ptr
<
ngraph
::
descriptor
::
TensorView
>
m_descriptor
;
std
::
shared_ptr
<
ngraph
::
descriptor
::
TensorView
>
m_descriptor
;
};
};
...
...
src/ngraph/runtime/tensor_view_info.hpp
View file @
b0de2d3e
...
@@ -27,9 +27,10 @@ namespace ngraph
...
@@ -27,9 +27,10 @@ namespace ngraph
{
{
public
:
public
:
TensorViewInfo
(
size_t
index
,
TensorViewInfo
(
size_t
index
,
const
std
::
shared_ptr
<
const
ngraph
::
descriptor
::
TensorView
>&
descriptor
)
std
::
shared_ptr
<
const
ngraph
::
descriptor
::
TensorView
>
descriptor
)
:
m_index
(
index
)
:
m_index
(
index
)
,
m_layout
(
descriptor
->
get_tensor_view_layout
())
,
m_layout
(
descriptor
->
get_tensor_view_layout
())
,
m_tensor_view
(
descriptor
)
{
{
}
}
...
@@ -46,9 +47,20 @@ namespace ngraph
...
@@ -46,9 +47,20 @@ namespace ngraph
return
std
::
static_pointer_cast
<
LT
>
(
m_layout
);
return
std
::
static_pointer_cast
<
LT
>
(
m_layout
);
}
}
std
::
shared_ptr
<
const
ngraph
::
descriptor
::
TensorView
>
get_tensor_view
()
const
{
return
m_tensor_view
;
}
const
ngraph
::
descriptor
::
Tensor
&
get_tensor
()
const
{
return
m_tensor_view
->
get_tensor
();
}
protected
:
protected
:
size_t
m_index
;
size_t
m_index
;
std
::
shared_ptr
<
ngraph
::
descriptor
::
layout
::
TensorViewLayout
>
m_layout
;
std
::
shared_ptr
<
ngraph
::
descriptor
::
layout
::
TensorViewLayout
>
m_layout
;
std
::
shared_ptr
<
const
ngraph
::
descriptor
::
TensorView
>
m_tensor_view
;
};
};
}
}
}
}
test/backend_test.in.cpp
View file @
b0de2d3e
This source diff could not be displayed because it is too large. You can
view the blob
instead.
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment