ngraph commit 305dd5b7, authored Oct 24, 2017 by Jaikrishnan Menon
CPU: Implement scalar-tensor and vector dot product
Also make Eigen format construction explicit
parent 4bc6f486
Showing 2 changed files with 64 additions and 10 deletions (+64 -10):

src/ngraph/runtime/cpu/emitter.cpp  +61 -7
test/cpu.cpp                         +3 -3
src/ngraph/runtime/cpu/emitter.cpp
@@ -44,7 +44,7 @@ static unordered_map<type_index, string> element_type_names = {{TI(ngraph::eleme
 };
 
-#define EIGEN_VECTOR_FORMAT(x) "{" + to_string(x) + "}"
+#define EIGEN_VECTOR_FORMAT(x) "fmt::V{" + to_string(x) + "}"
 
 static std::string EIGEN_MATRIX_FORMAT(const ngraph::Shape& shape,
                                        const ngraph::Strides& strides)
@@ -54,7 +54,7 @@ static std::string EIGEN_MATRIX_FORMAT(const ngraph::Shape& shape,
     {
         if (!i)
         {
-            I += "{" + to_string(shape[i]);
+            I += "fmt::M{{" + to_string(shape[i]);
         }
         else
         {
@@ -73,7 +73,7 @@ static std::string EIGEN_MATRIX_FORMAT(const ngraph::Shape& shape,
             I += ", " + to_string(strides[i]);
         }
     }
-    I += "}";
+    I += "}}";
 
     return I;
 }
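The hunks above are the "make Eigen format construction explicit" half of the commit: EIGEN_VECTOR_FORMAT now emits an explicit fmt::V wrapper, and EIGEN_MATRIX_FORMAT wraps its shape/stride lists in fmt::M with doubled braces. A minimal, self-contained sketch (not part of the commit) of what the vector macro from the diff produces; the size 8 is an arbitrary example:

// Standalone illustration only: the macro is copied from the hunk above.
#include <iostream>
#include <string>
using std::to_string;

#define EIGEN_VECTOR_FORMAT(x) "fmt::V{" + to_string(x) + "}"

int main()
{
    std::string fmt = EIGEN_VECTOR_FORMAT(8);
    std::cout << fmt << "\n"; // prints: fmt::V{8}
}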
@@ -102,6 +102,60 @@ void Emitter::EMITTER_DECL(EmitAdd)
 
+void Emitter::EMITTER_DECL(EmitDot)
+{
+    auto& arg_nodes = n->get_arguments();
+    assert(arg_nodes.size() == 2);
+
+    auto arg0_tensor_type =
+        dynamic_pointer_cast<const TensorViewType>(arg_nodes.at(0)->get_value_type());
+    assert(nullptr != arg0_tensor_type);
+
+    auto arg1_tensor_type =
+        dynamic_pointer_cast<const TensorViewType>(arg_nodes.at(1)->get_value_type());
+    assert(nullptr != arg1_tensor_type);
+
+    auto arg0_shape = arg0_tensor_type->get_shape();
+    auto arg1_shape = arg1_tensor_type->get_shape();
+    auto& arg0_element_type = arg0_tensor_type->get_element_type();
+
+    if (arg0_shape.empty() || arg1_shape.empty())
+    {
+        auto& first = (arg0_shape.empty() ? inputs[0] : inputs[1]);
+        auto& second = (arg0_shape.empty() ? inputs[1] : inputs[0]);
+
+        TU += "    {\n"
+              "        auto arg1 = call_frame->get_tensor_view_data<" +
+              element_type_names[TI(arg0_element_type)] + ">(" +
+              to_string(second.get_index()) + ");\n"
+              "        auto out = call_frame->get_tensor_view_data<" +
+              element_type_names[TI(arg0_element_type)] + ">(" +
+              to_string(outputs[0].get_index()) + ");\n"
+              "        EigenVector<" + element_type_names[TI(arg0_element_type)] + ">(out, "
+              EIGEN_VECTOR_FORMAT(outputs[0].get_layout<DenseTensorViewLayout>()->get_size()) ") = "
+              "call_frame->get_tensor_view_data<" + element_type_names[TI(arg0_element_type)] + ">(" +
+              to_string(first.get_index()) + ")[0] * EigenVector<" +
+              element_type_names[TI(arg0_element_type)] + ">(arg1, "
+              EIGEN_VECTOR_FORMAT(second.get_layout<DenseTensorViewLayout>()->get_size()) ");\n"
+              "    }\n";
+    }
+    else if ((arg0_shape.size() == 1) && (arg1_shape.size() == 1))
+    {
+        TU += "    {\n"
+              "        auto arg0 = call_frame->get_tensor_view_data<" +
+              element_type_names[TI(arg0_element_type)] + ">(" +
+              to_string(inputs[0].get_index()) + ");\n"
+              "        auto arg1 = call_frame->get_tensor_view_data<" +
+              element_type_names[TI(arg0_element_type)] + ">(" +
+              to_string(inputs[1].get_index()) + ");\n"
+              "        auto out = call_frame->get_tensor_view_data<" +
+              element_type_names[TI(arg0_element_type)] + ">(" +
+              to_string(outputs[0].get_index()) + ");\n"
+              "        EigenVector<" + element_type_names[TI(arg0_element_type)] + ">(out, "
+              EIGEN_VECTOR_FORMAT(outputs[0].get_layout<DenseTensorViewLayout>()->get_size()) ") << \n"
+              "        EigenVector<" + element_type_names[TI(arg0_element_type)] + ">(arg0, "
+              EIGEN_VECTOR_FORMAT(inputs[0].get_layout<DenseTensorViewLayout>()->get_size()) ").dot("
+              "EigenVector<" + element_type_names[TI(arg0_element_type)] + ">(arg1, "
+              EIGEN_VECTOR_FORMAT(inputs[1].get_layout<DenseTensorViewLayout>()->get_size()) "));\n"
+              "    }\n";
+    }
+    else
+    {
+        throw ngraph_error("Dot product for given tensors unimplemented");
+    }
+}
+
 void Emitter::EMITTER_DECL(EmitMultiply)
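The EmitDot hunk above covers exactly the two cases named in the commit title: a dot where one argument is a 0-D (scalar) tensor, which scales the other argument element-wise, and a 1-D x 1-D dot, which reduces to a single value; everything else still throws. A minimal plain-Eigen sketch of the computations the generated code performs; the sizes and values here are made up for illustration, and the real emitted code maps raw call-frame buffers rather than owning VectorXf objects:

// Hypothetical illustration of the two dot-product cases, using plain Eigen.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Case 1: one argument is a scalar (0-D tensor) -> scale the other argument.
    float scalar = 2.0f;
    Eigen::VectorXf v(3);
    v << 1.0f, 2.0f, 3.0f;
    Eigen::VectorXf scaled = scalar * v; // {2, 4, 6}

    // Case 2: both arguments are 1-D -> reduce to a single value.
    Eigen::VectorXf a(3), b(3);
    a << 1.0f, 2.0f, 3.0f;
    b << 4.0f, 5.0f, 6.0f;
    float dot = a.dot(b); // 1*4 + 2*5 + 3*6 = 32

    std::cout << scaled.transpose() << " | " << dot << "\n";
}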
@@ -210,8 +264,8 @@ void Emitter::EMITTER_DECL(EmitConcat)
 
     TU += "    {\n"
           "        auto out = call_frame->get_tensor_view_data<" +
           element_type_names[TI(result_element_type)] + ">(" + to_string(outputs[0].get_index()) + ");\n"
-          "        EigenMatrix<" + element_type_names[TI(result_element_type)] + "> out_matrix(out, {" +
-          EIGEN_MATRIX_FORMAT(out_layout->get_shape(), out_layout->get_strides()) + "});\n";
+          "        EigenMatrix<" + element_type_names[TI(result_element_type)] + "> out_matrix(out, " +
+          EIGEN_MATRIX_FORMAT(out_layout->get_shape(), out_layout->get_strides()) + ");\n";
 
     size_t concat_pos[2]{0, 0};
     for (size_t i = 0; i < inputs.size(); i++)
@@ -224,8 +278,8 @@ void Emitter::EMITTER_DECL(EmitConcat)
               to_string(arg_shape.at(1)) + ") << "
               "EigenMatrix<" + element_type_names[TI(result_element_type)] + ">(call_frame->"
               "get_tensor_view_data<" + element_type_names[TI(result_element_type)] + ">(" +
-              to_string(inputs[i].get_index()) + "), {" +
-              EIGEN_MATRIX_FORMAT(arg_layout->get_shape(), arg_layout->get_strides()) + "});\n";
+              to_string(inputs[i].get_index()) + "), " +
+              EIGEN_MATRIX_FORMAT(arg_layout->get_shape(), arg_layout->get_strides()) + ");\n";
 
         concat_pos[axis] += arg_shape.at(axis);
     }
test/cpu.cpp
@@ -442,8 +442,7 @@ TEST(cpu, equal)
     ASSERT_EQ((vector<char>{1, 1, 0, 0, 0, 1, 1, 0}), result->get_vector());
 }
 
-/*
-TEST(execute, dot_0_0)
+TEST(cpu, dot_0_0)
 {
     auto shape = Shape{0};
     auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
@@ -452,7 +451,7 @@ TEST(execute, dot_0_0)
     auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
     auto f = make_shared<Function>(make_shared<op::Dot>(A, B), rt, op::Parameters{A, B});
 
-    auto manager = runtime::Manager::get("NGVM");
+    auto manager = runtime::Manager::get("CPU");
     auto external = manager->compile(f);
     auto backend = manager->allocate_backend();
     auto cf = backend->make_call_frame(external);
@@ -468,6 +467,7 @@ TEST(execute, dot_0_0)
     ASSERT_EQ((vector<float>{0}), result->get_vector());
 }
 
+/*
 TEST(execute, dot_matrix_2x0_0x2)
 {
     auto shape_a = Shape{2, 0};