Commit 11b612ca authored Nov 05, 2017 by Jaikrishnan Menon
CPU: clang-format fixes and header cleanup for generated code
parent fc50420f
Showing 4 changed files with 90 additions and 71 deletions:

src/ngraph/runtime/cpu/cpu_kernels.hpp        (+6, -3)
src/ngraph/runtime/cpu/emitter.cpp            (+75, -58)
src/ngraph/runtime/cpu/external_function.cpp  (+5, -6)
src/ngraph/runtime/cpu/external_function.hpp  (+4, -4)
src/ngraph/runtime/cpu/cpu_kernels.hpp

@@ -92,9 +92,12 @@ namespace mkl

    extern "C" {
        void MKL_Somatcopy(char ordering,
                           char trans,
                           size_t rows,
                           size_t cols,
                           const ngraph::element::Float32::type alpha,
                           const ngraph::element::Float32::type* A,
                           size_t lda,
                           ngraph::element::Float32::type* B,
                           size_t ldb);
    }
}
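The declaration above mirrors MKL's out-of-place scaled copy/transpose routine (B := alpha * op(A)). As a rough, self-contained sketch of a direct call, assuming MKL is linked and using plain float in place of ngraph::element::Float32::type (the 2x3 buffer and values below are made up for illustration):

#include <cstddef>
#include <vector>

// Declaration as in cpu_kernels.hpp, with plain float standing in for Float32::type.
extern "C" void MKL_Somatcopy(char ordering,
                              char trans,
                              std::size_t rows,
                              std::size_t cols,
                              const float alpha,
                              const float* A,
                              std::size_t lda,
                              float* B,
                              std::size_t ldb);

int main()
{
    // Hypothetical 2x3 row-major input, transposed into a 3x2 output (B := 1.0 * A^T).
    std::vector<float> a = {1, 2, 3, 4, 5, 6};
    std::vector<float> b(6);
    MKL_Somatcopy('R', 'T', 2, 3, 1.0f, a.data(), 3, b.data(), 2);
    return 0;
}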
src/ngraph/runtime/cpu/emitter.cpp

@@ -12,12 +12,12 @@

// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------

#include <algorithm>
#include <iostream>
#include <string>
#include <typeindex>
#include <unordered_map>
#include <vector>

#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/node.hpp"
@@ -30,10 +30,10 @@

#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/slice.hpp"
#include "ngraph/ops/sum.hpp"
#include "ngraph/runtime/cpu/call_frame.hpp"
#include "ngraph/runtime/cpu/emitter.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"

using namespace std;
using namespace ngraph::runtime::cpu;
@@ -185,6 +185,7 @@ void Emitter::EMITTER_DECL(EmitDot)

    auto out_layout = outputs[0].get_layout<DenseTensorViewLayout>();

    // Emit an MKL SGEMM call if possible
    // clang-format off
    if (arg0_element_type == ngraph::element::Float32::element_type())
    {
        TU +=
@@ -204,18 +205,22 @@ void Emitter::EMITTER_DECL(EmitDot)

            " out, " + to_string(max(1UL, arg1_shape[1])) + ");\n"
            " }\n";
    }
    // clang-format on
    else
    {
        TU +=
            " {\n"
            " auto arg0 = call_frame->get_tensor_view_data<" +
            element_type_names[TI(arg0_element_type)] + ">(" +
            to_string(inputs[0].get_index()) + ");\n"
            " auto arg1 = call_frame->get_tensor_view_data<" +
            element_type_names[TI(arg0_element_type)] + ">(" +
            to_string(inputs[1].get_index()) + ");\n"
            " auto out = call_frame->get_tensor_view_data<" +
            element_type_names[TI(arg0_element_type)] + ">(" +
            to_string(outputs[0].get_index()) + ");\n"
            " EigenMatrix<" + element_type_names[TI(arg0_element_type)] + ">(out, " +
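The fallback branch above shows the pattern used throughout this file: generated C++ is accumulated into the translation-unit string TU by concatenating string literals with runtime values such as tensor-view indices and element-type names. A stripped-down sketch of the same string-building idea (the type string and the indices below are illustrative stand-ins, not the emitter's real data):

#include <iostream>
#include <string>

int main()
{
    std::string TU;                   // the generated translation unit
    std::string type = "Float32";     // stand-in for element_type_names[TI(...)]
    int arg0 = 0, arg1 = 1, out = 2;  // stand-ins for tensor-view indices

    TU += " {\n"
          " auto arg0 = call_frame->get_tensor_view_data<" + type + ">(" +
          std::to_string(arg0) + ");\n"
          " auto arg1 = call_frame->get_tensor_view_data<" + type + ">(" +
          std::to_string(arg1) + ");\n"
          " auto out = call_frame->get_tensor_view_data<" + type + ">(" +
          std::to_string(out) + ");\n"
          " }\n";

    std::cout << TU;                  // text later handed to the JIT compiler
    return 0;
}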
@@ -944,8 +949,7 @@ void Emitter::EMITTER_DECL(EmitConstant)

            " {\n"
            " call_frame->get_parameterized_tensor_view<" +
            element_type_names[TI(c_element_type)] + ">(" + to_string(outputs[0].get_index()) +
            ")->get_vector() = std::vector<" + element_type_names[TI(c_element_type)] +
            "::type>{";
        for (size_t i = 0; i < c_value_strings.size(); i++)
        {
@@ -989,7 +993,8 @@ void Emitter::EMITTER_DECL(EmitReshape)

        TU +=
            " {\n"
            " call_frame->get_parameterized_tensor_view<" +
            element_type_names[TI(result_element_type)] + ">(" +
            to_string(outputs.at(0).get_index()) + ")->get_vector() =\n"
            " call_frame->get_parameterized_tensor_view<" +
            element_type_names[TI(result_element_type)] + ">(" +
@@ -1004,6 +1009,7 @@ void Emitter::EMITTER_DECL(EmitReshape)

    auto out_layout = outputs[0].get_layout<DenseTensorViewLayout>();

    // Emit an MKL transpose call if possible
    // clang-format off
    if (result_element_type == ngraph::element::Float32::element_type())
    {
        TU +=
@@ -1018,18 +1024,27 @@ void Emitter::EMITTER_DECL(EmitReshape)

            " out, " + to_string(arg_shape[0]) + ");\n"
            " }\n";
    }
    // clang-format on
    else
    {
        TU +=
            " {\n"
            " auto arg0 = call_frame->get_tensor_view_data<" +
            element_type_names[TI(result_element_type)] + ">(" +
            to_string(inputs[0].get_index()) + ");\n"
            " auto out = call_frame->get_tensor_view_data<" +
            element_type_names[TI(result_element_type)] + ">(" +
            to_string(outputs[0].get_index()) + ");\n"
            " EigenMatrix<" + element_type_names[TI(result_element_type)] + ">(out, " +
            EIGEN_MATRIX_FORMAT(out_layout->get_shape(), out_layout->get_strides()) + ") =\n"
            " EigenMatrix<" + element_type_names[TI(result_element_type)] + ">(arg0, " +
            EIGEN_MATRIX_FORMAT(arg0_layout->get_shape(), arg0_layout->get_strides()) +
            ").transpose();\n"
            " }\n";
    }
}
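For element types other than Float32 the reshape falls back to generated Eigen code that maps the raw buffers and assigns the transpose. Outside the emitter, that generated snippet corresponds to roughly the following standalone code, using a plain Eigen::Map in place of ngraph's EigenMatrix helper (the buffer shapes here are made up):

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Hypothetical 2x3 row-major source buffer and 3x2 destination buffer.
    float arg0[6] = {1, 2, 3, 4, 5, 6};
    float out[6] = {0};

    using RowMajorMatrix =
        Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

    Eigen::Map<RowMajorMatrix> in_map(arg0, 2, 3);
    Eigen::Map<RowMajorMatrix> out_map(out, 3, 2);

    // What the emitted "EigenMatrix<...>(out, ...) = EigenMatrix<...>(arg0, ...).transpose();" does.
    out_map = in_map.transpose();

    std::cout << out_map << "\n";
    return 0;
}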
@@ -1058,26 +1073,28 @@ void Emitter::EMITTER_DECL(EmitFunctionCall)

        function_map.insert({function, external});
    }

    std::shared_ptr<CallFrame> cf =
        std::dynamic_pointer_cast<CallFrame>(external->make_call_frame());
    ef->get_callees().emplace_back(cf);

    TU +=
        " {\n"
        " auto cf = callees.at(" + to_string(ef->get_callees().size() - 1) + ");\n"
        " std::vector<std::shared_ptr<ngraph::runtime::Value>> inputs;\n"
        " std::vector<std::shared_ptr<ngraph::runtime::Value>> outputs;\n";

    for (const auto& in : inputs)
    {
        TU += " inputs.emplace_back(call_frame->get_tensor_view(" +
              to_string(in.get_index()) + "));\n";
    }
    for (const auto& out : outputs)
    {
        TU += " outputs.emplace_back(call_frame->get_tensor_view(" +
              to_string(out.get_index()) + "));\n";
    }
    TU +=
@@ -1109,14 +1126,12 @@ void Emitter::EMITTER_DECL(EmitReduce)

    }

    auto reductee_type = reduce->get_arguments().at(0)->get_value_type();
    auto reductee_tensor_view_type =
        dynamic_pointer_cast<const TensorViewType>(reductee_type);
    assert(reductee_tensor_view_type);
    auto reductee_shape = reductee_tensor_view_type->get_shape();

    auto f_result_type = reduction_function->get_result_type();
    auto f_result_tensor_view_type =
        dynamic_pointer_cast<const TensorViewType>(f_result_type);
    assert(f_result_tensor_view_type);
    auto& f_result_element_type = f_result_tensor_view_type->get_element_type();
@@ -1172,8 +1187,7 @@ void Emitter::EMITTER_DECL(EmitReduce)

    else if ((reductee_shape.size() == 1 && reduction_axes == AxisSet{0}) ||
             (reductee_shape.size() == 2 && reduction_axes == AxisSet{0, 1}))
    {
        if (reductee_shape.at(0) == 0 ||
            (reductee_shape.size() == 2 && reductee_shape.at(1) == 0))
        {
            TU +=
                " {\n"
@@ -1189,8 +1203,8 @@ void Emitter::EMITTER_DECL(EmitReduce)

        }
        else
        {
            std::shared_ptr<CallFrame> cf =
                std::dynamic_pointer_cast<CallFrame>(external->make_call_frame());
            ef->get_callees().emplace_back(cf);

            TU +=
@@ -1234,8 +1248,8 @@ void Emitter::EMITTER_DECL(EmitReduce)

        }
        else
        {
            std::shared_ptr<CallFrame> cf =
                std::dynamic_pointer_cast<CallFrame>(external->make_call_frame());
            ef->get_callees().emplace_back(cf);

            TU +=
@@ -1279,8 +1293,8 @@ void Emitter::EMITTER_DECL(EmitReduce)

        }
        else
        {
            std::shared_ptr<CallFrame> cf =
                std::dynamic_pointer_cast<CallFrame>(external->make_call_frame());
            ef->get_callees().emplace_back(cf);

            TU +=
@@ -1357,12 +1371,10 @@ void Emitter::EMITTER_DECL(EmitSlice)

        TU +=
            " {\n"
            " call_frame->get_parameterized_tensor_view<" +
            element_type_names[TI(arg_element_type)] + ">(" +
            to_string(outputs.at(0).get_index()) + ")->get_vector() =\n"
            " call_frame->get_parameterized_tensor_view<" +
            element_type_names[TI(arg_element_type)] + ">(" +
            to_string(inputs.at(0).get_index()) + ")->get_vector();\n"
            " }\n";
    }
@@ -1400,24 +1412,28 @@ void Emitter::EMITTER_DECL(EmitSlice)

            ") =\n"
            " EigenMatrix<" + element_type_names[TI(arg_element_type)] + ">(arg0, " +
            EIGEN_MATRIX_FORMAT(arg0_layout->get_shape(), arg0_layout->get_strides()) +
            ").block(" + to_string(lower_bounds[0]) + ", " + to_string(lower_bounds[1]) + ",\n"
            " " + to_string(upper_bounds[0] - lower_bounds[0]) + ",\n"
            " " + to_string(upper_bounds[1] - lower_bounds[1]) + ");\n"
            " }\n";
    }
    // Other cases (reordering of axes for tensors with rank>2) are not handled yet.
    else
    {
        throw ngraph_error("Slice is not implemented yet for tensors with rank>2");
    }
}

void Emitter::EMITTER_DECL(EmitSum)
{
    auto s = static_cast<const op::Sum*>(n);
    auto s_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(s->get_value_type());
    assert(s_tensor_view_type);
    auto& s_element_type = s_tensor_view_type->get_element_type();
    auto s_shape = s_tensor_view_type->get_shape();
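The 2-D slice above is emitted as an Eigen .block() expression whose offsets come from lower_bounds and whose extents are upper_bounds minus lower_bounds. A standalone sketch of the same block arithmetic, with hypothetical bounds {1,1} to {3,4}:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::MatrixXf arg0(4, 4);
    arg0 << 1, 2, 3, 4,
            5, 6, 7, 8,
            9, 10, 11, 12,
            13, 14, 15, 16;

    // Equivalent of .block(lower_bounds[0], lower_bounds[1],
    //                      upper_bounds[0] - lower_bounds[0],
    //                      upper_bounds[1] - lower_bounds[1])
    // with the made-up bounds {1,1}..{3,4}: a 2x3 sub-matrix.
    Eigen::MatrixXf out = arg0.block(1, 1, 2, 3);

    std::cout << out << "\n";
    return 0;
}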
@@ -1437,12 +1453,10 @@ void Emitter::EMITTER_DECL(EmitSum)

        TU +=
            " {\n"
            " call_frame->get_parameterized_tensor_view<" +
            element_type_names[TI(s_element_type)] + ">(" +
            to_string(outputs.at(0).get_index()) + ")->get_vector() =\n"
            " call_frame->get_parameterized_tensor_view<" +
            element_type_names[TI(s_element_type)] + ">(" +
            to_string(inputs.at(0).get_index()) + ")->get_vector();\n"
            " }\n";
    }
@@ -1626,12 +1640,15 @@ void Emitter::EMITTER_DECL(EmitTanh)

    // by models
    TU +=
        " {\n"
        " auto& arg0 = call_frame->get_parameterized_tensor_view<" +
        element_type_names[TI(et)] + ">(" + to_string(inputs[0].get_index()) +
        ")->get_vector();\n"
        " auto& out = call_frame->get_parameterized_tensor_view<" +
        element_type_names[TI(et)] + ">(" + to_string(outputs[0].get_index()) +
        ")->get_vector();\n"
        " std::transform(arg0.begin(), arg0.end(), out.begin(), [](" +
        element_type_names[TI(et)] + "::type x) -> " + element_type_names[TI(et)] +
        "::type { return std::tanh(x); });\n"
        " }\n";
}
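EmitTanh emits a plain std::transform over the tensors' backing vectors rather than an Eigen expression. Compiled on its own, the generated body boils down to roughly this, with a concrete float vector standing in for the parameterized tensor views:

#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

int main()
{
    std::vector<float> arg0 = {-1.0f, 0.0f, 1.0f};
    std::vector<float> out(arg0.size());

    // Mirrors the emitted lambda: element-wise tanh from input to output.
    std::transform(arg0.begin(), arg0.end(), out.begin(),
                   [](float x) -> float { return std::tanh(x); });

    for (float v : out)
        std::cout << v << " ";
    std::cout << "\n";
    return 0;
}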
src/ngraph/runtime/cpu/external_function.cpp

@@ -208,19 +208,18 @@ void ExternalFunction::compile(FunctionMap& function_map)

    Emitter emitter;
    auto& TU = emitter.GetTU();
    TU += R"(// Generated by the NGraph CPU backend
#include <algorithm>
#include <cmath>
#include <memory>
#include <vector>

#include <Eigen/Dense>

#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/runtime/cpu/call_frame.hpp"
#include "ngraph/runtime/cpu/cpu_kernels.hpp"
#include "ngraph/runtime/cpu/eigen_utils.hpp"
#include "ngraph/runtime/utils.hpp"

using namespace ngraph::element;
using namespace ngraph::runtime;
@@ -281,8 +280,8 @@ extern "C" void __entrypoint(ngraph::runtime::cpu::CallFrame* call_frame,

    assert(llvm_module);
    estate.add_module(llvm_module);
    estate.finalize();
    compiled_function =
        estate.find_function<void(ngraph::runtime::cpu::CallFrame*,
                                  ngraph::runtime::TensorViewPtrs&,
                                  const std::vector<std::shared_ptr<CallFrame>>&)>("__entrypoint");
    assert(compiled_function);
src/ngraph/runtime/cpu/external_function.hpp

@@ -20,8 +20,8 @@

#include <typeinfo>
#include <unordered_map>

#include "ngraph/codegen/compiler.hpp"
#include "ngraph/function.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
@@ -47,7 +47,8 @@ namespace ngraph

            using OpMap = std::unordered_map<std::type_index, OpFunction>;

            using EntryPoint = std::function<void(
                ngraph::runtime::cpu::CallFrame*,
                ngraph::runtime::TensorViewPtrs&,
                const std::vector<std::shared_ptr<ngraph::runtime::cpu::CallFrame>>&)>;
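EntryPoint is the signature that the JIT-compiled __entrypoint symbol is bound to in external_function.cpp. A much-simplified sketch of the same std::function wrapping, with toy stand-ins for ngraph's CallFrame and TensorViewPtrs types and a stub in place of the JIT-resolved function:

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

// Toy stand-ins for the real ngraph::runtime types named in the alias.
struct CallFrame {};
using TensorViewPtrs = std::vector<void*>;

using EntryPoint = std::function<void(CallFrame*,
                                      TensorViewPtrs&,
                                      const std::vector<std::shared_ptr<CallFrame>>&)>;

// In ngraph this would be the JIT-resolved "__entrypoint" symbol; here it is a stub.
void entrypoint_stub(CallFrame*,
                     TensorViewPtrs& tensor_views,
                     const std::vector<std::shared_ptr<CallFrame>>& callees)
{
    std::cout << tensor_views.size() << " tensor views, "
              << callees.size() << " callee frames\n";
}

int main()
{
    EntryPoint compiled_function = entrypoint_stub;
    CallFrame frame;
    TensorViewPtrs views(3, nullptr);
    compiled_function(&frame, views, {});
    return 0;
}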
@@ -57,8 +58,7 @@ namespace ngraph

            ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
                             bool release_function = true);
            std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame();
            std::vector<std::shared_ptr<CallFrame>>& get_callees() { return callees; }

        protected:
            void compile(FunctionMap& function_map);