Commit 61df6725 authored by Rob Earhart, committed by Robert Kimball

[PlaidML] Specialize within namespaces (for Linux) (#1948)

parent 5698fa75
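Context for the change below: before C++17 (CWG 374), an explicit specialization had to be declared inside a namespace enclosing the specialized template; defining it at global scope through a qualified name is rejected by GCC, the compiler used for the Linux builds, even though some other compilers accept it. This commit therefore wraps the backend's specializations in reopened `ngraph::runtime::plaidml` namespace blocks. A minimal sketch of the pattern, with hypothetical names rather than the backend's real classes:

```cpp
namespace outer
{
    template <typename T>
    struct Impl
    {
        void run();
    };
}

// Before this commit: specialization via qualified-id at global scope.
// Some compilers accept this, but pre-C++17 GCC rejects it:
//
//   template <>
//   void outer::Impl<int>::run() {}

// After this commit: reopen the enclosing namespace and specialize inside
// it, which every compiler accepts:
namespace outer
{
    template <>
    void Impl<int>::run() {}
}

int main()
{
    outer::Impl<int>{}.run(); // invokes the specialization
}
```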
@@ -24,10 +24,16 @@
 #include "ngraph/op/not_equal.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// Equal performs a simple elementwise equality.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Equal>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // Equal performs a simple elementwise equality.
+            template <>
+            void Impl<op::Equal>::operator()()
+            {
     check_inputs(2);
     check_outputs(1);
     set_output(start_tile_function()
@@ -37,12 +43,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Equal>::operator()()
                    .add(builder::Elementwise{"C", "A == B"})
                    .finalize(),
                TensorContents::LOGICAL);
 }

 // Greater performs a simple elementwise greater-than comparison.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Greater>::operator()()
+void Impl<op::Greater>::operator()()
 {
     check_inputs(2);
     check_outputs(1);
     set_output(start_tile_function()
@@ -52,12 +58,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Greater>::operator()()
                    .add(builder::Elementwise{"C", "A > B"})
                    .finalize(),
                TensorContents::LOGICAL);
 }

 // GreaterEq performs a simple elementwise greater-than-or-equal-to comparison.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::GreaterEq>::operator()()
+void Impl<op::GreaterEq>::operator()()
 {
     check_inputs(2);
     check_outputs(1);
     set_output(start_tile_function()
@@ -67,12 +73,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::GreaterEq>::operator()()
                    .add(builder::Elementwise{"C", "A >= B"})
                    .finalize(),
                TensorContents::LOGICAL);
 }

 // Less performs a simple elementwise less-than comparison.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Less>::operator()()
+void Impl<op::Less>::operator()()
 {
     check_inputs(2);
     check_outputs(1);
     set_output(start_tile_function()
@@ -82,12 +88,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Less>::operator()()
                    .add(builder::Elementwise{"C", "A < B"})
                    .finalize(),
                TensorContents::LOGICAL);
 }

 // LessEq performs a simple elementwise less-than-or-equal-to comparison.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::LessEq>::operator()()
+void Impl<op::LessEq>::operator()()
 {
     check_inputs(2);
     check_outputs(1);
     set_output(start_tile_function()
@@ -97,12 +103,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::LessEq>::operator()()
                    .add(builder::Elementwise{"C", "A <= B"})
                    .finalize(),
                TensorContents::LOGICAL);
 }

 // Maximum performs a simple elementwise maximum.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Maximum>::operator()()
+void Impl<op::Maximum>::operator()()
 {
     check_inputs(2);
     check_outputs(1);
     set_output(start_tile_function()
@@ -111,12 +117,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Maximum>::operator()()
                    .add(builder::Output{"C"})
                    .add(builder::Elementwise{"C", "max(A, B)"})
                    .finalize());
 }

 // Minimum performs a simple elementwise minimum.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Minimum>::operator()()
+void Impl<op::Minimum>::operator()()
 {
     check_inputs(2);
     check_outputs(1);
     set_output(start_tile_function()
@@ -125,12 +131,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Minimum>::operator()()
                    .add(builder::Output{"C"})
                    .add(builder::Elementwise{"C", "min(A, B)"})
                    .finalize());
 }

 // NotEqual performs a simple elementwise not-equality.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::NotEqual>::operator()()
+void Impl<op::NotEqual>::operator()()
 {
     check_inputs(2);
     check_outputs(1);
     set_output(start_tile_function()
@@ -140,16 +146,19 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::NotEqual>::operator()()
                    .add(builder::Elementwise{"C", "A != B"})
                    .finalize(),
                TensorContents::LOGICAL);
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::Equal>::Registration register_equal;
-    ngraph::runtime::plaidml::Impl<ngraph::op::Greater>::Registration register_greater;
-    ngraph::runtime::plaidml::Impl<ngraph::op::GreaterEq>::Registration register_greater_eq;
-    ngraph::runtime::plaidml::Impl<ngraph::op::Less>::Registration register_less;
-    ngraph::runtime::plaidml::Impl<ngraph::op::LessEq>::Registration register_less_eq;
-    ngraph::runtime::plaidml::Impl<ngraph::op::Maximum>::Registration register_maximum;
-    ngraph::runtime::plaidml::Impl<ngraph::op::Minimum>::Registration register_minimum;
-    ngraph::runtime::plaidml::Impl<ngraph::op::NotEqual>::Registration register_not_equal;
+    Impl<op::Equal>::Registration register_equal;
+    Impl<op::Greater>::Registration register_greater;
+    Impl<op::GreaterEq>::Registration register_greater_eq;
+    Impl<op::Less>::Registration register_less;
+    Impl<op::LessEq>::Registration register_less_eq;
+    Impl<op::Maximum>::Registration register_maximum;
+    Impl<op::Minimum>::Registration register_minimum;
+    Impl<op::NotEqual>::Registration register_not_equal;
+}
+}
+}
 }
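A side note on the `Impl<Op>::Registration` objects at the bottom of each of these files: they are the usual static-registration idiom, where a namespace-scope object's constructor runs during static initialization and adds the op's implementation to a registry. A rough sketch of that idiom under assumed names (`registry()` and the lambda body are illustrative; the backend's actual `Registration` type may differ):

```cpp
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>

// A process-wide table from op name to implementation callback.
using Handler = std::function<void()>;

std::unordered_map<std::string, Handler>& registry()
{
    static std::unordered_map<std::string, Handler> table; // built on first use
    return table;
}

// Constructing a Registration inserts a handler into the table.
struct Registration
{
    Registration(std::string name, Handler h)
    {
        registry().emplace(std::move(name), std::move(h));
    }
};

// One object per op at namespace scope; its constructor runs before main().
namespace
{
    Registration register_equal{"Equal", [] { /* build the Equal tile function */ }};
}
```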
@@ -17,10 +17,16 @@
 #include "ngraph/op/concat.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// Concat views a tensor as a new type.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Concat>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // Concat views a tensor as a new type.
+            template <>
+            void Impl<op::Concat>::operator()()
+            {
     check_outputs(1);

     auto f = start_tile_function();
@@ -52,10 +58,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Concat>::operator()()
             continue;
         }
         std::string sidx{std::to_string(iidx)};
-        f.add(builder::Input{op_input(iidx), "I" + sidx}.add_dims("I" + sidx + "_D", 0, dim_count));
+        f.add(builder::Input{op_input(iidx), "I" + sidx}.add_dims(
+            "I" + sidx + "_D", 0, dim_count));
         f.add(builder::UnaryContraction{"="}
                   .set(builder::ContractionOutput{"E" + sidx}
-                           .add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
+                           .add_dims([&](
+                               std::back_insert_iterator<std::list<std::string>> out) {
                                for (std::size_t idx = 0; idx < dim_count; ++idx)
                                {
                                    std::ostringstream s;
@@ -70,19 +78,22 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Concat>::operator()()
                                    }
                                }
                            })
-                           .add_indices([&](std::back_insert_iterator<std::list<std::string>> out) {
+                           .add_indices([&](
+                               std::back_insert_iterator<std::list<std::string>> out) {
                                for (std::size_t idx = 0; idx < dim_count; ++idx)
                                {
                                    std::ostringstream s;
                                    s << "d" << idx;
-                                   if (saw_non_zero_tensor && idx == op().get_concatenation_axis())
+                                   if (saw_non_zero_tensor &&
+                                       idx == op().get_concatenation_axis())
                                    {
                                        s << " + " << offset.str();
                                    }
                                    out = s.str();
                                }
                            }))
-                  .set(builder::ContractionInput{"I" + sidx}.add_indices("d", 0, dim_count)));
+                  .set(builder::ContractionInput{"I" + sidx}.add_indices(
+                      "d", 0, dim_count)));
         if (saw_non_zero_tensor)
         {
             oexpr << " + ";
@@ -95,9 +106,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Concat>::operator()()
     f.add(builder::Elementwise{"O", oexpr.str()});

     set_output(f.finalize());
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::Concat>::Registration register_concat;
+    Impl<op::Concat>::Registration register_concat;
+}
+}
+}
 }
@@ -18,21 +18,31 @@
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"
 #include "ngraph/runtime/plaidml/plaidml_translate.hpp"

-// Convert views a tensor as a new type.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Convert>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // Convert views a tensor as a new type.
+            template <>
+            void Impl<op::Convert>::operator()()
+            {
     check_inputs(1);
     check_outputs(1);
-    set_output(start_tile_function()
+    set_output(
+        start_tile_function()
            .add(builder::Input{op_input(), "I"})
            .add(builder::Output{"O"})
            .add(builder::Elementwise{
                "O", tile_converter("I", to_plaidml(op().get_convert_element_type()))})
            .finalize());
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::Convert>::Registration register_convert;
+    Impl<op::Convert>::Registration register_convert;
+}
+}
+}
 }
@@ -50,32 +50,29 @@ namespace ngraph
                    std::size_t output_channel_axis_result,
                    bool rotate_filter);
            };
-}
-}
-}

 template <>
-struct ngraph::runtime::plaidml::ParentImpl<ngraph::op::Convolution>
+struct ParentImpl<op::Convolution>
 {
-    using Type = ngraph::runtime::plaidml::ConvolutionImpl<ngraph::op::Convolution>;
+    using Type = ConvolutionImpl<op::Convolution>;
 };

 template <>
-struct ngraph::runtime::plaidml::ParentImpl<ngraph::op::ConvolutionBackpropFilters>
+struct ParentImpl<op::ConvolutionBackpropFilters>
 {
-    using Type = ngraph::runtime::plaidml::ConvolutionImpl<ngraph::op::ConvolutionBackpropFilters>;
+    using Type = ConvolutionImpl<op::ConvolutionBackpropFilters>;
 };

 template <>
-struct ngraph::runtime::plaidml::ParentImpl<ngraph::op::ConvolutionBackpropData>
+struct ParentImpl<op::ConvolutionBackpropData>
 {
-    using Type = ngraph::runtime::plaidml::ConvolutionImpl<ngraph::op::ConvolutionBackpropData>;
+    using Type = ConvolutionImpl<op::ConvolutionBackpropData>;
 };

 // Convolution implements a standard ML convolultion, with optional striding, padding, and dilation.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Convolution>::operator()()
+void Impl<op::Convolution>::operator()()
 {
     this->check_inputs(2);
     this->check_outputs(1);
@@ -122,13 +119,13 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Convolution>::operator()()
                    .set_lhs(cpf.I_in_body())
                    .set_rhs(cpf.F_in_body()))
            .finalize());
 }

 // ConvolutionBackpropFilters implements the derivative of a convolution with respect to its filter
 // input.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropFilters>::operator()()
+void Impl<op::ConvolutionBackpropFilters>::operator()()
 {
     this->check_inputs(2);
     this->check_outputs(1);
@@ -177,13 +174,13 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropFilters>::ope
                    .set_lhs(cpf.O_in_body())
                    .set_rhs(cpf.I_in_body()))
            .finalize());
 }

 // ConvolutionBackpropData implements the derivative of a convolution with respect to its data
 // input.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropData>::operator()()
+void Impl<op::ConvolutionBackpropData>::operator()()
 {
     this->check_inputs(2);
     this->check_outputs(1);
@@ -232,11 +229,10 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropData>::operat
                    .set_lhs(cpf.O_in_body())
                    .set_rhs(cpf.F_in_body()))
            .finalize());
 }

 template <typename O>
-inline void ngraph::runtime::plaidml::ConvolutionImpl<O>::LogConvolution(
-    vertexai::plaidml::variable image,
+inline void ConvolutionImpl<O>::LogConvolution(vertexai::plaidml::variable image,
     vertexai::plaidml::variable filter,
     std::size_t image_dims,
     const Strides& window_movement_strides,
@@ -251,7 +247,7 @@ inline void ngraph::runtime::plaidml::ConvolutionImpl<O>::LogConvolution(
     std::size_t batch_axis_result,
     std::size_t output_channel_axis_result,
     bool rotate_filter)
 {
     this->check_inputs(2);
     this->check_outputs(1);
@@ -271,13 +267,15 @@ inline void ngraph::runtime::plaidml::ConvolutionImpl<O>::LogConvolution(
     NGRAPH_DEBUG << "batch_axis_result: " << batch_axis_result;
     NGRAPH_DEBUG << "output_channel_axis_result: " << output_channel_axis_result;
     NGRAPH_DEBUG << "rotate_filter: " << rotate_filter;
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::Convolution>::Registration register_convolution;
-    ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropFilters>::Registration
-        register_convolution_backprop_filters;
-    ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropData>::Registration
-        register_convolution_backprop_data;
+    Impl<op::Convolution>::Registration register_convolution;
+    Impl<op::ConvolutionBackpropFilters>::Registration
+        register_convolution_backprop_filters;
+    Impl<op::ConvolutionBackpropData>::Registration register_convolution_backprop_data;
+}
+}
+}
 }
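The `ParentImpl<Op>` specializations in this file are a trait that selects the base class of each op's `Impl`; related ops (here the three convolution variants) point their trait at a shared helper base so they can reuse code such as `LogConvolution`. A simplified, self-contained sketch of the trait-selected-base technique, with hypothetical `BaseImpl`/`SharedHelper` names standing in for the backend's real classes:

```cpp
#include <iostream>

template <typename Op>
struct BaseImpl
{
    void check() { std::cout << "generic checks\n"; }
};

// The trait: by default, Impl<Op> derives from BaseImpl<Op>.
template <typename Op>
struct ParentImpl
{
    using Type = BaseImpl<Op>;
};

struct Conv {};
struct ConvBackprop {};

// Shared helper base for a family of related ops.
template <typename Op>
struct SharedHelper : BaseImpl<Op>
{
    void log_convolution() { std::cout << "common convolution logging\n"; }
};

// Point both convolution ops at the shared helper.
template <>
struct ParentImpl<Conv>
{
    using Type = SharedHelper<Conv>;
};

template <>
struct ParentImpl<ConvBackprop>
{
    using Type = SharedHelper<ConvBackprop>;
};

// Impl inherits whatever base the trait names.
template <typename Op>
struct Impl : ParentImpl<Op>::Type
{
};

int main()
{
    Impl<Conv> c;
    c.check();
    c.log_convolution(); // available because the trait chose SharedHelper
}
```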
@@ -20,11 +20,17 @@
 #include "ngraph/op/dot.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// Dot is a generalized dot product operation -- scalar-tensor,
-// matrix-vector, and matrix multiplication.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Dot>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // Dot is a generalized dot product operation -- scalar-tensor,
+            // matrix-vector, and matrix multiplication.
+            template <>
+            void Impl<op::Dot>::operator()()
+            {
     check_inputs(2);
     check_outputs(1);
@@ -40,7 +46,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Dot>::operator()()
     NGRAPH_DEBUG << "l_dim_mac=" << l_dim_mac;
     NGRAPH_DEBUG << "r_dim_mic=" << r_dim_mic;

-    set_output(start_tile_function()
+    set_output(
+        start_tile_function()
            .add(builder::Input{op_input(0), "L"}
                     .add_dims("DL", 1, l_dim_mac + 1)
                     .add_dims("DC", 1, reduce_limit + 1))
@@ -61,9 +68,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Dot>::operator()()
                             .add_indices("dc", 1, reduce_limit + 1)
                             .add_indices("dr", r_dim_mic + 1, r_dim_limit + 1)))
            .finalize());
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::Dot>::Registration register_dot;
+    Impl<op::Dot>::Registration register_dot;
+}
+}
+}
 }
@@ -19,10 +19,16 @@
 #include "ngraph/runtime/plaidml/plaidml_compiler.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// FunctionCall invokes a sub-function.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::FunctionCall>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // FunctionCall invokes a sub-function.
+            template <>
+            void Impl<op::FunctionCall>::operator()()
+            {
     Build b;
     build()->compiler->build(op().get_functions()[0], &b);
     vertexai::plaidml::function f{b.composer};
@@ -30,7 +36,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::FunctionCall>::operator()()
     for (std::size_t idx = 0; idx < op().get_input_size(); ++idx)
     {
         auto* oitv = op().get_inputs()[idx].get_output().get_tensor_ptr().get();
-        auto* iitv = b.func->get_parameters()[idx]->get_outputs()[0].get_tensor_ptr().get();
+        auto* iitv =
+            b.func->get_parameters()[idx]->get_outputs()[0].get_tensor_ptr().get();
         inputs.emplace_back(b.input_names.at(iitv), build()->bindings.at(oitv).var);
     }
     vertexai::plaidml::application app{f.apply(inputs)};
@@ -39,9 +46,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::FunctionCall>::operator()()
         auto* iotv = b.func->get_results()[idx]->get_output_tensor_ptr().get();
         set_output(idx, app.get_output(b.output_names[iotv]));
     }
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::FunctionCall>::Registration register_function_call;
+    Impl<op::FunctionCall>::Registration register_function_call;
+}
+}
+}
 }
@@ -36,13 +36,10 @@ namespace ngraph
                void build_index_reduction(const char* agg_op);
            };
-}
-}
-}

 template <typename O>
-void ngraph::runtime::plaidml::IndexReductionImpl<O>::build_index_reduction(const char* agg_op)
+void IndexReductionImpl<O>::build_index_reduction(const char* agg_op)
 {
     this->check_inputs(1);
     this->check_outputs(1);
@@ -56,16 +53,20 @@ void ngraph::runtime::plaidml::IndexReductionImpl<O>::build_index_reduction(const char* agg_op)
            .add(builder::Output{"O"})
            .add( // Compute the maxes along the specified axis in the input
                builder::UnaryContraction{agg_op}
-                   .set(builder::ContractionOutput{"SelVal"}
+                   .set(
+                       builder::ContractionOutput{"SelVal"}
                            .add_indices([&](
                                std::back_insert_iterator<std::list<std::string>> out) {
                                for (auto idx = 0; idx < dim_limit; ++idx)
                                {
-                                   out = (idx == this->op().get_reduction_axis() ? "rd" : "d") +
+                                   out =
+                                       (idx == this->op().get_reduction_axis() ? "rd"
+                                                                               : "d") +
                                        std::to_string(idx);
                                }
                            })
-                           .add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
+                           .add_dims([&](
+                               std::back_insert_iterator<std::list<std::string>> out) {
                                for (auto idx = 0; idx < dim_limit; ++idx)
                                {
                                    if (idx == this->op().get_reduction_axis())
@@ -82,13 +83,14 @@ void ngraph::runtime::plaidml::IndexReductionImpl<O>::build_index_reduction(const char* agg_op)
            .add( // Compare the input against the (broadcasted) max values, and select the indices
                // where the max val occurs
                builder::Elementwise{"SelValIdxs",
-                                    "I == SelVal ? index(I, " + reduction_axis_str + ") : D" +
-                                        reduction_axis_str})
+                                    "I == SelVal ? index(I, " + reduction_axis_str +
+                                        ") : D" + reduction_axis_str})
            .add( // Select the maximum index
                builder::UnaryContraction{"<"}
-                   .set(builder::ContractionOutput{"SelIdx"}
-                            .add_indices(
-                                [&](std::back_insert_iterator<std::list<std::string>> out) {
+                   .set(
+                       builder::ContractionOutput{"SelIdx"}
+                           .add_indices([&](
+                               std::back_insert_iterator<std::list<std::string>> out) {
                                for (auto idx = 0; idx < dim_limit; ++idx)
                                {
                                    if (idx != this->op().get_reduction_axis())
@@ -97,7 +99,8 @@ void ngraph::runtime::plaidml::IndexReductionImpl<O>::build_index_reduction(const char* agg_op)
                                    }
                                }
                            })
-                           .add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
+                           .add_dims([&](
+                               std::back_insert_iterator<std::list<std::string>> out) {
                                for (auto idx = 0; idx < dim_limit; ++idx)
                                {
                                    if (idx != this->op().get_reduction_axis())
@@ -106,41 +109,45 @@ void ngraph::runtime::plaidml::IndexReductionImpl<O>::build_index_reduction(const char* agg_op)
                                    }
                                }
                            }))
-                   .set(builder::ContractionInput{"SelValIdxs"}.add_indices("d", 0, dim_limit)))
+                   .set(builder::ContractionInput{"SelValIdxs"}.add_indices(
+                       "d", 0, dim_limit)))
            .add( // Convert to the requested output element type (if any)
-               builder::Elementwise{"O",
-                                    tile_converter("SelIdx", this->op().get_index_element_type())})
+               builder::Elementwise{
+                   "O", tile_converter("SelIdx", this->op().get_index_element_type())})
            .finalize());
 }

 template <>
-struct ngraph::runtime::plaidml::ParentImpl<ngraph::op::ArgMax>
+struct ParentImpl<op::ArgMax>
 {
-    using Type = IndexReductionImpl<ngraph::op::ArgMax>;
+    using Type = IndexReductionImpl<op::ArgMax>;
 };

 template <>
-struct ngraph::runtime::plaidml::ParentImpl<ngraph::op::ArgMin>
+struct ParentImpl<op::ArgMin>
 {
-    using Type = IndexReductionImpl<ngraph::op::ArgMin>;
+    using Type = IndexReductionImpl<op::ArgMin>;
 };

 // ArgMax computes the maximum index along a tensor axis.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::ArgMax>::operator()()
+void Impl<op::ArgMax>::operator()()
 {
     build_index_reduction(">");
 }

 // ArgMin computes the minimum index along a tensor axis.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::ArgMin>::operator()()
+void Impl<op::ArgMin>::operator()()
 {
     build_index_reduction("<");
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::ArgMax>::Registration register_argmax;
-    ngraph::runtime::plaidml::Impl<ngraph::op::ArgMin>::Registration register_argmin;
+    Impl<op::ArgMax>::Registration register_argmax;
+    Impl<op::ArgMin>::Registration register_argmin;
+}
+}
+}
 }
@@ -20,10 +20,16 @@
 namespace vp = vertexai::plaidml;

-// Parameter binds a descriptor::Tensor to a PlaidML Placeholder.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Parameter>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // Parameter binds a descriptor::Tensor to a PlaidML Placeholder.
+            template <>
+            void Impl<op::Parameter>::operator()()
+            {
     check_inputs(0);
     check_outputs(1);
     vp::placeholder ph{build()->io_dim_override ? build()->io_dim_override_count
@@ -33,22 +39,25 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Parameter>::operator()()
     build()->bindings.emplace(tv, TensorInfo{ph, TensorContents::DATA});
     build()->composer.input(name, ph);
     build()->input_names.emplace(tv, std::move(name));
 }

 // Result binds a PlaidML variable to a composed function output.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Result>::operator()()
+void Impl<op::Result>::operator()()
 {
     check_inputs(1);
     check_outputs(1);

     std::string name = std::string{"O"} + std::to_string(build()->output_names.size());
     descriptor::Tensor* tv = op().get_output_tensor_ptr().get();

     build()->composer.output(name, op_input());
     build()->output_names.emplace(tv, std::move(name));
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::Parameter>::Registration register_parameter;
-    ngraph::runtime::plaidml::Impl<ngraph::op::Result>::Registration register_result;
+    Impl<op::Parameter>::Registration register_parameter;
+    Impl<op::Result>::Registration register_result;
+}
+}
+}
 }
@@ -17,21 +17,29 @@
 #include "ngraph/op/lrn.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// LRN implements Local Response Normalization
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::LRN>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // LRN implements Local Response Normalization
+            template <>
+            void Impl<op::LRN>::operator()()
+            {
     check_inputs(1);
     check_outputs(1);

     auto dim_limit = op().get_inputs()[0].get_shape().size();
     auto rank = dim_limit - 2;
     auto distance = op().get_nsize() / 2;
     std::ostringstream div_expr;
-    div_expr << "I / pow(" << op().get_bias() << ".0 + ((" << op().get_alpha() << ".0 / "
-             << op().get_nsize() << ".0) * S), " << op().get_beta() << ".0)";
+    div_expr << "I / pow(" << op().get_bias() << ".0 + ((" << op().get_alpha()
+             << ".0 / " << op().get_nsize() << ".0) * S), " << op().get_beta() << ".0)";

     set_output(
         start_tile_function()
-            .add(builder::Input{op_input(), "I"}.add_dims({"N", "C"}).add_dims("D", 0, rank))
+            .add(builder::Input{op_input(), "I"}
+                     .add_dims({"N", "C"})
+                     .add_dims("D", 0, rank))
            .add(builder::Output{"O"})
            .add(builder::Elementwise{"ISQ", "I * I"})
            .add(builder::UnaryContraction{"+"}
@@ -43,14 +51,18 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::LRN>::operator()()
                    .set(builder::ContractionInput{"ISQ"}
                             .add_indices({"n", "c + z - " + std::to_string(distance)})
                             .add_indices("d", 0, rank))
-                   .add_constraints([&](std::back_insert_iterator<std::list<std::string>> out) {
+                   .add_constraints(
+                       [&](std::back_insert_iterator<std::list<std::string>> out) {
                        out = "z < " + std::to_string(op().get_nsize());
                    }))
            .add(builder::Elementwise{"O", div_expr.str()})
            .finalize());
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::LRN>::Registration register_local_response_norm;
+    Impl<op::LRN>::Registration register_local_response_norm;
+}
+}
+}
 }
@@ -19,10 +19,16 @@
 #include "ngraph/op/or.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// And performs a simple elementwise logical and.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::And>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // And performs a simple elementwise logical and.
+            template <>
+            void Impl<op::And>::operator()()
+            {
     check_inputs(2);
     check_outputs(1);
     set_output(start_tile_function()
@@ -32,12 +38,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::And>::operator()()
                    .add(builder::Elementwise{"C", "A ? B : A"})
                    .finalize(),
                TensorContents::LOGICAL);
 }

 // Not performs a simple elementwise logical not.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Not>::operator()()
+void Impl<op::Not>::operator()()
 {
     check_inputs(1);
     check_outputs(1);
     set_output(start_tile_function()
@@ -46,12 +52,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Not>::operator()()
                    .add(builder::Elementwise{"O", "cmp_eq(I, 0)"})
                    .finalize(),
                TensorContents::LOGICAL);
 }

 // Or performs a simple elementwise logical or.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Or>::operator()()
+void Impl<op::Or>::operator()()
 {
     check_inputs(2);
     check_outputs(1);
     set_output(start_tile_function()
@@ -61,11 +67,14 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Or>::operator()()
                    .add(builder::Elementwise{"C", "A ? A : B"})
                    .finalize(),
                TensorContents::LOGICAL);
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::And>::Registration register_and;
-    ngraph::runtime::plaidml::Impl<ngraph::op::Not>::Registration register_not;
-    ngraph::runtime::plaidml::Impl<ngraph::op::Or>::Registration register_or;
+    Impl<op::And>::Registration register_and;
+    Impl<op::Not>::Registration register_not;
+    Impl<op::Or>::Registration register_or;
+}
+}
+}
 }
@@ -20,10 +20,16 @@
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"
 #include "ngraph/runtime/plaidml/plaidml_translate.hpp"

-// OneHot performs one-hot encoding along the requested axis.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::OneHot>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // OneHot performs one-hot encoding along the requested axis.
+            template <>
+            void Impl<op::OneHot>::operator()()
+            {
     check_inputs(1);
     check_outputs(1);
@@ -68,9 +74,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::OneHot>::operator()()
            .add(builder::Input{op_input(), "I"}.add_dims("D", 0, in_shape.size()))
            .add(builder::Input{static_cast<std::int64_t>(0), "Zero"})
            .add(builder::Output{"O"})
-           .add(builder::UnaryContraction{"="}
-                    .set(builder::ContractionOutput{"ZS"}
-                             .add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
+           .add(
+               builder::UnaryContraction{"="}
+                   .set(
+                       builder::ContractionOutput{"ZS"}
+                           .add_dims([&](
+                               std::back_insert_iterator<std::list<std::string>> out) {
                                for (std::size_t idx = 0; idx < out_shape.size(); ++idx)
                                {
                                    if (idx == op().get_one_hot_axis())
@@ -85,15 +94,19 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::OneHot>::operator()()
                            })
                           .add_indices("d", 0, out_shape.size()))
                   .set(builder::ContractionInput{"Zero"}))
-           .add(builder::Elementwise{"Idx",
-                                     "index(ZS, " + std::to_string(op().get_one_hot_axis()) + ")"})
+           .add(builder::Elementwise{
+               "Idx", "index(ZS, " + std::to_string(op().get_one_hot_axis()) + ")"})
            .add(builder::Elementwise{"IS", "reshape(I, " + in_reshape.str() + ")"})
            .add(builder::Elementwise{"OV", "IS == Idx ? 1 : 0"})
-           .add(builder::Elementwise{"O", tile_converter("OV", op().get_element_type())})
+           .add(builder::Elementwise{"O",
+                                     tile_converter("OV", op().get_element_type())})
            .finalize());
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::OneHot>::Registration register_one_hot;
+    Impl<op::OneHot>::Registration register_one_hot;
+}
+}
+}
 }
@@ -20,10 +20,16 @@
 #include "ngraph/runtime/plaidml/plaidml_convpool_formatter.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// AvgPool implements a batch average pooling operation.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::AvgPool>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // AvgPool implements a batch average pooling operation.
+            template <>
+            void Impl<op::AvgPool>::operator()()
+            {
     check_inputs(1);
     check_outputs(1);
@@ -92,12 +98,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::AvgPool>::operator()()
     f.add(cpf.PoolContraction()).add(builder::Elementwise{"O", "S / Count"});

     set_output(f.finalize());
 }

 // MaxPool implements a batch max pooling operation.
 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::MaxPool>::operator()()
+void Impl<op::MaxPool>::operator()()
 {
     check_inputs(1);
     check_outputs(1);
@@ -156,11 +162,11 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::MaxPool>::operator()()
                    .add(cpf.O_out_header())
                    .add(cpf.PoolContraction())
                    .finalize());
 }

 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::AvgPoolBackprop>::operator()()
+void Impl<op::AvgPoolBackprop>::operator()()
 {
     check_inputs(1);
     check_outputs(1);
@@ -174,7 +180,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::AvgPoolBackprop>::operator()()
     if (include_padding)
     {
-        throw std::runtime_error("Include padding in average not yet implemented in PlaidML");
+        throw std::runtime_error(
+            "Include padding in average not yet implemented in PlaidML");
     }

     ngraph::CoordinateDiff pad_above;
@@ -229,18 +236,19 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::AvgPoolBackprop>::operator()()
     {
         std::ostringstream s;
         s << "XI" << i - 2;
-        ret.add(builder::Input{static_cast<std::int64_t>(forward_arg_shape[i]), s.str()});
+        ret.add(
+            builder::Input{static_cast<std::int64_t>(forward_arg_shape[i]), s.str()});
     }

     set_output(ret.add(cpf.Broadcast_Ones())
                    .add(cpf.Count())
                    .add(builder::Elementwise{"S", "DO / Count"})
                    .add(cpf.PoolContraction())
                    .finalize());
 }

 template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::MaxPoolBackprop>::operator()()
+void Impl<op::MaxPoolBackprop>::operator()()
 {
     check_inputs(2);
     check_outputs(1);
@@ -299,14 +307,15 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::MaxPoolBackprop>::operator()()
                    .add(cpf.PoolContraction())
                    .add(cpf.PoolDerivContraction())
                    .finalize());
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::AvgPool>::Registration register_avg_pool;
-    ngraph::runtime::plaidml::Impl<ngraph::op::MaxPool>::Registration register_max_pool;
-    ngraph::runtime::plaidml::Impl<ngraph::op::AvgPoolBackprop>::Registration
-        register_avg_pool_backprop;
-    ngraph::runtime::plaidml::Impl<ngraph::op::MaxPoolBackprop>::Registration
-        register_max_pool_backprop;
+    Impl<op::AvgPool>::Registration register_avg_pool;
+    Impl<op::MaxPool>::Registration register_max_pool;
+    Impl<op::AvgPoolBackprop>::Registration register_avg_pool_backprop;
+    Impl<op::MaxPoolBackprop>::Registration register_max_pool_backprop;
+}
+}
+}
 }
@@ -19,10 +19,16 @@
 #include "ngraph/op/replace_slice.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// ReplaceSlice replaces part of a tensor with another tensor.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::ReplaceSlice>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // ReplaceSlice replaces part of a tensor with another tensor.
+            template <>
+            void Impl<op::ReplaceSlice>::operator()()
+            {
     check_inputs(2);
     check_outputs(1);
@@ -43,11 +49,13 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::ReplaceSlice>::operator()()
            .add(builder::Input{op_input(0), "L"}.add_dims("D", 0, shape.size()))
            .add(builder::Input{op_input(1), "S"}.add_dims("SD", 0, shape.size()))
            .add(builder::Output{"O"})
-           .add(builder::UnaryContraction{"="}
-                    .set(builder::ContractionOutput{"O"}
+           .add(
+               builder::UnaryContraction{"="}
+                   .set(
+                       builder::ContractionOutput{"O"}
                            .add_dims("D", 0, shape.size())
-                           .add_indices(
-                               [&](std::back_insert_iterator<std::list<std::string>> out) {
+                           .add_indices([&](
+                               std::back_insert_iterator<std::list<std::string>> out) {
                                for (std::size_t idx = 0; idx < shape.size(); ++idx)
                                {
                                    auto stride = op().get_strides()[idx];
@@ -73,8 +81,10 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::ReplaceSlice>::operator()()
                                    out = didx.str();
                                }
                            }))
-                   .set(builder::ContractionInput{"S"}.add_indices("d", 0, shape.size()))
-                   .add_constraints([&](std::back_insert_iterator<std::list<std::string>> out) {
+                   .set(builder::ContractionInput{"S"}.add_indices(
+                       "d", 0, shape.size()))
+                   .add_constraints(
+                       [&](std::back_insert_iterator<std::list<std::string>> out) {
                        for (std::size_t idx = 0; idx < shape.size(); ++idx)
                        {
                            out = "d" + std::to_string(idx) + " < " +
@@ -84,9 +94,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::ReplaceSlice>::operator()()
                        })
                    .set_default("L"))
            .finalize());
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::ReplaceSlice>::Registration register_replace_slice;
+    Impl<op::ReplaceSlice>::Registration register_replace_slice;
+}
+}
+}
 }
@@ -19,10 +19,16 @@
 #include "ngraph/op/reverse.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// Reverse reverses the selected axes within a tensor.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Reverse>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // Reverse reverses the selected axes within a tensor.
+            template <>
+            void Impl<op::Reverse>::operator()()
+            {
     check_inputs(1);
     check_outputs(1);
@@ -35,8 +41,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Reverse>::operator()()
                    .set(builder::ContractionOutput{"O"}
                             .add_indices("d", 0, shape.size())
                             .add_dims("D", 0, shape.size()))
-                   .set(builder::ContractionInput{"I"}.add_indices(
-                       [&](std::back_insert_iterator<std::list<std::string>> out) {
+                   .set(builder::ContractionInput{"I"}.add_indices([&](
+                       std::back_insert_iterator<std::list<std::string>> out) {
                        for (std::size_t idx = 0; idx < shape.size(); ++idx)
                        {
                            auto sidx = std::to_string(idx);
@@ -51,9 +57,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Reverse>::operator()()
                        }
                    })))
            .finalize());
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::Reverse>::Registration register_reverse;
+    Impl<op::Reverse>::Registration register_reverse;
+}
+}
+}
 }
@@ -18,10 +18,16 @@
 #include "ngraph/op/slice.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// Slice takes a sub-slice of a tensor.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Slice>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // Slice takes a sub-slice of a tensor.
+            template <>
+            void Impl<op::Slice>::operator()()
+            {
     check_inputs(1);
     check_outputs(1);
     NGRAPH_DEBUG << "Slice: low: " << op().get_lower_bounds();
@@ -33,17 +39,21 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Slice>::operator()()
        start_tile_function()
            .add(builder::Input{op_input(), "I"}.add_dims("ID", 0, dim_limit))
            .add(builder::Output{"O"})
-           .add(builder::UnaryContraction{"="}
-                    .set(builder::ContractionOutput{"O"}
+           .add(
+               builder::UnaryContraction{"="}
+                   .set(
+                       builder::ContractionOutput{"O"}
                            .add_indices("od", 0, dim_limit)
-                           .add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
+                           .add_dims([&](
+                               std::back_insert_iterator<std::list<std::string>> out) {
                                for (std::size_t idx = 0; idx < dim_limit; ++idx)
                                {
                                    std::ostringstream s;
                                    std::size_t stride = op().get_strides()[idx];
                                    std::ptrdiff_t trim_count =
                                        op().get_lower_bounds()[idx] +
-                                       (shape[idx] - op().get_upper_bounds()[idx]) + 1 - stride;
+                                       (shape[idx] - op().get_upper_bounds()[idx]) +
+                                       1 - stride;
                                    if ((stride != 1) && trim_count)
                                    {
                                        s << "(";
@@ -96,9 +106,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Slice>::operator()()
                                }
                            })))
            .finalize());
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::Slice>::Registration register_slice;
+    Impl<op::Slice>::Registration register_slice;
+}
+}
+}
 }
@@ -19,10 +19,16 @@
 #include "ngraph/op/softmax.hpp"
 #include "ngraph/runtime/plaidml/plaidml_impl.hpp"

-// Softmax implements a standard ML softmax operation.
-template <>
-void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
-{
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace plaidml
+        {
+            // Softmax implements a standard ML softmax operation.
+            template <>
+            void Impl<op::Softmax>::operator()()
+            {
     check_inputs(1);
     check_outputs(1);
@@ -30,7 +36,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
     auto dim_limit = shape.size();

     auto f = start_tile_function();
-    f.add(builder::Input{op_input(0), "I"}.add_dims("D", 0, dim_limit)).add(builder::Output{"O"});
+    f.add(builder::Input{op_input(0), "I"}.add_dims("D", 0, dim_limit))
+        .add(builder::Output{"O"});

     bool reorder_needed = false;
     bool saw_element = false;
@@ -71,7 +78,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
     {
         f.add(builder::UnaryContraction{"="}
                   .set(builder::ContractionOutput{"RI"}
-                           .add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
+                           .add_dims([&](
+                               std::back_insert_iterator<std::list<std::string>> out) {
                                for (auto idx : group_idxs)
                                {
                                    out = "D" + std::to_string(idx);
@@ -81,7 +89,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
                                    out = "D" + std::to_string(idx);
                                }
                            })
-                           .add_indices([&](std::back_insert_iterator<std::list<std::string>> out) {
+                           .add_indices([&](
+                               std::back_insert_iterator<std::list<std::string>> out) {
                                for (auto idx : group_idxs)
                                {
                                    out = "d" + std::to_string(idx);
@@ -117,7 +126,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
     {
         // Take the softmax.
         std::ostringstream softmax;
-        softmax << "builtin_softmax(" << input << ", " << groups << ", " << elements << ")";
+        softmax << "builtin_softmax(" << input << ", " << groups << ", " << elements
+                << ")";
         f.add(builder::Elementwise{output, softmax.str()});
     }

@@ -159,9 +169,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
     }

     set_output(f.finalize());
 }

 namespace
 {
-    ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::Registration register_softmax;
+    Impl<op::Softmax>::Registration register_softmax;
+}
+}
+}
 }
@@ -38,6 +38,8 @@ topk_2d_max_one # No plans to implement TopK
 topk_2d_min_all # No plans to implement TopK
 topk_2d_min_partial # No plans to implement TopK
 topk_2d_min_one # No plans to implement TopK
+topk_int64 # No plans to implement TopK
+topk_5d_max_partial # No plans to implement TopK

 # Tests that PlaidML might be able to run at some point.
 backwards_maxpool_n2_c1_hw5_3x3_str2_max_pad1x2_2x3
@@ -84,3 +86,5 @@ sum_3d_eliminate_zero_dim # Empty dims apparently should produce shape
 dot_0_0 # Empty dims apparently should produce shaped 0s
 dot_matrix_2x0_0x2 # Empty dims apparently should produce shaped 0s
 dot_2x0_0 # Empty dims apparently should produce shaped 0s
+numeric_float_nan
+numeric_double_nan