Commit 61df6725 authored by Rob Earhart, committed by Robert Kimball

[PlaidML] Specialize within namespaces (for Linux) (#1948)

parent 5698fa75
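
For context, the diff below moves the explicit specializations of `Impl<>` and `ParentImpl<>` from global scope (where they were written with fully qualified names such as `ngraph::runtime::plaidml::Impl<ngraph::op::Equal>`) into reopened `ngraph { runtime { plaidml { ... }}}` namespace blocks. A minimal sketch of the pattern, using hypothetical stand-in names rather than the real nGraph sources: GCC, used for the Linux build, is strict about explicit specializations being declared inside the enclosing namespaces of the primary template, while the qualified-name-at-global-scope form was tolerated by the other toolchains.

```cpp
#include <iostream>

namespace demo              // stands in for ngraph::runtime::plaidml
{
    namespace op            // stands in for ngraph::op
    {
        struct Equal {};
    }

    template <typename Op>
    struct Impl
    {
        void operator()();  // specialized per op below
    };

    // Explicit specialization defined inside the enclosing namespace.
    // The pre-commit form -- `template <> void demo::Impl<demo::op::Equal>::operator()()`
    // written at global scope -- is the shape GCC rejected on Linux.
    template <>
    void Impl<op::Equal>::operator()()
    {
        std::cout << "building elementwise equality\n";
    }
}

int main()
{
    demo::Impl<demo::op::Equal>{}(); // invokes the specialization
    return 0;
}
```

The commit itself is mechanical: each op implementation file gains the nested namespace wrappers, the qualified prefixes are dropped from the specializations and registrations, and clang-format reflows the affected lines.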
......@@ -24,10 +24,16 @@
#include "ngraph/op/not_equal.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// Equal performs a simple elementwise equality.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Equal>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// Equal performs a simple elementwise equality.
template <>
void Impl<op::Equal>::operator()()
{
check_inputs(2);
check_outputs(1);
set_output(start_tile_function()
......@@ -37,12 +43,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Equal>::operator()()
.add(builder::Elementwise{"C", "A == B"})
.finalize(),
TensorContents::LOGICAL);
}
}
// Greater performs a simple elementwise greater-than comparison.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Greater>::operator()()
{
// Greater performs a simple elementwise greater-than comparison.
template <>
void Impl<op::Greater>::operator()()
{
check_inputs(2);
check_outputs(1);
set_output(start_tile_function()
......@@ -52,12 +58,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Greater>::operator()()
.add(builder::Elementwise{"C", "A > B"})
.finalize(),
TensorContents::LOGICAL);
}
}
// GreaterEq performs a simple elementwise greater-than-or-equal-to comparison.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::GreaterEq>::operator()()
{
// GreaterEq performs a simple elementwise greater-than-or-equal-to comparison.
template <>
void Impl<op::GreaterEq>::operator()()
{
check_inputs(2);
check_outputs(1);
set_output(start_tile_function()
......@@ -67,12 +73,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::GreaterEq>::operator()()
.add(builder::Elementwise{"C", "A >= B"})
.finalize(),
TensorContents::LOGICAL);
}
}
// Less performs a simple elementwise less-than comparison.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Less>::operator()()
{
// Less performs a simple elementwise less-than comparison.
template <>
void Impl<op::Less>::operator()()
{
check_inputs(2);
check_outputs(1);
set_output(start_tile_function()
......@@ -82,12 +88,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Less>::operator()()
.add(builder::Elementwise{"C", "A < B"})
.finalize(),
TensorContents::LOGICAL);
}
}
// LessEq performs a simple elementwise less-than-or-equal-to comparison.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::LessEq>::operator()()
{
// LessEq performs a simple elementwise less-than-or-equal-to comparison.
template <>
void Impl<op::LessEq>::operator()()
{
check_inputs(2);
check_outputs(1);
set_output(start_tile_function()
......@@ -97,12 +103,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::LessEq>::operator()()
.add(builder::Elementwise{"C", "A <= B"})
.finalize(),
TensorContents::LOGICAL);
}
}
// Maximum performs a simple elementwise maximum.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Maximum>::operator()()
{
// Maximum performs a simple elementwise maximum.
template <>
void Impl<op::Maximum>::operator()()
{
check_inputs(2);
check_outputs(1);
set_output(start_tile_function()
......@@ -111,12 +117,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Maximum>::operator()()
.add(builder::Output{"C"})
.add(builder::Elementwise{"C", "max(A, B)"})
.finalize());
}
}
// Minimum performs a simple elementwise minimum.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Minimum>::operator()()
{
// Minimum performs a simple elementwise minimum.
template <>
void Impl<op::Minimum>::operator()()
{
check_inputs(2);
check_outputs(1);
set_output(start_tile_function()
......@@ -125,12 +131,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Minimum>::operator()()
.add(builder::Output{"C"})
.add(builder::Elementwise{"C", "min(A, B)"})
.finalize());
}
}
// NotEqual performs a simple elementwise not-equality.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::NotEqual>::operator()()
{
// NotEqual performs a simple elementwise not-equality.
template <>
void Impl<op::NotEqual>::operator()()
{
check_inputs(2);
check_outputs(1);
set_output(start_tile_function()
......@@ -140,16 +146,19 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::NotEqual>::operator()()
.add(builder::Elementwise{"C", "A != B"})
.finalize(),
TensorContents::LOGICAL);
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::Equal>::Registration register_equal;
ngraph::runtime::plaidml::Impl<ngraph::op::Greater>::Registration register_greater;
ngraph::runtime::plaidml::Impl<ngraph::op::GreaterEq>::Registration register_greater_eq;
ngraph::runtime::plaidml::Impl<ngraph::op::Less>::Registration register_less;
ngraph::runtime::plaidml::Impl<ngraph::op::LessEq>::Registration register_less_eq;
ngraph::runtime::plaidml::Impl<ngraph::op::Maximum>::Registration register_maximum;
ngraph::runtime::plaidml::Impl<ngraph::op::Minimum>::Registration register_minimum;
ngraph::runtime::plaidml::Impl<ngraph::op::NotEqual>::Registration register_not_equal;
namespace
{
Impl<op::Equal>::Registration register_equal;
Impl<op::Greater>::Registration register_greater;
Impl<op::GreaterEq>::Registration register_greater_eq;
Impl<op::Less>::Registration register_less;
Impl<op::LessEq>::Registration register_less_eq;
Impl<op::Maximum>::Registration register_maximum;
Impl<op::Minimum>::Registration register_minimum;
Impl<op::NotEqual>::Registration register_not_equal;
}
}
}
}
......@@ -17,10 +17,16 @@
#include "ngraph/op/concat.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// Concat concatenates a set of tensors along a given axis.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Concat>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// Concat concatenates a set of tensors along a given axis.
template <>
void Impl<op::Concat>::operator()()
{
check_outputs(1);
auto f = start_tile_function();
......@@ -52,10 +58,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Concat>::operator()()
continue;
}
std::string sidx{std::to_string(iidx)};
f.add(builder::Input{op_input(iidx), "I" + sidx}.add_dims("I" + sidx + "_D", 0, dim_count));
f.add(builder::Input{op_input(iidx), "I" + sidx}.add_dims(
"I" + sidx + "_D", 0, dim_count));
f.add(builder::UnaryContraction{"="}
.set(builder::ContractionOutput{"E" + sidx}
.add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
.add_dims([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (std::size_t idx = 0; idx < dim_count; ++idx)
{
std::ostringstream s;
......@@ -70,19 +78,22 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Concat>::operator()()
}
}
})
.add_indices([&](std::back_insert_iterator<std::list<std::string>> out) {
.add_indices([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (std::size_t idx = 0; idx < dim_count; ++idx)
{
std::ostringstream s;
s << "d" << idx;
if (saw_non_zero_tensor && idx == op().get_concatenation_axis())
if (saw_non_zero_tensor &&
idx == op().get_concatenation_axis())
{
s << " + " << offset.str();
}
out = s.str();
}
}))
.set(builder::ContractionInput{"I" + sidx}.add_indices("d", 0, dim_count)));
.set(builder::ContractionInput{"I" + sidx}.add_indices(
"d", 0, dim_count)));
if (saw_non_zero_tensor)
{
oexpr << " + ";
......@@ -95,9 +106,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Concat>::operator()()
f.add(builder::Elementwise{"O", oexpr.str()});
set_output(f.finalize());
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::Concat>::Registration register_concat;
namespace
{
Impl<op::Concat>::Registration register_concat;
}
}
}
}
......@@ -18,21 +18,31 @@
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
#include "ngraph/runtime/plaidml/plaidml_translate.hpp"
// Convert views a tensor as a new type.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Convert>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// Convert views a tensor as a new type.
template <>
void Impl<op::Convert>::operator()()
{
check_inputs(1);
check_outputs(1);
set_output(start_tile_function()
set_output(
start_tile_function()
.add(builder::Input{op_input(), "I"})
.add(builder::Output{"O"})
.add(builder::Elementwise{
"O", tile_converter("I", to_plaidml(op().get_convert_element_type()))})
.finalize());
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::Convert>::Registration register_convert;
namespace
{
Impl<op::Convert>::Registration register_convert;
}
}
}
}
......@@ -50,32 +50,29 @@ namespace ngraph
std::size_t output_channel_axis_result,
bool rotate_filter);
};
}
}
}
template <>
struct ngraph::runtime::plaidml::ParentImpl<ngraph::op::Convolution>
{
using Type = ngraph::runtime::plaidml::ConvolutionImpl<ngraph::op::Convolution>;
};
template <>
struct ParentImpl<op::Convolution>
{
using Type = ConvolutionImpl<op::Convolution>;
};
template <>
struct ngraph::runtime::plaidml::ParentImpl<ngraph::op::ConvolutionBackpropFilters>
{
using Type = ngraph::runtime::plaidml::ConvolutionImpl<ngraph::op::ConvolutionBackpropFilters>;
};
template <>
struct ParentImpl<op::ConvolutionBackpropFilters>
{
using Type = ConvolutionImpl<op::ConvolutionBackpropFilters>;
};
template <>
struct ngraph::runtime::plaidml::ParentImpl<ngraph::op::ConvolutionBackpropData>
{
using Type = ngraph::runtime::plaidml::ConvolutionImpl<ngraph::op::ConvolutionBackpropData>;
};
template <>
struct ParentImpl<op::ConvolutionBackpropData>
{
using Type = ConvolutionImpl<op::ConvolutionBackpropData>;
};
// Convolution implements a standard ML convolution, with optional striding, padding, and dilation.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Convolution>::operator()()
{
// Convolution implements a standard ML convolution, with optional striding, padding, and dilation.
template <>
void Impl<op::Convolution>::operator()()
{
this->check_inputs(2);
this->check_outputs(1);
......@@ -122,13 +119,13 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Convolution>::operator()()
.set_lhs(cpf.I_in_body())
.set_rhs(cpf.F_in_body()))
.finalize());
}
}
// ConvolutionBackpropFilters implements the derivative of a convolution with respect to its filter
// input.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropFilters>::operator()()
{
// ConvolutionBackpropFilters implements the derivative of a convolution with respect to its filter
// input.
template <>
void Impl<op::ConvolutionBackpropFilters>::operator()()
{
this->check_inputs(2);
this->check_outputs(1);
......@@ -177,13 +174,13 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropFilters>::ope
.set_lhs(cpf.O_in_body())
.set_rhs(cpf.I_in_body()))
.finalize());
}
}
// ConvolutionBackpropData implements the derivative of a convolution with respect to its data
// input.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropData>::operator()()
{
// ConvolutionBackpropData implements the derivative of a convolution with respect to its data
// input.
template <>
void Impl<op::ConvolutionBackpropData>::operator()()
{
this->check_inputs(2);
this->check_outputs(1);
......@@ -232,11 +229,10 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropData>::operat
.set_lhs(cpf.O_in_body())
.set_rhs(cpf.F_in_body()))
.finalize());
}
}
template <typename O>
inline void ngraph::runtime::plaidml::ConvolutionImpl<O>::LogConvolution(
vertexai::plaidml::variable image,
template <typename O>
inline void ConvolutionImpl<O>::LogConvolution(vertexai::plaidml::variable image,
vertexai::plaidml::variable filter,
std::size_t image_dims,
const Strides& window_movement_strides,
......@@ -251,7 +247,7 @@ inline void ngraph::runtime::plaidml::ConvolutionImpl<O>::LogConvolution(
std::size_t batch_axis_result,
std::size_t output_channel_axis_result,
bool rotate_filter)
{
{
this->check_inputs(2);
this->check_outputs(1);
......@@ -271,13 +267,15 @@ inline void ngraph::runtime::plaidml::ConvolutionImpl<O>::LogConvolution(
NGRAPH_DEBUG << "batch_axis_result: " << batch_axis_result;
NGRAPH_DEBUG << "output_channel_axis_result: " << output_channel_axis_result;
NGRAPH_DEBUG << "rotate_filter: " << rotate_filter;
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::Convolution>::Registration register_convolution;
ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropFilters>::Registration
namespace
{
Impl<op::Convolution>::Registration register_convolution;
Impl<op::ConvolutionBackpropFilters>::Registration
register_convolution_backprop_filters;
ngraph::runtime::plaidml::Impl<ngraph::op::ConvolutionBackpropData>::Registration
register_convolution_backprop_data;
Impl<op::ConvolutionBackpropData>::Registration register_convolution_backprop_data;
}
}
}
}
......@@ -20,11 +20,17 @@
#include "ngraph/op/dot.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// Dot is a generalized dot product operation -- scalar-tensor,
// matrix-vector, and matrix multiplication.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Dot>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// Dot is a generalized dot product operation -- scalar-tensor,
// matrix-vector, and matrix multiplication.
template <>
void Impl<op::Dot>::operator()()
{
check_inputs(2);
check_outputs(1);
......@@ -40,7 +46,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Dot>::operator()()
NGRAPH_DEBUG << "l_dim_mac=" << l_dim_mac;
NGRAPH_DEBUG << "r_dim_mic=" << r_dim_mic;
set_output(start_tile_function()
set_output(
start_tile_function()
.add(builder::Input{op_input(0), "L"}
.add_dims("DL", 1, l_dim_mac + 1)
.add_dims("DC", 1, reduce_limit + 1))
......@@ -61,9 +68,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Dot>::operator()()
.add_indices("dc", 1, reduce_limit + 1)
.add_indices("dr", r_dim_mic + 1, r_dim_limit + 1)))
.finalize());
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::Dot>::Registration register_dot;
namespace
{
Impl<op::Dot>::Registration register_dot;
}
}
}
}
......@@ -19,10 +19,16 @@
#include "ngraph/runtime/plaidml/plaidml_compiler.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// FunctionCall invokes a sub-function.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::FunctionCall>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// FunctionCall invokes a sub-function.
template <>
void Impl<op::FunctionCall>::operator()()
{
Build b;
build()->compiler->build(op().get_functions()[0], &b);
vertexai::plaidml::function f{b.composer};
......@@ -30,7 +36,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::FunctionCall>::operator()()
for (std::size_t idx = 0; idx < op().get_input_size(); ++idx)
{
auto* oitv = op().get_inputs()[idx].get_output().get_tensor_ptr().get();
auto* iitv = b.func->get_parameters()[idx]->get_outputs()[0].get_tensor_ptr().get();
auto* iitv =
b.func->get_parameters()[idx]->get_outputs()[0].get_tensor_ptr().get();
inputs.emplace_back(b.input_names.at(iitv), build()->bindings.at(oitv).var);
}
vertexai::plaidml::application app{f.apply(inputs)};
......@@ -39,9 +46,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::FunctionCall>::operator()()
auto* iotv = b.func->get_results()[idx]->get_output_tensor_ptr().get();
set_output(idx, app.get_output(b.output_names[iotv]));
}
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::FunctionCall>::Registration register_function_call;
namespace
{
Impl<op::FunctionCall>::Registration register_function_call;
}
}
}
}
......@@ -36,13 +36,10 @@ namespace ngraph
void build_index_reduction(const char* agg_op);
};
}
}
}
template <typename O>
void ngraph::runtime::plaidml::IndexReductionImpl<O>::build_index_reduction(const char* agg_op)
{
template <typename O>
void IndexReductionImpl<O>::build_index_reduction(const char* agg_op)
{
this->check_inputs(1);
this->check_outputs(1);
......@@ -56,16 +53,20 @@ void ngraph::runtime::plaidml::IndexReductionImpl<O>::build_index_reduction(cons
.add(builder::Output{"O"})
.add( // Compute the maxes along the specified axis in the input
builder::UnaryContraction{agg_op}
.set(builder::ContractionOutput{"SelVal"}
.set(
builder::ContractionOutput{"SelVal"}
.add_indices([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (auto idx = 0; idx < dim_limit; ++idx)
{
out = (idx == this->op().get_reduction_axis() ? "rd" : "d") +
out =
(idx == this->op().get_reduction_axis() ? "rd"
: "d") +
std::to_string(idx);
}
})
.add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
.add_dims([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (auto idx = 0; idx < dim_limit; ++idx)
{
if (idx == this->op().get_reduction_axis())
......@@ -82,13 +83,14 @@ void ngraph::runtime::plaidml::IndexReductionImpl<O>::build_index_reduction(cons
.add( // Compare the input against the (broadcasted) max values, and select the indices
// where the max val occurs
builder::Elementwise{"SelValIdxs",
"I == SelVal ? index(I, " + reduction_axis_str + ") : D" +
reduction_axis_str})
"I == SelVal ? index(I, " + reduction_axis_str +
") : D" + reduction_axis_str})
.add( // Select the maximum index
builder::UnaryContraction{"<"}
.set(builder::ContractionOutput{"SelIdx"}
.add_indices(
[&](std::back_insert_iterator<std::list<std::string>> out) {
.set(
builder::ContractionOutput{"SelIdx"}
.add_indices([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (auto idx = 0; idx < dim_limit; ++idx)
{
if (idx != this->op().get_reduction_axis())
......@@ -97,7 +99,8 @@ void ngraph::runtime::plaidml::IndexReductionImpl<O>::build_index_reduction(cons
}
}
})
.add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
.add_dims([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (auto idx = 0; idx < dim_limit; ++idx)
{
if (idx != this->op().get_reduction_axis())
......@@ -106,41 +109,45 @@ void ngraph::runtime::plaidml::IndexReductionImpl<O>::build_index_reduction(cons
}
}
}))
.set(builder::ContractionInput{"SelValIdxs"}.add_indices("d", 0, dim_limit)))
.set(builder::ContractionInput{"SelValIdxs"}.add_indices(
"d", 0, dim_limit)))
.add( // Convert to the requested output element type (if any)
builder::Elementwise{"O",
tile_converter("SelIdx", this->op().get_index_element_type())})
builder::Elementwise{
"O", tile_converter("SelIdx", this->op().get_index_element_type())})
.finalize());
}
}
template <>
struct ngraph::runtime::plaidml::ParentImpl<ngraph::op::ArgMax>
{
using Type = IndexReductionImpl<ngraph::op::ArgMax>;
};
template <>
struct ParentImpl<op::ArgMax>
{
using Type = IndexReductionImpl<op::ArgMax>;
};
template <>
struct ngraph::runtime::plaidml::ParentImpl<ngraph::op::ArgMin>
{
using Type = IndexReductionImpl<ngraph::op::ArgMin>;
};
template <>
struct ParentImpl<op::ArgMin>
{
using Type = IndexReductionImpl<op::ArgMin>;
};
// ArgMax computes the maximum index along a tensor axis.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::ArgMax>::operator()()
{
// ArgMax computes the maximum index along a tensor axis.
template <>
void Impl<op::ArgMax>::operator()()
{
build_index_reduction(">");
}
}
// ArgMin computes the minimum index along a tensor axis.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::ArgMin>::operator()()
{
// ArgMin computes the minimum index along a tensor axis.
template <>
void Impl<op::ArgMin>::operator()()
{
build_index_reduction("<");
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::ArgMax>::Registration register_argmax;
ngraph::runtime::plaidml::Impl<ngraph::op::ArgMin>::Registration register_argmin;
namespace
{
Impl<op::ArgMax>::Registration register_argmax;
Impl<op::ArgMin>::Registration register_argmin;
}
}
}
}
......@@ -20,10 +20,16 @@
namespace vp = vertexai::plaidml;
// Parameter binds a descriptor::Tensor to a PlaidML Placeholder.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Parameter>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// Parameter binds a descriptor::Tensor to a PlaidML Placeholder.
template <>
void Impl<op::Parameter>::operator()()
{
check_inputs(0);
check_outputs(1);
vp::placeholder ph{build()->io_dim_override ? build()->io_dim_override_count
......@@ -33,22 +39,25 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Parameter>::operator()()
build()->bindings.emplace(tv, TensorInfo{ph, TensorContents::DATA});
build()->composer.input(name, ph);
build()->input_names.emplace(tv, std::move(name));
}
}
// Result binds a PlaidML variable to a composed function output.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Result>::operator()()
{
// Result binds a PlaidML variable to a composed function output.
template <>
void Impl<op::Result>::operator()()
{
check_inputs(1);
check_outputs(1);
std::string name = std::string{"O"} + std::to_string(build()->output_names.size());
descriptor::Tensor* tv = op().get_output_tensor_ptr().get();
build()->composer.output(name, op_input());
build()->output_names.emplace(tv, std::move(name));
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::Parameter>::Registration register_parameter;
ngraph::runtime::plaidml::Impl<ngraph::op::Result>::Registration register_result;
namespace
{
Impl<op::Parameter>::Registration register_parameter;
Impl<op::Result>::Registration register_result;
}
}
}
}
......@@ -17,21 +17,29 @@
#include "ngraph/op/lrn.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// LRN implements Local Response Normalization
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::LRN>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// LRN implements Local Response Normalization
template <>
void Impl<op::LRN>::operator()()
{
check_inputs(1);
check_outputs(1);
auto dim_limit = op().get_inputs()[0].get_shape().size();
auto rank = dim_limit - 2;
auto distance = op().get_nsize() / 2;
std::ostringstream div_expr;
div_expr << "I / pow(" << op().get_bias() << ".0 + ((" << op().get_alpha() << ".0 / "
<< op().get_nsize() << ".0) * S), " << op().get_beta() << ".0)";
div_expr << "I / pow(" << op().get_bias() << ".0 + ((" << op().get_alpha()
<< ".0 / " << op().get_nsize() << ".0) * S), " << op().get_beta() << ".0)";
set_output(
start_tile_function()
.add(builder::Input{op_input(), "I"}.add_dims({"N", "C"}).add_dims("D", 0, rank))
.add(builder::Input{op_input(), "I"}
.add_dims({"N", "C"})
.add_dims("D", 0, rank))
.add(builder::Output{"O"})
.add(builder::Elementwise{"ISQ", "I * I"})
.add(builder::UnaryContraction{"+"}
......@@ -43,14 +51,18 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::LRN>::operator()()
.set(builder::ContractionInput{"ISQ"}
.add_indices({"n", "c + z - " + std::to_string(distance)})
.add_indices("d", 0, rank))
.add_constraints([&](std::back_insert_iterator<std::list<std::string>> out) {
.add_constraints(
[&](std::back_insert_iterator<std::list<std::string>> out) {
out = "z < " + std::to_string(op().get_nsize());
}))
.add(builder::Elementwise{"O", div_expr.str()})
.finalize());
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::LRN>::Registration register_local_response_norm;
namespace
{
Impl<op::LRN>::Registration register_local_response_norm;
}
}
}
}
......@@ -19,10 +19,16 @@
#include "ngraph/op/or.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// And performs a simple elementwise logical and.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::And>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// And performs a simple elementwise logical and.
template <>
void Impl<op::And>::operator()()
{
check_inputs(2);
check_outputs(1);
set_output(start_tile_function()
......@@ -32,12 +38,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::And>::operator()()
.add(builder::Elementwise{"C", "A ? B : A"})
.finalize(),
TensorContents::LOGICAL);
}
}
// Not performs a simple elementwise logical not.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Not>::operator()()
{
// Not performs a simple elementwise logical not.
template <>
void Impl<op::Not>::operator()()
{
check_inputs(1);
check_outputs(1);
set_output(start_tile_function()
......@@ -46,12 +52,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Not>::operator()()
.add(builder::Elementwise{"O", "cmp_eq(I, 0)"})
.finalize(),
TensorContents::LOGICAL);
}
}
// Or performs a simple elementwise logical or.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Or>::operator()()
{
// Or performs a simple elementwise logical or.
template <>
void Impl<op::Or>::operator()()
{
check_inputs(2);
check_outputs(1);
set_output(start_tile_function()
......@@ -61,11 +67,14 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Or>::operator()()
.add(builder::Elementwise{"C", "A ? A : B"})
.finalize(),
TensorContents::LOGICAL);
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::And>::Registration register_and;
ngraph::runtime::plaidml::Impl<ngraph::op::Not>::Registration register_not;
ngraph::runtime::plaidml::Impl<ngraph::op::Or>::Registration register_or;
namespace
{
Impl<op::And>::Registration register_and;
Impl<op::Not>::Registration register_not;
Impl<op::Or>::Registration register_or;
}
}
}
}
......@@ -20,10 +20,16 @@
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
#include "ngraph/runtime/plaidml/plaidml_translate.hpp"
// OneHot performs one-hot encoding along the requested axis.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::OneHot>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// OneHot performs one-hot encoding along the requested axis.
template <>
void Impl<op::OneHot>::operator()()
{
check_inputs(1);
check_outputs(1);
......@@ -68,9 +74,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::OneHot>::operator()()
.add(builder::Input{op_input(), "I"}.add_dims("D", 0, in_shape.size()))
.add(builder::Input{static_cast<std::int64_t>(0), "Zero"})
.add(builder::Output{"O"})
.add(builder::UnaryContraction{"="}
.set(builder::ContractionOutput{"ZS"}
.add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
.add(
builder::UnaryContraction{"="}
.set(
builder::ContractionOutput{"ZS"}
.add_dims([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (std::size_t idx = 0; idx < out_shape.size(); ++idx)
{
if (idx == op().get_one_hot_axis())
......@@ -85,15 +94,19 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::OneHot>::operator()()
})
.add_indices("d", 0, out_shape.size()))
.set(builder::ContractionInput{"Zero"}))
.add(builder::Elementwise{"Idx",
"index(ZS, " + std::to_string(op().get_one_hot_axis()) + ")"})
.add(builder::Elementwise{
"Idx", "index(ZS, " + std::to_string(op().get_one_hot_axis()) + ")"})
.add(builder::Elementwise{"IS", "reshape(I, " + in_reshape.str() + ")"})
.add(builder::Elementwise{"OV", "IS == Idx ? 1 : 0"})
.add(builder::Elementwise{"O", tile_converter("OV", op().get_element_type())})
.add(builder::Elementwise{"O",
tile_converter("OV", op().get_element_type())})
.finalize());
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::OneHot>::Registration register_one_hot;
namespace
{
Impl<op::OneHot>::Registration register_one_hot;
}
}
}
}
......@@ -20,10 +20,16 @@
#include "ngraph/runtime/plaidml/plaidml_convpool_formatter.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// AvgPool implements a batch average pooling operation.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::AvgPool>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// AvgPool implements a batch average pooling operation.
template <>
void Impl<op::AvgPool>::operator()()
{
check_inputs(1);
check_outputs(1);
......@@ -92,12 +98,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::AvgPool>::operator()()
f.add(cpf.PoolContraction()).add(builder::Elementwise{"O", "S / Count"});
set_output(f.finalize());
}
}
// MaxPool implements a batch max pooling operation.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::MaxPool>::operator()()
{
// MaxPool implements a batch max pooling operation.
template <>
void Impl<op::MaxPool>::operator()()
{
check_inputs(1);
check_outputs(1);
......@@ -156,11 +162,11 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::MaxPool>::operator()()
.add(cpf.O_out_header())
.add(cpf.PoolContraction())
.finalize());
}
}
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::AvgPoolBackprop>::operator()()
{
template <>
void Impl<op::AvgPoolBackprop>::operator()()
{
check_inputs(1);
check_outputs(1);
......@@ -174,7 +180,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::AvgPoolBackprop>::operator()()
if (include_padding)
{
throw std::runtime_error("Include padding in average not yet implemented in PlaidML");
throw std::runtime_error(
"Include padding in average not yet implemented in PlaidML");
}
ngraph::CoordinateDiff pad_above;
......@@ -229,18 +236,19 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::AvgPoolBackprop>::operator()()
{
std::ostringstream s;
s << "XI" << i - 2;
ret.add(builder::Input{static_cast<std::int64_t>(forward_arg_shape[i]), s.str()});
ret.add(
builder::Input{static_cast<std::int64_t>(forward_arg_shape[i]), s.str()});
}
set_output(ret.add(cpf.Broadcast_Ones())
.add(cpf.Count())
.add(builder::Elementwise{"S", "DO / Count"})
.add(cpf.PoolContraction())
.finalize());
}
}
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::MaxPoolBackprop>::operator()()
{
template <>
void Impl<op::MaxPoolBackprop>::operator()()
{
check_inputs(2);
check_outputs(1);
......@@ -299,14 +307,15 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::MaxPoolBackprop>::operator()()
.add(cpf.PoolContraction())
.add(cpf.PoolDerivContraction())
.finalize());
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::AvgPool>::Registration register_avg_pool;
ngraph::runtime::plaidml::Impl<ngraph::op::MaxPool>::Registration register_max_pool;
ngraph::runtime::plaidml::Impl<ngraph::op::AvgPoolBackprop>::Registration
register_avg_pool_backprop;
ngraph::runtime::plaidml::Impl<ngraph::op::MaxPoolBackprop>::Registration
register_max_pool_backprop;
namespace
{
Impl<op::AvgPool>::Registration register_avg_pool;
Impl<op::MaxPool>::Registration register_max_pool;
Impl<op::AvgPoolBackprop>::Registration register_avg_pool_backprop;
Impl<op::MaxPoolBackprop>::Registration register_max_pool_backprop;
}
}
}
}
......@@ -19,10 +19,16 @@
#include "ngraph/op/replace_slice.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// ReplaceSlice replaces part of a tensor with another tensor.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::ReplaceSlice>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// ReplaceSlice replaces part of a tensor with another tensor.
template <>
void Impl<op::ReplaceSlice>::operator()()
{
check_inputs(2);
check_outputs(1);
......@@ -43,11 +49,13 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::ReplaceSlice>::operator()()
.add(builder::Input{op_input(0), "L"}.add_dims("D", 0, shape.size()))
.add(builder::Input{op_input(1), "S"}.add_dims("SD", 0, shape.size()))
.add(builder::Output{"O"})
.add(builder::UnaryContraction{"="}
.set(builder::ContractionOutput{"O"}
.add(
builder::UnaryContraction{"="}
.set(
builder::ContractionOutput{"O"}
.add_dims("D", 0, shape.size())
.add_indices(
[&](std::back_insert_iterator<std::list<std::string>> out) {
.add_indices([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (std::size_t idx = 0; idx < shape.size(); ++idx)
{
auto stride = op().get_strides()[idx];
......@@ -73,8 +81,10 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::ReplaceSlice>::operator()()
out = didx.str();
}
}))
.set(builder::ContractionInput{"S"}.add_indices("d", 0, shape.size()))
.add_constraints([&](std::back_insert_iterator<std::list<std::string>> out) {
.set(builder::ContractionInput{"S"}.add_indices(
"d", 0, shape.size()))
.add_constraints(
[&](std::back_insert_iterator<std::list<std::string>> out) {
for (std::size_t idx = 0; idx < shape.size(); ++idx)
{
out = "d" + std::to_string(idx) + " < " +
......@@ -84,9 +94,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::ReplaceSlice>::operator()()
})
.set_default("L"))
.finalize());
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::ReplaceSlice>::Registration register_replace_slice;
namespace
{
Impl<op::ReplaceSlice>::Registration register_replace_slice;
}
}
}
}
......@@ -19,10 +19,16 @@
#include "ngraph/op/reverse.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// Reverse reverses the selected axes within a tensor.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Reverse>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// Reverse reverses the selected axes within a tensor.
template <>
void Impl<op::Reverse>::operator()()
{
check_inputs(1);
check_outputs(1);
......@@ -35,8 +41,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Reverse>::operator()()
.set(builder::ContractionOutput{"O"}
.add_indices("d", 0, shape.size())
.add_dims("D", 0, shape.size()))
.set(builder::ContractionInput{"I"}.add_indices(
[&](std::back_insert_iterator<std::list<std::string>> out) {
.set(builder::ContractionInput{"I"}.add_indices([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (std::size_t idx = 0; idx < shape.size(); ++idx)
{
auto sidx = std::to_string(idx);
......@@ -51,9 +57,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Reverse>::operator()()
}
})))
.finalize());
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::Reverse>::Registration register_reverse;
namespace
{
Impl<op::Reverse>::Registration register_reverse;
}
}
}
}
......@@ -18,10 +18,16 @@
#include "ngraph/op/slice.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// Slice takes a sub-slice of a tensor.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Slice>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// Slice takes a sub-slice of a tensor.
template <>
void Impl<op::Slice>::operator()()
{
check_inputs(1);
check_outputs(1);
NGRAPH_DEBUG << "Slice: low: " << op().get_lower_bounds();
......@@ -33,17 +39,21 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Slice>::operator()()
start_tile_function()
.add(builder::Input{op_input(), "I"}.add_dims("ID", 0, dim_limit))
.add(builder::Output{"O"})
.add(builder::UnaryContraction{"="}
.set(builder::ContractionOutput{"O"}
.add(
builder::UnaryContraction{"="}
.set(
builder::ContractionOutput{"O"}
.add_indices("od", 0, dim_limit)
.add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
.add_dims([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (std::size_t idx = 0; idx < dim_limit; ++idx)
{
std::ostringstream s;
std::size_t stride = op().get_strides()[idx];
std::ptrdiff_t trim_count =
op().get_lower_bounds()[idx] +
(shape[idx] - op().get_upper_bounds()[idx]) + 1 - stride;
(shape[idx] - op().get_upper_bounds()[idx]) +
1 - stride;
if ((stride != 1) && trim_count)
{
s << "(";
......@@ -96,9 +106,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Slice>::operator()()
}
})))
.finalize());
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::Slice>::Registration register_slice;
namespace
{
Impl<op::Slice>::Registration register_slice;
}
}
}
}
......@@ -19,10 +19,16 @@
#include "ngraph/op/softmax.hpp"
#include "ngraph/runtime/plaidml/plaidml_impl.hpp"
// Softmax implements a standard ML softmax operation.
template <>
void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
// Softmax implements a standard ML softmax operation.
template <>
void Impl<op::Softmax>::operator()()
{
check_inputs(1);
check_outputs(1);
......@@ -30,7 +36,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
auto dim_limit = shape.size();
auto f = start_tile_function();
f.add(builder::Input{op_input(0), "I"}.add_dims("D", 0, dim_limit)).add(builder::Output{"O"});
f.add(builder::Input{op_input(0), "I"}.add_dims("D", 0, dim_limit))
.add(builder::Output{"O"});
bool reorder_needed = false;
bool saw_element = false;
......@@ -71,7 +78,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
{
f.add(builder::UnaryContraction{"="}
.set(builder::ContractionOutput{"RI"}
.add_dims([&](std::back_insert_iterator<std::list<std::string>> out) {
.add_dims([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (auto idx : group_idxs)
{
out = "D" + std::to_string(idx);
......@@ -81,7 +89,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
out = "D" + std::to_string(idx);
}
})
.add_indices([&](std::back_insert_iterator<std::list<std::string>> out) {
.add_indices([&](
std::back_insert_iterator<std::list<std::string>> out) {
for (auto idx : group_idxs)
{
out = "d" + std::to_string(idx);
......@@ -117,7 +126,8 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
{
// Take the softmax.
std::ostringstream softmax;
softmax << "builtin_softmax(" << input << ", " << groups << ", " << elements << ")";
softmax << "builtin_softmax(" << input << ", " << groups << ", " << elements
<< ")";
f.add(builder::Elementwise{output, softmax.str()});
}
......@@ -159,9 +169,12 @@ void ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::operator()()
}
set_output(f.finalize());
}
}
namespace
{
ngraph::runtime::plaidml::Impl<ngraph::op::Softmax>::Registration register_softmax;
namespace
{
Impl<op::Softmax>::Registration register_softmax;
}
}
}
}
......@@ -38,6 +38,8 @@ topk_2d_max_one # No plans to implement TopK
topk_2d_min_all # No plans to implement TopK
topk_2d_min_partial # No plans to implement TopK
topk_2d_min_one # No plans to implement TopK
topk_int64 # No plans to implement TopK
topk_5d_max_partial # No plans to implement TopK
# Tests that PlaidML might be able to run at some point.
backwards_maxpool_n2_c1_hw5_3x3_str2_max_pad1x2_2x3
......@@ -84,3 +86,5 @@ sum_3d_eliminate_zero_dim # Empty dims apparently should produce shape
dot_0_0 # Empty dims apparently should produce shaped 0s
dot_matrix_2x0_0x2 # Empty dims apparently should produce shaped 0s
dot_2x0_0 # Empty dims apparently should produce shaped 0s
numeric_float_nan
numeric_double_nan