Commit c00553ba authored by Rob Earhart, committed by Scott Cyphers

Minor PlaidML updates (#3007)

* Update to use new backend constructor pattern

* Update PlaidML unit test exclusions
parent ac17d797
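
For orientation before the diff: under the old pattern each backend shared library exported extern "C" new_backend/delete_backend/get_ngraph_version_string entry points, which the backend loader used to construct and destroy backends directly. The new pattern exports a single get_backend_constructor_pointer entry point returning a BackendConstructor whose create() builds the backend from a configuration string. Below is a minimal, self-contained sketch of that shape; Backend and BackendConstructor here are simplified stand-ins for the real interfaces in ngraph/runtime/backend.hpp and ngraph/runtime/backend_manager.hpp, and DemoBackend is purely illustrative (the commit's real implementation is PlaidML_Backend, shown in the diff below).

// Simplified stand-ins for ngraph::runtime::Backend and
// ngraph::runtime::BackendConstructor, just to show the shape of the pattern.
#include <iostream>
#include <memory>
#include <string>

struct Backend
{
    virtual ~Backend() = default;
};

struct BackendConstructor
{
    virtual ~BackendConstructor() = default;
    virtual std::shared_ptr<Backend> create(const std::string& config) = 0;
};

// Illustrative backend; the real commit constructs PlaidML_Backend from the
// configuration string via parse_config_string.
struct DemoBackend final : Backend
{
    explicit DemoBackend(const std::string& config) { std::cout << "config: " << config << "\n"; }
};

struct DemoBackendConstructor final : BackendConstructor
{
    std::shared_ptr<Backend> create(const std::string& config) final
    {
        return std::make_shared<DemoBackend>(config);
    }
};

// The single C entry point the backend loader resolves after opening the
// shared library, replacing the old new_backend/delete_backend pair.
extern "C" BackendConstructor* get_backend_constructor_pointer()
{
    static DemoBackendConstructor constructor;
    return &constructor;
}

int main()
{
    // Roughly what the loader does once it has the constructor pointer.
    auto backend = get_backend_constructor_pointer()->create("option=value");
    return backend ? 0 : 1;
}
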
@@ -16,6 +16,7 @@
set(SRC
    plaidml_backend.cpp
+   plaidml_backend_constructor.cpp
    plaidml_builder.cpp
    plaidml_compilation_cache.cpp
    plaidml_compiler.cpp
@@ -22,7 +22,7 @@
namespace vp = vertexai::plaidml;

-ngraph::runtime::plaidml::PlaidML_Backend::PlaidML_Backend(const char* configuration_string)
+ngraph::runtime::plaidml::PlaidML_Backend::PlaidML_Backend(const std::string& configuration_string)
    : m_config(parse_config_string(configuration_string))
    , m_compiler{&m_config}
{
@@ -67,18 +67,3 @@ void ngraph::runtime::plaidml::PlaidML_Backend::remove_compiled_function(
        m_cache.forget(std::move(plaidml_exec));
    }
}

-extern "C" const char* get_ngraph_version_string()
-{
-    return NGRAPH_VERSION;
-}
-
-extern "C" ngraph::runtime::Backend* new_backend(const char* configuration_string)
-{
-    return new ngraph::runtime::plaidml::PlaidML_Backend{configuration_string};
-}
-
-extern "C" void delete_backend(ngraph::runtime::Backend* backend)
-{
-    delete backend;
-}
@@ -39,7 +39,7 @@ namespace ngraph
class ngraph::runtime::plaidml::PlaidML_Backend final : public runtime::Backend
{
public:
-    PlaidML_Backend(const char* configuration_string);
+    PlaidML_Backend(const std::string& configuration_string);
    ~PlaidML_Backend() final {}
    std::shared_ptr<ngraph::runtime::Tensor>
        create_tensor(const ngraph::element::Type& element_type, const Shape& shape) final;
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/backend_manager.hpp"
#include "ngraph/runtime/plaidml/plaidml_backend.hpp"
namespace ngraph
{
namespace runtime
{
namespace plaidml
{
class PlaidML_BackendConstructor;
}
}
}
class ngraph::runtime::plaidml::PlaidML_BackendConstructor final
: public runtime::BackendConstructor
{
public:
~PlaidML_BackendConstructor() final {}
std::shared_ptr<Backend> create(const std::string& config) final;
};
std::shared_ptr<ngraph::runtime::Backend>
ngraph::runtime::plaidml::PlaidML_BackendConstructor::create(const std::string& config)
{
return std::make_shared<PlaidML_Backend>(config);
}
extern "C" ngraph::runtime::BackendConstructor* get_backend_constructor_pointer()
{
static ngraph::runtime::plaidml::PlaidML_BackendConstructor backend_constructor;
return &backend_constructor;
}
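
Once the library exposes get_backend_constructor_pointer, applications keep selecting the backend by name through the usual frontend API. A minimal usage sketch, assuming the standard ngraph::runtime::Backend::create entry point and that this library is registered under the name "PlaidML" (any text after a ':' in the requested name would end up as the configuration string handed to parse_config_string; both the name and that convention are assumptions, not part of this diff):

#include <memory>

#include "ngraph/runtime/backend.hpp"

int main()
{
    // The backend manager resolves "PlaidML" to this shared library and calls
    // get_backend_constructor_pointer()->create(...) with the requested string.
    auto backend = ngraph::runtime::Backend::create("PlaidML");
    return backend ? 0 : 1;
}
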
@@ -71,7 +71,7 @@ namespace ngraph
}

ngraph::runtime::plaidml::Config
-    ngraph::runtime::plaidml::parse_config_string(const char* configuration_string)
+    ngraph::runtime::plaidml::parse_config_string(const std::string& configuration_string)
{
    bool err = false;
    bool help = false;
@@ -103,7 +103,7 @@ ngraph::runtime::plaidml::Config
    // oval_begin
    // oval_end
-    const char* c = configuration_string;
+    const char* c = configuration_string.c_str();
    while (*c && *c != ':')
    {
        ++c;
@@ -29,7 +29,7 @@ namespace ngraph
        {
            struct Config;

-            Config parse_config_string(const char* configuration_string);
+            Config parse_config_string(const std::string& configuration_string);
        }
    }
}
@@ -124,3 +124,130 @@ scatter_add_1d_indices
scatter_add_scalar_indices
scatter_nd_add_batch_2d_to_3d
scatter_nd_add_2d_to_3d
# To be triaged -- bad kernels, numerical accuracy, edge conditions,
# unimplemented functionality, &c
cos
erf
sin
tan
not
abc_int64
concat_matrix_int64
select_double
convert_int32_bool
convert_float32_bool
tensor_constant_int64
constant_equality_bool
numeric_float_inf
numeric_double_inf
computation_reuse
pad_negative_exterior_1d_check_limits
pad_edge_1d
pad_edge_1d_top_neg
pad_edge_1d_top_neg_bigger_than_tensor
pad_edge_1d_bottom_neg
pad_edge_1d_bottom_neg_bigger_than_tensor
pad_edge_2d
pad_edge_2d_with_neg
pad_reflect_1d
pad_reflect_1d_top_neg
pad_reflect_1d_top_neg_bigger_than_tensor
pad_reflect_1d_bottom_neg
pad_reflect_1d_bottom_neg_bigger_than_tensor
pad_reflect_1d_multi_reflect
pad_reflect_2d
pad_reflect_2d_with_neg
pad_negative_exterior_2d
pad_negative_exterior_2d_all_negative
pad_negative_exterior_4d
max_trivial_int8
max_trivial_5d_int32
max_3d_to_scalar_double
softmax_axis_3d
logical_and
logical_or
batch_norm_inference_parameters_duplication
batch_norm_fprop_b1c2h2w2
batch_norm_fprop_b2c2h2w1
batchnorm_fprop_b2c2d2h1w1
batch_norm_fprop_inference_b2c2h2w1
argmax_3D_axis_0
argmax_3D_axis_1
argmax_3D_axis_2
argmin_trivial_in_double
topk_2d_max_one_with_equal_values
sum_trivial_in_double
sum_stable_simple_double
one_hot_vector_many_categories
gather_no_axis_int8
gather_no_axis_int16
gather_no_axis_int32
gather_no_axis_int64
gather_no_axis_uint8
gather_no_axis_uint16
gather_no_axis_uint32
gather_no_axis_uint64
gather_no_axis_bool
elu
elu_negative_alpha
prelu
hardsigmoid
prelu_shared_slope
prelu_negative_slope
conv_bias_1d
conv_bias_2d
conv_bias_3d
conv_bias_bprop_2d
conv_bias_add_2d
group_conv
space_to_depth
depth_to_space
normalize_across_chw_scalar_scale_4d
normalize_across_chw_scalar_scale_3d
normalize_across_chw_scalar_scale_2d
normalize_across_chw_w_scale
gemm
fused_clamp
mvn_mean_normalization
mvn_mean_normalization_split_channels
mvn_mean_variance_normalization
mvn_mean_variance_normalization_split_channels
grn_4d
grn_2d_with_bias
scale_shift_no_broadcast
scale_shift
shuffle_channels_simple
shuffle_channels_negative_axis
shuffle_channels_float
squeeze
squeeze_default_axes
squared_difference
squared_difference_broadcast
fake_quantize
fake_quantize_with_clip
fake_quantize_with_clip_across_channels
dot_0_0
dot_2x0_0
equal
notequal
greater
greater_int64
greatereq
less
lesseq
lesseq_int32
lesseq_bool
broadcast_vector_rowwise_int64
minimum_int64
maximum_int64
auto_bcast_binary_elementwise
any_trivial
any_2x2x3_eliminate_dim_0
backwards_acos
backwards_asin
backwards_atan
backwards_softmax_all
backwards_softmax_axis
backwards_softmax_underflow
backwards_softmax_3d