Commit 388fb89b authored by Adam Procter

Merge remote-tracking branch 'origin/master' into aprocter/dyn-replace-slice

parents 0b0fb5b7 ca220f7d
......@@ -301,6 +301,7 @@ if (LINUX)
else()
set(CMAKE_INSTALL_RPATH "$ORIGIN")
endif()
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
endif()
......
......@@ -310,6 +310,8 @@ set (SRC
op/fused/grn.hpp
op/fused/group_conv.hpp
op/fused/group_conv.cpp
op/fused/group_conv_transpose.hpp
op/fused/group_conv_transpose.cpp
op/fused/leaky_relu.cpp
op/fused/leaky_relu.hpp
op/fused/mvn.cpp
......
......@@ -150,6 +150,8 @@ add_library(onnx_import STATIC
op/selu.hpp
op/shape.hpp
op/shape.cpp
op/shrink.hpp
op/shrink.cpp
op/sigmoid.hpp
op/sign.hpp
op/sin.hpp
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <memory>
#include "exceptions.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/op/greater.hpp"
#include "ngraph/op/less.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/subtract.hpp"
#include "shrink.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
NodeVector shrink(const Node& node)
{
const auto input = node.get_ng_inputs().at(0);
const float bias = node.get_attribute_value<float>("bias", 0.0f);
const float lambd = node.get_attribute_value<float>("lambd", 0.5f);
ASSERT_VALID_ARGUMENT(node, !(lambd < 0.0f))
<< "The provided 'lambd' value: " << lambd << " must not be negative.";
const auto negative_lambd = ngraph::op::Constant::create(
input->get_element_type(), input->get_shape(), {-lambd});
const auto positive_lambd = ngraph::op::Constant::create(
input->get_element_type(), input->get_shape(), {lambd});
const auto bias_tensor = ngraph::op::Constant::create(
input->get_element_type(), input->get_shape(), {bias});
// Create masks indicating the locations of values that need to be adjusted
// by adding or subtracting the bias
// All other values, indicated by 'false' in the masks, need to be zeroed out
std::shared_ptr<ngraph::Node> values_below_neg_lambd =
std::make_shared<ngraph::op::Less>(input, negative_lambd);
std::shared_ptr<ngraph::Node> values_above_pos_lambd =
std::make_shared<ngraph::op::Greater>(input, positive_lambd);
// Convert from bool to the input type to be able to multiply adjusted inputs
// by the created masks
values_below_neg_lambd = std::make_shared<ngraph::op::Convert>(
values_below_neg_lambd, input->get_element_type());
values_above_pos_lambd = std::make_shared<ngraph::op::Convert>(
values_above_pos_lambd, input->get_element_type());
std::shared_ptr<ngraph::Node> input_minus_bias = input - bias_tensor;
std::shared_ptr<ngraph::Node> input_plus_bias = input + bias_tensor;
// Multiply by the corresponding mask to zero out the values within
// the [-lambd, lambd] range and keep the bias-adjusted values from outside of it
input_minus_bias = values_above_pos_lambd * input_minus_bias;
input_plus_bias = values_below_neg_lambd * input_plus_bias;
return {input_plus_bias + input_minus_bias};
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
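Editor's note: for readers checking the decomposition above by hand, here is a minimal standalone sketch of the element-wise Shrink semantics it implements. This is plain C++, not nGraph code; the function name `shrink_reference` is made up for illustration.

```cpp
#include <cstddef>
#include <vector>

// Element-wise Shrink, mirroring the mask/bias decomposition above:
//   x < -lambd  ->  x + bias
//   x >  lambd  ->  x - bias
//   otherwise   ->  0
std::vector<float> shrink_reference(const std::vector<float>& x, float lambd, float bias)
{
    std::vector<float> y(x.size());
    for (std::size_t i = 0; i < x.size(); ++i)
    {
        if (x[i] < -lambd)
            y[i] = x[i] + bias;
        else if (x[i] > lambd)
            y[i] = x[i] - bias;
        else
            y[i] = 0.0f;
    }
    return y;
}
```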
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "core/node.hpp"
#include "ngraph/node.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
/// @brief ONNX Shrink operator
///
/// @note It operates on a single input tensor and two attributes: lambd and bias.
/// Input values greater than or equal to '-lambd' and less than or equal to 'lambd' are zeroed out.
/// 'bias' is added to values that are less than '-lambd'
/// and subtracted from values greater than 'lambd'.
NodeVector shrink(const Node& node);
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
......@@ -84,6 +84,7 @@ opset versions starting from `1` to `6` and to the latest opset version.
| Relu | 1-6- |
| Selu | 1-6- |
| Shape | 1- |
| Shrink | 1- |
| Sigmoid | 1-6- |
| Sign | 9- |
| Sin | 7- |
......
......@@ -94,6 +94,7 @@
#include "op/reshape.hpp"
#include "op/selu.hpp"
#include "op/shape.hpp"
#include "op/shrink.hpp"
#include "op/sigmoid.hpp"
#include "op/sign.hpp"
#include "op/sin.hpp"
......@@ -311,6 +312,7 @@ namespace ngraph
REGISTER_OPERATOR("Reshape", 1, reshape);
REGISTER_OPERATOR("Selu", 1, selu);
REGISTER_OPERATOR("Shape", 1, shape);
REGISTER_OPERATOR("Shrink", 1, shrink);
REGISTER_OPERATOR("Sigmoid", 1, sigmoid);
REGISTER_OPERATOR("Sign", 1, sign);
REGISTER_OPERATOR("Sin", 1, sin);
......
......@@ -104,6 +104,7 @@
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/grn.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/group_conv_transpose.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/op/fused/mvn.hpp"
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cstdlib>
#include <memory>
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/util/fused_op.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/strides.hpp"
namespace ngraph
{
namespace op
{
/// \brief Group Transposed Convolution (Deconvolution)
class GroupConvolutionTranspose : public util::FusedOp
{
public:
///
/// \brief Constructs GroupConvolutionTranspose operation.
///
/// \param[in] data The node producing input data.
/// \param[in] filters The node producing filters data.
/// \param[in] strides The strides along each feature axis.
/// \param[in] dilations The dilations along each feature axis.
/// \param[in] padding_begin The padding added at the beginning of each feature axis.
/// \param[in] padding_end The padding added at the end of each feature axis.
/// \param[in] output_padding The zero-padding (adjustment) added to one side of the output.
/// \param[in] groups The number of groups the input channels and output channels
/// are divided into.
/// \param[in] pad_type The provided padding type.
/// \param[in] output_shape The output shape. When provided, padding values are
/// automatically inferred.
///
GroupConvolutionTranspose(const std::shared_ptr<Node>& data,
const std::shared_ptr<Node>& filters,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& padding_begin,
const CoordinateDiff& padding_end,
const CoordinateDiff& output_padding,
const std::size_t groups = 1UL,
const PadType& pad_type = PadType::EXPLICIT,
const Shape& output_shape = Shape{});
///
/// \brief Constructs GroupConvolutionTranspose operation.
///
/// \param[in] data The node producing input data.
/// \param[in] filters The node producing filters data.
/// \param[in] groups The number of groups the input channels and output channels
/// are divided into.
///
GroupConvolutionTranspose(const std::shared_ptr<Node>& data,
const std::shared_ptr<Node>& filters,
const std::size_t groups = 1UL);
///
/// \brief Constructs GroupConvolutionTranspose operation.
///
/// \param[in] data The node producing input data.
/// \param[in] filters The node producing filters data.
/// \param[in] strides The strides along each feature axis.
/// \param[in] dilations The dilations along each feature axis.
/// \param[in] output_padding The zero-padding (adjustment) added to one side of the output.
/// \param[in] output_shape The output shape. When provided, padding values are
/// automatically inferred.
/// \param[in] groups The number of groups the input channels and output channels
/// are divided into.
///
GroupConvolutionTranspose(const std::shared_ptr<Node>& data,
const std::shared_ptr<Node>& filters,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& output_padding,
const Shape& output_shape,
const std::size_t groups = 1UL);
///
/// \brief Constructs GroupConvolutionTranspose operation.
///
/// \param[in] data The node producing input data.
/// \param[in] filters The node producing filters data.
/// \param[in] output_shape The output shape. When provided, padding values are
/// automatically inferred.
/// \param[in] groups The number of groups the input channels and output channels
/// are divided into.
///
GroupConvolutionTranspose(const std::shared_ptr<Node>& data,
const std::shared_ptr<Node>& filters,
const Shape& output_shape,
const std::size_t groups = 1UL);
std::shared_ptr<Node> get_filters() { return get_argument(1); }
std::shared_ptr<Node> get_data() { return get_argument(0); }
const Strides& get_strides() const { return m_strides; }
const Strides& get_dilations() const { return m_dilations; }
const CoordinateDiff& get_padding_begin() const { return m_padding_begin; }
const CoordinateDiff& get_padding_end() const { return m_padding_end; }
const CoordinateDiff& get_output_padding() const { return m_output_padding; }
std::size_t get_groups() const { return m_groups; }
const PadType& get_pad_type() const { return m_pad_type; }
const Shape& get_output_shape() const { return m_output_shape; }
virtual void pre_validate_and_infer_types() override;
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
private:
///
/// \brief Calculate the shape of the data batch from forward propagation.
///
/// \return The data batch shape.
///
Shape get_data_batch_shape() const;
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_padding_begin;
CoordinateDiff m_padding_end;
CoordinateDiff m_output_padding;
std::size_t m_groups;
PadType m_pad_type;
Shape m_output_shape;
};
}
}
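Editor's note: as a reading aid for the attributes declared above, the sketch below shows the conventional transposed-convolution shape arithmetic along one spatial axis. It is the standard ConvTranspose formula, not a quote of `decompose_op()`, and the exact inference done by GroupConvolutionTranspose may differ in corner cases (auto pad types, explicit output_shape).

```cpp
#include <cstddef>

// Conventional ConvTranspose output size along one spatial axis (illustrative only).
std::ptrdiff_t conv_transpose_output_dim(std::ptrdiff_t input_dim,
                                         std::ptrdiff_t filter_dim,
                                         std::ptrdiff_t stride,
                                         std::ptrdiff_t dilation,
                                         std::ptrdiff_t padding_begin,
                                         std::ptrdiff_t padding_end,
                                         std::ptrdiff_t output_padding)
{
    return (input_dim - 1) * stride + dilation * (filter_dim - 1) + 1
           - padding_begin - padding_end + output_padding;
}
```

Plugging in the attributes from the group_conv_transpose unit test further down (3x3 input, 3x3 filter, stride 2, padding 1/1, output_padding 1) gives (3-1)*2 + 2 + 1 - 1 - 1 + 1 = 6, matching the expected 1x1x6x6 output shape.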
......@@ -27,6 +27,7 @@ NGRAPH_OP(FakeQuantize, ngraph::op)
NGRAPH_OP(GRN, ngraph::op)
NGRAPH_OP(Gemm, ngraph::op)
NGRAPH_OP(GroupConvolution, ngraph::op)
NGRAPH_OP(GroupConvolutionTranspose, ngraph::op)
NGRAPH_OP(HardSigmoid, ngraph::op)
NGRAPH_OP(LeakyRelu, ngraph::op)
NGRAPH_OP(MVN, ngraph::op)
......
......@@ -24,8 +24,9 @@
using namespace std;
using namespace ngraph;
op::Result::Result(const shared_ptr<Node>& arg)
op::Result::Result(const shared_ptr<Node>& arg, bool needs_default_layout)
: Op("Result", check_single_output_args({arg}))
, m_needs_default_layout(needs_default_layout)
{
constructor_validate_and_infer_types();
// always borrow the placement conf even the default one
......@@ -44,11 +45,7 @@ shared_ptr<Node> op::Result::copy_with_new_args(const NodeVector& new_args) cons
{
check_new_args_count(this, new_args);
auto res = make_shared<Result>(new_args.at(0));
if (res)
{
res->set_needs_default_layout(m_needs_default_layout);
}
auto res = make_shared<Result>(new_args.at(0), m_needs_default_layout);
return std::move(res);
}
......
......@@ -30,7 +30,7 @@ namespace ngraph
/// \brief Allows a value to be used as a function result.
///
/// \param arg Node that produces the input tensor.
Result(const std::shared_ptr<Node>& arg);
Result(const std::shared_ptr<Node>& arg, bool needs_default_layout = false);
void validate_and_infer_types() override;
......
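Editor's note: a usage sketch of the new constructor flag, as hypothetical caller code only (the parameter node here is a stand-in, not taken from this commit):

```cpp
#include <memory>
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/result.hpp"

// Request that this result keep the default tensor layout.
auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{2, 2});
auto result = std::make_shared<ngraph::op::Result>(param, /*needs_default_layout=*/true);
```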
......@@ -38,13 +38,13 @@ namespace ngraph
auto arg_buffer_index = external_function->get_buffer_index(args[0].get_name());
auto arg1_buffer_index = external_function->get_buffer_index(args[1].get_name());
auto arg4_buffer_index = external_function->get_buffer_index(args[4].get_name());
auto out0_buffer_index = external_function->get_buffer_index(out[0].get_name());
auto out1_buffer_index = external_function->get_buffer_index(out[1].get_name());
size_t element_count = out[0].get_size();
bool use_seed = drop->get_use_seed();
double keep_prob = drop->get_keep_prob();
// Note: for performance optimization, in addition to parallel RNG with multiple
// threads, we create, initialize and advance each msr here in the builder instead of
......@@ -56,7 +56,7 @@ namespace ngraph
std::vector<std::minstd_rand> vmsr(nthr);
if (use_seed)
{
uint32_t seed = drop->get_seed();
uint64_t seed = drop->get_seed();
for (size_t i = 0; i < nthr; i++)
{
std::minstd_rand msr;
......@@ -72,13 +72,15 @@ namespace ngraph
element_count,
arg_buffer_index,
arg1_buffer_index,
arg4_buffer_index,
out0_buffer_index,
out1_buffer_index,
keep_prob,
vmsr,
use_seed](CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
bool training = static_cast<bool>(
static_cast<float*>(ctx->buffer_data[arg1_buffer_index])[0]);
double keep_prob =
static_cast<double*>(ctx->buffer_data[arg4_buffer_index])[0];
runtime::cpu::kernel::generate_dropout(
static_cast<float*>(ctx->buffer_data[arg_buffer_index]),
static_cast<float*>(ctx->buffer_data[out0_buffer_index]),
......@@ -96,13 +98,15 @@ namespace ngraph
element_count,
arg_buffer_index,
arg1_buffer_index,
arg4_buffer_index,
out0_buffer_index,
out1_buffer_index,
keep_prob,
vmsr,
use_seed](CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
bool training = static_cast<bool>(
static_cast<double*>(ctx->buffer_data[arg1_buffer_index])[0]);
double keep_prob =
static_cast<double*>(ctx->buffer_data[arg4_buffer_index])[0];
runtime::cpu::kernel::generate_dropout(
static_cast<double*>(ctx->buffer_data[arg_buffer_index]),
static_cast<double*>(ctx->buffer_data[out0_buffer_index]),
......
......@@ -26,11 +26,9 @@ using namespace ngraph;
op::Dropout::Dropout(const std::shared_ptr<Node>& input,
const std::shared_ptr<Node>& gm_const,
const std::shared_ptr<Node>& use_seed,
const uint32_t seed,
const double keep_prob)
: Op("Dropout", check_single_output_args({input, gm_const, use_seed}))
, m_seed(seed)
, m_keep_prob(keep_prob)
const std::shared_ptr<Node>& seed,
const std::shared_ptr<Node>& keep_prob)
: Op("Dropout", check_single_output_args({input, gm_const, use_seed, seed, keep_prob}))
{
constructor_validate_and_infer_types();
......@@ -41,13 +39,13 @@ op::Dropout::Dropout(const std::shared_ptr<Node>& input,
shared_ptr<Node> op::Dropout::copy_with_new_args(const NodeVector& new_args) const
{
if (new_args.size() != 3)
if (new_args.size() != 5)
{
throw ngraph_error("Incorrect number of new arguments");
}
return make_shared<Dropout>(
new_args.at(0), new_args.at(1), new_args.at(2), m_seed, m_keep_prob);
new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), new_args.at(4));
}
bool op::Dropout::get_use_seed() const
......@@ -60,3 +58,14 @@ bool op::Dropout::get_use_seed() const
}
return use_seed;
}
uint64_t op::Dropout::get_seed() const
{
uint64_t seed = 0;
if (auto const_op = dynamic_pointer_cast<op::Constant>(get_argument(3)))
{
auto seed_ptr = static_cast<const uint64_t*>(const_op->get_data_ptr());
seed = *seed_ptr;
}
return seed;
}
......@@ -29,20 +29,15 @@ namespace ngraph
Dropout(const std::shared_ptr<Node>& input,
const std::shared_ptr<Node>& gm_const,
const std::shared_ptr<Node>& use_seed,
const uint32_t seed,
const double keep_prob); // keep_prob = 1 - dropout_prob
const std::shared_ptr<Node>& seed,
const std::shared_ptr<Node>& keep_prob); // keep_prob = 1 - dropout_prob
bool get_use_seed() const;
uint32_t get_seed() const { return m_seed; }
double get_keep_prob() const { return m_keep_prob; }
void set_seed(uint32_t new_seed) { m_seed = new_seed; }
void set_keep_prob(double new_keep_prob) { m_keep_prob = new_keep_prob; }
uint64_t get_seed() const;
double get_keep_prob() const;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
private:
uint32_t m_seed;
double m_keep_prob;
};
}
}
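Editor's note: since seed and keep_prob are now graph inputs rather than attributes, callers wire them up as nodes. Below is a minimal construction sketch; the element types (i32 for use_seed, u64 for seed, f64 for keep_prob), shapes, values, and the dropout header path are assumptions inferred from how get_seed() and the CPU builder read the buffers, not quoted from the tests.

```cpp
#include <memory>
#include "ngraph/op/constant.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/runtime/cpu/op/dropout.hpp" // assumed header path for the CPU Dropout op

// Hypothetical wiring of the 5-input Dropout; 'input' and 'gm_const' stand in for
// whatever nodes the fusion pass would normally supply.
auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{2, 2});
auto gm_const = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1});
auto use_seed = ngraph::op::Constant::create(ngraph::element::i32, ngraph::Shape{}, {1});
auto seed = ngraph::op::Constant::create(ngraph::element::u64, ngraph::Shape{}, {1234});
auto keep_prob = ngraph::op::Constant::create(ngraph::element::f64, ngraph::Shape{}, {0.9});
auto dropout =
    std::make_shared<ngraph::op::Dropout>(input, gm_const, use_seed, seed, keep_prob);
```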
......@@ -923,8 +923,8 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_dropout()
auto x = std::make_shared<pattern::op::Label>(element::f32, shape);
auto x_label = std::make_shared<pattern::op::Label>(x, nullptr, NodeVector{x});
uint32_t seed = 1234;
auto seed_label = std::make_shared<pattern::op::Label>(element::u32, Shape{0});
uint64_t seed = 1234;
auto seed_label = std::make_shared<pattern::op::Label>(element::u64, Shape{0});
double value = 0.9;
auto value_const = ngraph::op::Constant::create(element::f32, Shape{1, 1, 2, 2}, {value});
......@@ -960,15 +960,28 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_dropout()
NGRAPH_DEBUG << "training argument to GenerateMask must be constant";
return false;
}
if (!std::dynamic_pointer_cast<ngraph::op::Constant>(gm->get_argument(2)))
{
NGRAPH_DEBUG << "use_seed argument to GenerateMask must be constant";
return false;
}
if (!std::dynamic_pointer_cast<ngraph::op::Constant>(gm->get_argument(3)))
{
NGRAPH_DEBUG << "seed argument to GenerateMask must be constant";
return false;
}
if (!std::dynamic_pointer_cast<ngraph::op::Constant>(gm->get_argument(4)))
{
NGRAPH_DEBUG << "probability argument to GenerateMask must be constant";
return false;
}
auto gm_value = gm->get_probability();
auto gm_seed = gm->get_seed();
auto training = gm->get_argument(0); // for training purposes this is always going to be 1
auto use_seed_arg = gm->get_argument(2); // this is the use_seed node
auto dropout_n = std::make_shared<ngraph::op::Dropout>(pattern_map[x],
gm->get_argument(0),
gm->get_argument(2),
gm->get_argument(3),
gm->get_argument(4));
auto dropout_n = std::make_shared<ngraph::op::Dropout>(
pattern_map[x], training, use_seed_arg, gm_seed, gm_value);
auto goe1 = std::make_shared<ngraph::op::GetOutputElement>(dropout_n, 0);
ngraph::replace_node(m.get_match_root(), goe1);
......
......@@ -86,6 +86,7 @@
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/grn.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/group_conv_transpose.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/op/fused/mvn.hpp"
......@@ -2063,6 +2064,7 @@ shared_ptr<runtime::Executable>
case OP_TYPEID::GatherND:
case OP_TYPEID::GenerateMask:
case OP_TYPEID::GRN:
case OP_TYPEID::GroupConvolutionTranspose:
case OP_TYPEID::HardSigmoid:
case OP_TYPEID::LeakyRelu:
case OP_TYPEID::MVN:
......@@ -2183,6 +2185,7 @@ bool runtime::intelgpu::IntelGPUBackend::is_supported_impl(const Node& node)
case OP_TYPEID::FakeQuantize:
case OP_TYPEID::Gemm:
case OP_TYPEID::GRN:
case OP_TYPEID::GroupConvolutionTranspose:
case OP_TYPEID::LeakyRelu:
case OP_TYPEID::MVN:
case OP_TYPEID::Normalize:
......
......@@ -259,4 +259,94 @@ backwards_softmax_underflow
backwards_softmax_3d
batch_mat_mul_forward
dot_matrix_2x0_0x2
# dgkutnic ww24.5: these tests are to be triaged by the PlaidML team
convolution_3d_1item_large_5o3i_padded_uneven_filter_uneven_data_dilation_data_dilated
select
product_trivial
product_trivial_5d
product_to_scalar
product_matrix_columns
product_matrix_rows
product_3d_to_matrix_most_sig
product_3d_to_matrix_least_sig
product_3d_to_vector
product_3d_to_scalar
product_2d_to_scalar_int32
product_to_scalar_int32
product_to_scalar_int8
max_trivial
max_trivial_5d
max_to_scalar
max_to_scalar_int8
max_matrix_columns
max_matrix_rows
max_matrix_rows_int32
max_3d_to_matrix_most_sig
max_3d_to_matrix_least_sig
max_3d_to_vector
max_3d_to_scalar
max_3d_to_scalar_int32
min_trivial
min_trivial_5d
min_trivial_5d_int32
min_to_scalar
min_to_scalar_int8
min_matrix_columns
min_matrix_rows
min_matrix_rows_int32
min_3d_to_matrix_most_sig
min_3d_to_matrix_least_sig
min_3d_to_vector
min_3d_to_scalar
min_3d_to_scalar_int32
sum_to_scalar
sum_large_1d_to_scalar
sum_matrix_columns
sum_matrix_6d
sum_matrix_rows
sum_3d_to_matrix_most_sig
sum_3d_to_matrix_least_sig
sum_3d_to_vector
sum_3d_to_scalar
sum_3d_to_scalar_int32
sum_5d_to_scalar
sum_5d_to_scalar_int32
sum_2d_to_scalar_int8
sum_stable_acc
sum_stable_simple_float
divide_python_rounding_int32
any_2x2_to_scalar_true
any_2x2_to_scalar_false
any_2x3_eliminate_col_dim
any_2x3_eliminate_row_dim
any_2x2x3_eliminate_dim_1
any_2x2x3_eliminate_dim_2
any_2x2x3_eliminate_dims_0_1
any_2x2x3_eliminate_dims_0_2
any_2x2x3_eliminate_dims_1_2
any_2x2x3_eliminate_dims_0_1_2
all_trivial
all_2x2_to_scalar_false
all_2x2_to_scalar_true
all_2x3_eliminate_col_dim
all_2x3_eliminate_row_dim
all_2x2x3_eliminate_dim_0
all_2x2x3_eliminate_dim_1
all_2x2x3_eliminate_dim_2
all_2x2x3_eliminate_dims_0_1
all_2x2x3_eliminate_dims_0_2
all_2x2x3_eliminate_dims_1_2
all_2x2x3_eliminate_dims_0_1_2
all_dynamic_axis
all_change_axis
backwards_broadcast0
backwards_broadcast1
backwards_select
backwards_select_nested
backwards_sum_v2s
backwards_sum_m2s
backwards_sum_m2v_0
backwards_sum_m2v_1
backwards_batchmatmul_tensor2_tensor2
......@@ -74,6 +74,7 @@
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/grn.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/group_conv_transpose.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/op/fused/mvn.hpp"
......@@ -1078,6 +1079,31 @@ static shared_ptr<ngraph::Function>
pad_type);
break;
}
case OP_TYPEID::GroupConvolutionTranspose:
{
auto strides = node_js.at("strides").get<vector<size_t>>();
auto dilations = node_js.at("dilations").get<vector<size_t>>();
auto padding_begin = node_js.at("padding_begin").get<vector<ptrdiff_t>>();
auto padding_end = node_js.at("padding_end").get<vector<ptrdiff_t>>();
auto output_padding = node_js.at("output_padding").get<vector<ptrdiff_t>>();
auto groups = node_js.at("groups").get<size_t>();
op::PadType pad_type = node_js["pad_type"].empty()
? op::PadType::EXPLICIT
: static_cast<op::PadType>(node_js.at("pad_type"));
auto output_shape = node_js.at("output_shape").get<vector<size_t>>();
node = make_shared<op::GroupConvolutionTranspose>(args[0],
args[1],
strides,
dilations,
padding_begin,
padding_end,
output_padding,
groups,
pad_type,
output_shape);
break;
}
case OP_TYPEID::LeakyRelu:
{
node = make_shared<op::LeakyRelu>(args[0], args[1]);
......@@ -1417,7 +1443,9 @@ static shared_ptr<ngraph::Function>
}
case OP_TYPEID::Result:
{
node = make_shared<op::Result>(args[0]);
auto needs_default_layout =
get_or_default<bool>(node_js, "needs_default_layout", false);
node = make_shared<op::Result>(args[0], needs_default_layout);
break;
}
case OP_TYPEID::Reverse:
......@@ -2089,6 +2117,19 @@ static json write(const Node& n, bool binary_constant_data)
node["pad_type"] = tmp->get_pad_type();
break;
}
case OP_TYPEID::GroupConvolutionTranspose:
{
auto tmp = dynamic_cast<const op::GroupConvolutionTranspose*>(&n);
node["strides"] = tmp->get_strides();
node["dilations"] = tmp->get_dilations();
node["padding_begin"] = tmp->get_padding_begin();
node["padding_end"] = tmp->get_padding_end();
node["output_padding"] = tmp->get_output_padding();
node["groups"] = tmp->get_groups();
node["pad_type"] = tmp->get_pad_type();
node["output_shape"] = tmp->get_output_shape();
break;
}
case OP_TYPEID::LeakyRelu: { break;
}
case OP_TYPEID::Less:
......@@ -2341,7 +2382,11 @@ static json write(const Node& n, bool binary_constant_data)
node["output_shape"] = tmp->get_output_shape();
break;
}
case OP_TYPEID::Result: { break;
case OP_TYPEID::Result:
{
auto tmp = dynamic_cast<const op::Result*>(&n);
node["needs_default_layout"] = tmp->needs_default_layout();
break;
}
case OP_TYPEID::Reverse:
{
......
......@@ -1156,3 +1156,94 @@ NGRAPH_TEST(${BACKEND_NAME}, fake_quantize_with_clip_across_channels)
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, group_conv_transpose)
{
const CoordinateDiff output_padding{1, 1};
const CoordinateDiff padding_begin{1, 1};
const CoordinateDiff padding_end{1, 1};
Strides strides{2, 2};
Strides dilations{1, 1};
size_t groups = 1;
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 1, 3, 3});
auto filters = make_shared<op::Parameter>(element::f32, Shape{1, 1, 3, 3});
auto gct = make_shared<op::GroupConvolutionTranspose>(
data, filters, strides, dilations, padding_begin, padding_end, output_padding, groups);
auto function = make_shared<Function>(NodeVector{gct}, ParameterVector{data, filters});
auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
// X
test_case.add_input<float>(vector<float>{0.16857791f,
-0.15161794f,
0.08540368f,
0.1820628f,
-0.21746576f,
0.08245695f,
0.1431433f,
-0.43156421f,
0.30591947f});
// W
test_case.add_input<float>({-0.06230065f,
0.37932432f,
-0.25388849f,
0.33878803f,
0.43709868f,
-0.22477469f,
0.04118127f,
-0.44696793f,
0.06373066f});
test_case.add_expected_output(
Shape{1, 1, 6, 6},
vector<float>{
0.07368518f, -0.08925839f, -0.06627201f, 0.06301362f, 0.03732984f, -0.01919658f,
-0.00628807f, -0.02817563f, -0.01472169f, 0.04392925f, -0.00689478f, -0.01549204f,
0.07957941f, -0.11459791f, -0.09505399f, 0.07681622f, 0.03604182f, -0.01853423f,
-0.0270785f, -0.00680824f, -0.06650258f, 0.08004665f, 0.07918708f, -0.0724144f,
0.06256775f, -0.17838378f, -0.18863615f, 0.20064656f, 0.133717f, -0.06876295f,
-0.06398046f, -0.00864975f, 0.19289537f, -0.01490572f, -0.13673618f, 0.01949645f});
test_case.set_tolerance(3);
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, group_conv_transpose_output_shape)
{
const CoordinateDiff output_padding{};
const Shape output_shape{1, 1, 1, 14};
Strides strides{1, 1};
Strides dilations{1, 1};
size_t groups = 1;
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 10});
auto filters = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 5});
auto gct = make_shared<op::GroupConvolutionTranspose>(
data, filters, strides, dilations, output_padding, output_shape, groups);
auto function = make_shared<Function>(NodeVector{gct}, ParameterVector{data, filters});
auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
// X
test_case.add_input<float>(
vector<float>{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f});
// W
test_case.add_input<float>({1.0f, 2.0f, 3.0f, 2.0f, 1.0f});
test_case.add_expected_output(Shape{1, 1, 1, 14},
vector<float>{0.0f,
1.0f,
4.0f,
10.0f,
18.0f,
27.0f,
36.0f,
45.0f,
54.0f,
63.0f,
62.0f,
50.0f,
26.0f,
9.0f});
test_case.run();
}
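Editor's note, a hand check of the expectations in the second test (worked by hand, not taken from the test source): with stride 1, zero inferred padding, and a symmetric 5-tap kernel, the transposed convolution is the full 1-D convolution of the input with the kernel, so the output length is 10 + 5 - 1 = 14 and each entry is

```latex
y_n = \sum_k x_k\, w_{n-k}, \qquad
y_0 = 0 \cdot 1 = 0, \quad
y_2 = 0 \cdot 3 + 1 \cdot 2 + 2 \cdot 1 = 4, \quad
y_4 = 0 \cdot 1 + 1 \cdot 2 + 2 \cdot 3 + 3 \cdot 2 + 4 \cdot 1 = 18,
```

which matches the first entries of the expected output above.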
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "Shrink"
attribute {
name: "lambd"
f: 1.5
type: FLOAT
}
attribute {
name: "bias"
f: 0.5
type: FLOAT
}
}
name: "shrink_graph"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 11
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 11
}
}
}
}
}
}
opset_import {
version: 9
}
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "Shrink"
attribute {
name: "lambd"
f: 1.4
type: FLOAT
}
attribute {
name: "bias"
f: 1.5
type: FLOAT
}
}
name: "shrink_graph"
input {
name: "x"
type {
tensor_type {
elem_type: 6
shape {
dim {
dim_value: 11
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 6
shape {
dim {
dim_value: 11
}
}
}
}
}
}
opset_import {
version: 9
}
......@@ -1456,3 +1456,29 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_hardmax)
test_case.run();
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, model_shrink_float)
{
const auto shrink_fn = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/shrink_float.prototxt"));
auto test_case = ngraph::test::NgraphTestCase(shrink_fn, "${BACKEND_NAME}");
test_case.add_input<float>(
{-2.0f, -1.6f, -1.5f, -1.4f, -1.0f, 0.0f, 1.0f, 1.4f, 1.5f, 1.6f, 2.0f});
test_case.add_expected_output<float>(
Shape{11}, {-1.5f, -1.1f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.1f, 1.5f});
test_case.run();
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, model_shrink_int)
{
const auto shrink_fn = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/shrink_int.prototxt"));
auto test_case = ngraph::test::NgraphTestCase(shrink_fn, "${BACKEND_NAME}");
test_case.add_input<int>({-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5});
test_case.add_expected_output<int>(Shape{11}, {-4, -3, -2, -1, 0, 0, 0, 1, 2, 3, 4});
test_case.run();
}
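Editor's note, a hand check of these expectations (worked from the attributes in the two prototxt files above, not quoted from anywhere): in the float model lambd = 1.5 and bias = 0.5, so -2.0 and -1.6 map to -1.5 and -1.1, 1.6 and 2.0 map to 1.1 and 1.5, and everything in [-1.5, 1.5], including the boundary values (the comparisons are strict), is zeroed. In the int model lambd = 1.4 and bias = 1.5 are cast into the int32 element type when the constants are created, presumably truncating both to 1, which gives -5 -> -4 and 5 -> 4 as in the expected output.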
......@@ -97,9 +97,8 @@ namespace ngraph
"All function results already have expected outputs.");
auto function_output_type = results.at(m_output_index)->get_element_type();
auto function_output_shape = results.at(m_output_index)->get_shape();
m_result_tensors.emplace_back(
m_backend->create_tensor(function_output_type, function_output_shape));
m_backend->create_tensor(function_output_type, expected_shape));
m_expected_outputs.emplace_back(std::make_shared<ngraph::op::Constant>(
function_output_type, expected_shape, values));
......