Commit be16a2fd authored by Mateusz Bencer's avatar Mateusz Bencer Committed by Michał Karzyński

[SPEC] Add BinaryConvolution:v1 (#3830)

parent 1533b97f
......@@ -116,6 +116,8 @@ set (SRC
op/avg_pool.hpp
op/batch_norm.cpp
op/batch_norm.hpp
op/binary_convolution.cpp
op/binary_convolution.hpp
op/broadcast.cpp
op/broadcast_distributed.cpp
op/ceiling.cpp
......
......@@ -95,6 +95,7 @@ namespace ngraph
#include "ngraph/op/atan.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/binary_convolution.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/broadcast_distributed.hpp"
#include "ngraph/op/ceiling.hpp"
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/binary_convolution.hpp"

#include <map>
#include <string>

#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/reverse.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
// Out-of-line definition for the op's static type descriptor ({"BinaryConvolution", 1}).
constexpr NodeTypeInfo op::v1::BinaryConvolution::type_info;

/// \brief Constructs a BinaryConvolution from an already-parsed mode enum.
///
/// \param data       Output producing the input data batch tensor.
/// \param kernel     Output producing the filters tensor.
/// \param strides    Convolution strides (may be empty; defaulted during validation).
/// \param pads_begin Padding below each spatial axis (may be empty; defaulted/derived).
/// \param pads_end   Padding above each spatial axis (may be empty; defaulted/derived).
/// \param dilations  Filter dilations (may be empty; defaulted during validation).
/// \param mode       How 0/1 data and kernel values are combined (e.g. XNOR_POPCOUNT).
/// \param pad_value  Floating-point value used to fill the padded area.
/// \param auto_pad   Automatic-padding policy (EXPLICIT, SAME_UPPER, SAME_LOWER, ...).
op::v1::BinaryConvolution::BinaryConvolution(const Output<Node>& data,
                                             const Output<Node>& kernel,
                                             const Strides& strides,
                                             const CoordinateDiff& pads_begin,
                                             const CoordinateDiff& pads_end,
                                             const Strides& dilations,
                                             const BinaryConvolutionMode& mode,
                                             float pad_value,
                                             const PadType& auto_pad)
    : Op({data, kernel})
    , m_strides(strides)
    , m_dilations(dilations)
    , m_pads_begin(pads_begin)
    , m_pads_end(pads_end)
    , m_mode(mode)
    , m_pad_value(pad_value)
    , m_auto_pad(auto_pad)
{
    // Fills defaulted attributes and computes the output type/shape
    // via validate_and_infer_types().
    constructor_validate_and_infer_types();
}
/// \brief Convenience constructor taking the mode as a string (e.g. "xnor-popcount").
///
/// Delegates to the enum-mode constructor after converting the string with
/// mode_from_string(), which raises a node validation error for unknown values.
op::v1::BinaryConvolution::BinaryConvolution(const Output<Node>& data,
                                             const Output<Node>& kernel,
                                             const Strides& strides,
                                             const CoordinateDiff& pads_begin,
                                             const CoordinateDiff& pads_end,
                                             const Strides& dilations,
                                             const std::string& mode,
                                             float pad_value,
                                             const PadType& auto_pad)
    : BinaryConvolution(data,
                        kernel,
                        strides,
                        pads_begin,
                        pads_end,
                        dilations,
                        mode_from_string(mode),
                        pad_value,
                        auto_pad)
{
}
/// Validates input element types/shapes, fills in defaulted attributes, resolves
/// SAME_UPPER/SAME_LOWER auto-padding, and sets the output type and shape.
void op::v1::BinaryConvolution::validate_and_infer_types()
{
    // Inputs: 0 = data batch, 1 = filters (kernel).
    const PartialShape& data_batch_shape = get_input_partial_shape(0);
    element::Type data_batch_et = get_input_element_type(0);
    const PartialShape& filters_shape = get_input_partial_shape(1);
    element::Type filters_et = get_input_element_type(1);

    // Attributes left empty by the caller get rank-appropriate defaults
    // (all-ones strides/dilations, all-zeros padding).
    if (m_strides.size() == 0)
    {
        m_strides = conv_default_strides(this, data_batch_shape, filters_shape);
    }
    if (m_dilations.size() == 0)
    {
        m_dilations = conv_default_strides(this, data_batch_shape, filters_shape);
    }
    if (m_pads_begin.size() == 0)
    {
        m_pads_begin = conv_default_padding(this, data_batch_shape, filters_shape);
    }
    if (m_pads_end.size() == 0)
    {
        m_pads_end = conv_default_padding(this, data_batch_shape, filters_shape);
    }

    // For SAME_* auto-padding the paddings are recomputed from the (static)
    // input shapes; with dynamic shapes they are left as-is.
    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
    {
        if (data_batch_shape.is_static() && filters_shape.is_static())
        {
            m_pads_begin.clear();
            m_pads_end.clear();
            auto filter_shape = filters_shape.to_shape();
            filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
            infer_auto_padding(data_batch_shape.to_shape(),
                               filter_shape,
                               m_strides,
                               m_dilations,
                               m_auto_pad,
                               m_pads_end,   // padding above
                               m_pads_begin); // padding below
        }
    }

    element::Type result_et;
    PartialShape result_shape;

    NODE_VALIDATION_CHECK(
        this,
        element::Type::merge(result_et, data_batch_et, filters_et),
        "Element types for data batch and filters do not match (data batch element type: ",
        data_batch_et,
        ", filters element type: ",
        filters_et,
        ").");

    // This op has no data-dilation attribute, so pass all-ones data dilations.
    // Guard the rank cast: static_cast on a dynamic Rank is invalid, so fall
    // back to the (already defaulted/validated) strides size in that case.
    const size_t num_spatial_dims = data_batch_shape.rank().is_static()
                                        ? static_cast<size_t>(data_batch_shape.rank()) - 2
                                        : m_strides.size();
    result_shape = infer_convolution_forward(this,
                                             data_batch_shape,
                                             Strides(num_spatial_dims, 1),
                                             m_pads_begin,
                                             m_pads_end,
                                             filters_shape,
                                             m_strides,
                                             m_dilations);
    set_output_type(0, result_et, result_shape);
}
/// Creates a fresh BinaryConvolution over the supplied inputs, carrying over
/// every attribute of this node unchanged.
shared_ptr<Node> op::v1::BinaryConvolution::copy_with_new_args(const NodeVector& new_args) const
{
    // Rejects argument vectors whose size differs from this node's input count.
    check_new_args_count(this, new_args);

    const auto& new_data = new_args.at(0);
    const auto& new_kernel = new_args.at(1);
    return make_shared<v1::BinaryConvolution>(new_data,
                                              new_kernel,
                                              m_strides,
                                              m_pads_begin,
                                              m_pads_end,
                                              m_dilations,
                                              m_mode,
                                              m_pad_value,
                                              m_auto_pad);
}
void op::v1::BinaryConvolution::generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas)
{
throw ngraph_error("BinaryConvolution generate_adjoints not implemented");
}
/// Maps a textual mode name to the BinaryConvolutionMode enum.
///
/// \param mode Mode name; currently only "xnor-popcount" is recognized.
/// \return The corresponding enum value.
///
/// Raises a node validation error (now including the offending string, so the
/// failure is diagnosable) for any unrecognized value. The previous version
/// also performed two map lookups (count + at); a single find suffices.
op::v1::BinaryConvolution::BinaryConvolutionMode
    op::v1::BinaryConvolution::mode_from_string(const std::string& mode) const
{
    static const std::map<std::string, BinaryConvolutionMode> allowed_values = {
        {"xnor-popcount", BinaryConvolutionMode::XNOR_POPCOUNT}};

    const auto it = allowed_values.find(mode);
    NODE_VALIDATION_CHECK(this,
                          it != allowed_values.end(),
                          "Invalid binary convolution mode value passed in: '",
                          mode,
                          "'.");
    return it->second;
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
namespace ngraph
{
    namespace op
    {
        namespace v1
        {
            /// \brief Convolution over binarized (0/1) data and kernel tensors, where the
            ///        per-element combination is defined by BinaryConvolutionMode.
            class BinaryConvolution : public Op
            {
            public:
                enum class BinaryConvolutionMode
                {
                    // Interpret input data and kernel values: 0 as -1, 1 as 1
                    XNOR_POPCOUNT
                };

                NGRAPH_API
                static constexpr NodeTypeInfo type_info{"BinaryConvolution", 1};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                /// \brief Constructs a binary convolution operation.
                BinaryConvolution() = default;
                /// \brief Constructs a binary convolution operation.
                /// \param data The node producing the input data batch tensor.
                /// \param kernel The node producing the filters tensor.
                /// \param strides The strides.
                /// \param pads_begin The beginning of padding shape.
                /// \param pads_end The end of padding shape.
                /// \param dilations The dilations.
                /// \param mode Defines how input tensor 0/1 values and weights 0/1 are interpreted.
                /// \param pad_value Floating-point value used to fill pad area.
                /// \param auto_pad The pad type for automatically computing padding sizes.
                ///
                /// Output `[N, C_OUT, R1, ... Rf]`
                BinaryConvolution(const Output<Node>& data,
                                  const Output<Node>& kernel,
                                  const Strides& strides,
                                  const CoordinateDiff& pads_begin,
                                  const CoordinateDiff& pads_end,
                                  const Strides& dilations,
                                  const BinaryConvolutionMode& mode,
                                  float pad_value,
                                  const PadType& auto_pad = PadType::EXPLICIT);

                /// \brief Constructs a binary convolution operation from a textual mode
                ///        (e.g. "xnor-popcount"); otherwise identical to the enum overload.
                BinaryConvolution(const Output<Node>& data,
                                  const Output<Node>& kernel,
                                  const Strides& strides,
                                  const CoordinateDiff& pads_begin,
                                  const CoordinateDiff& pads_end,
                                  const Strides& dilations,
                                  const std::string& mode,
                                  float pad_value,
                                  const PadType& auto_pad = PadType::EXPLICIT);

                size_t get_version() const override { return 1; }
                void validate_and_infer_types() override;

                virtual std::shared_ptr<Node>
                    copy_with_new_args(const NodeVector& new_args) const override;

                void generate_adjoints(autodiff::Adjoints& adjoints,
                                       const NodeVector& deltas) override;

                /// \return The strides.
                const Strides& get_strides() const { return m_strides; }
                void set_strides(const Strides& strides) { m_strides = strides; }
                /// \return The dilations.
                const Strides& get_dilations() const { return m_dilations; }
                void set_dilations(const Strides& dilations) { m_dilations = dilations; }
                /// \return The padding-below sizes (possibly negative).
                const CoordinateDiff& get_pads_begin() const { return m_pads_begin; }
                void set_pads_begin(const CoordinateDiff& pads_begin) { m_pads_begin = pads_begin; }
                /// \return The padding-above sizes (possibly negative).
                const CoordinateDiff& get_pads_end() const { return m_pads_end; }
                void set_pads_end(const CoordinateDiff& pads_end) { m_pads_end = pads_end; }
                /// \deprecated Misspelled legacy name kept for source compatibility;
                ///             use set_pads_end instead.
                void set_adding_above(const CoordinateDiff& pads_end) { set_pads_end(pads_end); }
                /// \return The pad type for convolution.
                const PadType& get_auto_pad() const { return m_auto_pad; }
                void set_auto_pad(const PadType& auto_pad) { m_auto_pad = auto_pad; }
                /// \return The mode of convolution.
                const BinaryConvolutionMode& get_mode() const { return m_mode; }
                void set_mode(const BinaryConvolutionMode& mode) { m_mode = mode; }
                /// \return The pad value.
                // Top-level const on a by-value return is meaningless and was dropped
                // (clang-tidy readability-const-return-type); callers are unaffected.
                float get_pad_value() const { return m_pad_value; }
                void set_pad_value(float pad_value) { m_pad_value = pad_value; }

            protected:
                /// Converts a textual mode name to the enum; raises a node validation
                /// error for unknown values.
                BinaryConvolutionMode mode_from_string(const std::string& mode) const;

                Strides m_strides;
                Strides m_dilations;
                CoordinateDiff m_pads_begin;
                CoordinateDiff m_pads_end;
                BinaryConvolutionMode m_mode;
                float m_pad_value;
                PadType m_auto_pad;
            };
        }
    } // namespace op
} // namespace ngraph
......@@ -67,6 +67,7 @@ NGRAPH_OP(BatchMatMul, ngraph::op)
NGRAPH_OP(BatchNormInference, ngraph::op)
NGRAPH_OP(BatchNormTraining, ngraph::op)
NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op)
NGRAPH_OP(BinaryConvolution, ngraph::op)
NGRAPH_OP(Broadcast, ngraph::op)
NGRAPH_OP(BroadcastDistributed, ngraph::op)
NGRAPH_OP(BroadcastLike, ngraph::op)
......
......@@ -32,6 +32,7 @@
#include "ngraph/op/argmin.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/binary_convolution.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/broadcast_distributed.hpp"
#include "ngraph/op/concat.hpp"
......@@ -415,6 +416,11 @@ private:
avg_pool->get_include_padding_in_avg_computation());
break;
}
case OP_TYPEID::BinaryConvolution:
{
throw unsupported_op("Unsupported op '" + node.description() + "'");
break;
}
case OP_TYPEID::GenerateMask:
{
bool use_seed = static_cast<bool>(args[2]->get_data_ptr<const int32_t>()[0]);
......
......@@ -35,6 +35,7 @@
#include "ngraph/op/atan.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/binary_convolution.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/broadcast_distributed.hpp"
#include "ngraph/op/ceiling.hpp"
......@@ -919,6 +920,27 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
args[2], args[0], args[1], args[3], args[4], args[5], epsilon);
break;
}
case OP_TYPEID::BinaryConvolution:
{
auto strides = node_js.at("strides").get<vector<size_t>>();
auto dilations = node_js.at("dilations").get<vector<size_t>>();
auto pads_begin = node_js.at("pads_begin").get<vector<std::ptrdiff_t>>();
auto pads_end = node_js.at("pads_end").get<vector<std::ptrdiff_t>>();
auto mode = node_js.at("mode").get<op::v1::BinaryConvolution::BinaryConvolutionMode>();
auto pad_value = node_js.at("pad_value").get<float>();
op::PadType auto_pad = read_pad_type(node_js);
node = make_shared<op::v1::BinaryConvolution>(args[0],
args[1],
strides,
pads_begin,
pads_end,
dilations,
mode,
pad_value,
auto_pad);
break;
}
case OP_TYPEID::Broadcast:
{
if (op_version == 0)
......@@ -2641,6 +2663,18 @@ json JSONSerializer::serialize_node(const Node& n)
node["eps"] = tmp->get_eps_value();
break;
}
case OP_TYPEID::BinaryConvolution:
{
auto tmp = static_cast<const op::v1::BinaryConvolution*>(&n);
node["strides"] = tmp->get_strides();
node["dilations"] = tmp->get_dilations();
node["pads_begin"] = tmp->get_pads_begin();
node["pads_end"] = tmp->get_pads_end();
node["mode"] = tmp->get_mode();
node["pad_value"] = tmp->get_pad_value();
node["auto_pad"] = tmp->get_auto_pad();
break;
}
case OP_TYPEID::Broadcast:
{
if (op_version == 0)
......
......@@ -482,3 +482,40 @@ TEST(serialize, opset1_strided_slice)
EXPECT_EQ(strided_slice_out->get_shrink_axis_mask(), shrink_axis_mask);
EXPECT_EQ(strided_slice_out->get_ellipsis_mask(), ellipsis_mask);
}
// Round-trips a BinaryConvolution through JSON serialization and checks that
// every attribute survives unchanged.
TEST(serialize, opset1_binary_convolution)
{
    auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 2});
    auto filter = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 2});
    const Strides strides{1, 1};
    const CoordinateDiff pads_begin{0, 0};
    const CoordinateDiff pads_end{0, 0};
    const Strides dilations{1, 1};
    const std::string mode = "xnor-popcount";
    const float pad_value = 2.1f;
    const auto auto_pad = op::PadType::NOTSET;

    auto binary_conv_in = make_shared<op::v1::BinaryConvolution>(
        data, filter, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad);
    auto result = make_shared<op::Result>(binary_conv_in);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data, filter});

    string s = serialize(f);
    shared_ptr<Function> g = deserialize(s);
    auto g_result = g->get_results().at(0);
    auto g_binary_conv = g_result->input(0).get_source_output().get_node_shared_ptr();
    auto binary_conv_out = as_type_ptr<op::v1::BinaryConvolution>(g_binary_conv);
    // Fail cleanly (rather than segfault on a null dereference below) if the
    // deserialized node is not a v1::BinaryConvolution.
    ASSERT_TRUE(binary_conv_out);

    EXPECT_EQ(binary_conv_out->description(), "BinaryConvolution");
    EXPECT_EQ(binary_conv_out->get_version(), 1);
    EXPECT_EQ(binary_conv_out->get_strides(), strides);
    EXPECT_EQ(binary_conv_out->get_pads_begin(), pads_begin);
    EXPECT_EQ(binary_conv_out->get_pads_end(), pads_end);
    EXPECT_EQ(binary_conv_out->get_dilations(), dilations);
    EXPECT_EQ(binary_conv_out->get_mode(),
              op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT);
    EXPECT_EQ(binary_conv_out->get_pad_value(), pad_value);
    EXPECT_EQ(binary_conv_out->get_auto_pad(), auto_pad);
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment