Commit 49baa903 authored by gaurides's avatar gaurides Committed by Scott Cyphers

Remove functions from cpu which were moved to core (#2962)

* Remove functions from cpu which were moved to core

* Fix a typo

* Remove unused function
parent df2a27ad
......@@ -29,7 +29,7 @@ namespace ngraph
class GenerateMask : public op::Op
{
public:
/// \brief Constructs a GenerateMask node with a given shape, sed,
/// \brief Constructs a GenerateMask node with a given shape, seed,
/// probability and training/inference mode
GenerateMask(const std::shared_ptr<Node>& training,
const Shape& shape,
......
......@@ -382,128 +382,7 @@ bool runtime::cpu::pass::CPURnnMatFusion::run_on_function(std::shared_ptr<Functi
#define TI(x) std::type_index(typeid(x))
// Tracks a node shared across pattern matches: the first call (oldn empty)
// records newn; later calls verify newn is the same node. Returns the agreed
// node, or nullptr when the two differ.
std::shared_ptr<Node> set_or_check_if_same(std::shared_ptr<Node> oldn, std::shared_ptr<Node> newn)
{
    if (!oldn)
    {
        // Nothing recorded yet — adopt the new node.
        return newn;
    }
    if (oldn == newn)
    {
        return oldn;
    }
    NGRAPH_DEBUG << " different data nodes";
    return nullptr;
}
// Returns true only when the convolution is "trivial", i.e. it has unit
// window dilation, unit data dilation, AND zero padding on both sides.
// Note: the conditions must be conjoined (&&); with || a convolution that
// satisfies any single condition (e.g. zero padding but dilation 2) would be
// wrongly reported trivial and become a candidate for group-conv fusion.
static bool is_trivial_convolution(std::shared_ptr<op::Convolution> conv)
{
    Strides stride_1{1, 1};
    CoordinateDiff pad_0{0, 0};
    return conv->get_window_dilation_strides() == stride_1 &&
           conv->get_data_dilation_strides() == stride_1 && conv->get_padding_above() == pad_0 &&
           conv->get_padding_below() == pad_0;
}
// Attempts to fuse a Concat of per-group Convolutions — each convolving a
// Slice of a common data node with a Slice of a common weights node — into a
// single GroupConvolution.
//
// \param n  The candidate node; expected to be an op::Concat whose arguments
//           all match the slice(data) * slice(weights) convolution pattern.
// \return   The fused GroupConvolution node, or nullptr when the pattern does
//           not apply (concat not on the channel axis, an argument fails to
//           match, data rank != 4, a non-trivial convolution, or differing
//           data/weights nodes among the slices).
std::shared_ptr<Node> fuse_group_convolution(const std::shared_ptr<Node>& n)
{
    Shape win_size_1{1, 1, 1, 1};
    auto data_label = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 4, 9});
    auto weights_label = std::make_shared<pattern::op::Label>(element::f32, Shape{4, 2, 3});
    auto slice_data = std::make_shared<op::Slice>(
        data_label, Coordinate{0, 0, 0}, Coordinate{1, 2, 9}, Strides{1, 1, 1});
    auto slice_weights = std::make_shared<op::Slice>(
        weights_label, Coordinate{0, 0, 0}, Coordinate{2, 2, 3}, Strides{1, 1, 1});
    auto slice_weights_label =
        std::make_shared<pattern::op::Label>(slice_weights, nullptr, NodeVector{slice_weights});
    auto conv = std::make_shared<op::Convolution>(slice_data, slice_weights_label);
    auto matcher = std::make_shared<pattern::Matcher>(conv);
    NGRAPH_DEBUG << "In simplify_concat (group convolution) for " << n->get_name();
    std::shared_ptr<Node> data;
    std::shared_ptr<Node> weights;
    auto concat = std::static_pointer_cast<op::Concat>(n);
    std::shared_ptr<op::Convolution> sconv;
    NodeVector slices;
    const size_t CHANNEL = 1;
    if (concat->get_concatenation_axis() != CHANNEL)
    {
        NGRAPH_DEBUG << "concatenating on an axis different from channel";
        return {nullptr};
    }
    // Every concat argument must be a trivial convolution over slices of the
    // SAME data and weights nodes; otherwise the fusion is not applicable.
    for (auto arg : n->get_arguments())
    {
        if (!matcher->match(arg))
        {
            NGRAPH_DEBUG << arg->get_name() << " doesn't match";
            return {nullptr};
        }
        sconv = std::static_pointer_cast<op::Convolution>(arg);
        if (arg->get_input_shape(0).size() != 4)
        {
            NGRAPH_DEBUG << "convolution data's rank isn't equal to 4";
            return {nullptr};
        }
        if (!is_trivial_convolution(sconv))
        {
            NGRAPH_DEBUG << arg->get_name() << " isn't trivial convolution";
            return {nullptr};
        }
        auto pattern_map = matcher->get_pattern_map();
        data = set_or_check_if_same(data, pattern_map[data_label]);
        weights = set_or_check_if_same(weights, pattern_map[weights_label]);
        if (!data || !weights)
        {
            NGRAPH_DEBUG << "data or weights nodes are different among slices";
            return {nullptr};
        }
        const size_t IC = 1;
        auto slice = pattern_map[slice_weights_label];
        // Collect weight slices only when the slice's input-channel extent
        // differs from the full weights node's (TF-style layout — see below).
        if (weights->get_shape().at(IC) != slice->get_shape().at(IC))
        {
            slices.push_back(slice);
        }
    }
    // TF-flavoured group convolution needs channels re-arranged
    // MKLDNN requires group slicing to be done on OC
    // MKLDNN [4,2,-]
    // ordering w00 w01 w10 w11 w20 w21 w30 w31 produces g00 g01 g10 g11
    // whereas
    // TF [2,4,-]
    // ordering w00 w01 w02 w03 w10 w11 w12 w13 produces g00 g10 g01 g11
    const size_t CONCAT_AXIS_OC = 0;
    if (!slices.empty())
    {
        weights = std::make_shared<op::Concat>(slices, CONCAT_AXIS_OC);
    }
    auto new_conv = std::make_shared<op::GroupConvolution>(data,
                                                           weights,
                                                           sconv->get_window_movement_strides(),
                                                           sconv->get_window_dilation_strides(),
                                                           sconv->get_padding_below(),
                                                           sconv->get_padding_above(),
                                                           sconv->get_data_dilation_strides(),
                                                           n->get_arguments().size());
    // Return by value: the shared_ptr<GroupConvolution> local converts (as an
    // rvalue) to shared_ptr<Node>. `return move(new_conv)` was a pessimizing
    // move (C++ Core Guidelines F.48) and relied on an unqualified `move`.
    return new_conv;
}
// Moved set_or_check_if_same and fuse_group_convolution to core pass batch_fusion
std::shared_ptr<Node> fuse_batch_mat_mul_transpose(const std::shared_ptr<Node>& n)
{
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment