Commit 9a54569b authored by Robert Kimball, committed by Scott Cyphers

add robust replacement for is_functionally_identical that relies on comparing emitted functions as strings (#441)
parent 85c29ba7
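
The change drops the per-op `is_functionally_identical` virtual overrides (and the `test_identical` helper) and instead decides identity in the CPU backend: each op is emitted once as a standalone function, the emitted source is cached per node, and two ops are treated as functionally identical when their cached strings (with comments stripped) compare equal. A minimal, self-contained sketch of that idea (hypothetical `SketchNode`, `NodeCache`, and `functionally_identical` names, not the actual ngraph types):

```cpp
#include <cassert>
#include <string>
#include <unordered_map>

// Stand-in for ngraph::Node; the sketch only needs a stable address per op.
struct SketchNode
{
};

using NodeCache = std::unordered_map<const SketchNode*, std::string>;

// Two ops are "functionally identical" exactly when their cached emitted
// source strings compare equal.
bool functionally_identical(const SketchNode& a, const SketchNode& b, const NodeCache& cache)
{
    return cache.at(&a) == cache.at(&b);
}

int main()
{
    SketchNode add1, add2, mul;
    NodeCache cache{
        {&add1, "void f(float* a0, float* a1, float* out) { out[0] = a0[0] + a1[0]; }"},
        {&add2, "void f(float* a0, float* a1, float* out) { out[0] = a0[0] + a1[0]; }"},
        {&mul, "void f(float* a0, float* a1, float* out) { out[0] = a0[0] * a1[0]; }"},
    };
    assert(functionally_identical(add1, add2, cache));  // same emitted body -> identical
    assert(!functionally_identical(add1, mul, cache));  // different body -> not identical
    return 0;
}
```

In the actual diff, the cache is built in a pre-pass over `op_list` using `emit_op_as_function(node, "f")`, and ops found identical are then emitted once under a shared `func_<name>` function.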
......@@ -335,58 +335,3 @@ bool Node::has_same_type(std::shared_ptr<const Node> node) const
}
return true;
}
bool Node::is_functionally_identical(const Node& other) const
{
return false;
}
bool Node::test_identical(const Node& other) const
{
bool rc = true;
if (this->description() == other.description())
{
const deque<descriptor::Input>& i1 = this->get_inputs();
const deque<descriptor::Input>& i2 = other.get_inputs();
const deque<descriptor::Output>& o1 = this->get_outputs();
const deque<descriptor::Output>& o2 = other.get_outputs();
if (i1.size() == i2.size() && o1.size() == o2.size())
{
for (size_t i = 0; i < i1.size(); i++)
{
auto tvl1 = i1[i].get_output().get_tensor_view()->get_tensor_view_layout();
auto tvl2 = i2[i].get_output().get_tensor_view()->get_tensor_view_layout();
if (tvl1->get_shape() != tvl2->get_shape())
{
rc = false;
}
else if (*tvl1 != *tvl2)
{
rc = false;
}
}
for (size_t i = 0; i < o1.size(); i++)
{
auto tvl1 = o1[i].get_tensor_view()->get_tensor_view_layout();
auto tvl2 = o2[i].get_tensor_view()->get_tensor_view_layout();
if (tvl1->get_shape() != tvl2->get_shape())
{
rc = false;
}
else if (*tvl1 != *tvl2)
{
rc = false;
}
}
}
else
{
rc = false;
}
}
else
{
rc = false;
}
return rc;
}
......@@ -166,8 +166,6 @@ namespace ngraph
// True if this and node have one output with same element type and shape
bool has_same_type(std::shared_ptr<const Node> node) const;
virtual bool is_functionally_identical(const Node&) const;
protected:
void add_output(const element::Type& element_type, const Shape& shape);
void assert_argument_list_equivalency(const Nodes& b);
......
......@@ -23,8 +23,3 @@ void ngraph::op::Abs::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta * std::make_shared<op::Sign>(x));
}
bool ngraph::op::Abs::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -53,7 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Abs>(new_args.at(0));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -53,10 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Acos>(new_args.at(0));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -23,8 +23,3 @@ void ngraph::op::Add::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta);
adjoints.add_delta(y, delta);
}
bool ngraph::op::Add::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -55,7 +55,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Add>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -53,10 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Asin>(new_args.at(0));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -53,10 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Atan>(new_args.at(0));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -199,29 +199,6 @@ op::AvgPool::AvgPool(const std::shared_ptr<Node>& arg, const Shape& window_shape
{
}
bool op::AvgPool::is_functionally_identical(const Node& other) const
{
// TODO: temporary workaround for MKLDNN issue
// remove 'return false' and uncomment below when fixed
return false;
/*
bool rc = true;
if (Node::is_functionally_identical(other))
{
const AvgPool& rhs = dynamic_cast<const AvgPool&>(other);
rc &= m_window_shape == rhs.m_window_shape;
rc &= m_window_movement_strides == rhs.m_window_movement_strides;
rc &= m_padding_below == rhs.m_padding_below;
rc &= m_padding_above == rhs.m_padding_above;
}
else
{
rc = false;
}
return rc;
*/
}
op::AvgPoolBprop::AvgPoolBprop(const std::shared_ptr<Node>& arg,
const std::shared_ptr<Node>& delta,
const Shape& window_shape,
......@@ -237,24 +214,6 @@ op::AvgPoolBprop::AvgPoolBprop(const std::shared_ptr<Node>& arg,
set_value_type_checked(get_input_element_type(0), arg->get_shape());
}
bool op::AvgPoolBprop::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::is_functionally_identical(other))
{
const AvgPoolBprop& rhs = dynamic_cast<const AvgPoolBprop&>(other);
rc &= m_window_shape == rhs.m_window_shape;
rc &= m_window_movement_strides == rhs.m_window_movement_strides;
rc &= m_padding_below == rhs.m_padding_below;
rc &= m_padding_above == rhs.m_padding_above;
}
else
{
rc = false;
}
return rc;
}
void op::AvgPool::generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta)
{
......
......@@ -104,7 +104,6 @@ namespace ngraph
m_padding_below,
m_padding_above);
}
bool is_functionally_identical(const Node&) const override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......@@ -153,8 +152,6 @@ namespace ngraph
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Shape& get_padding_below() const { return m_padding_below; }
const Shape& get_padding_above() const { return m_padding_above; }
bool is_functionally_identical(const Node&) const override;
protected:
Shape m_window_shape;
Strides m_window_movement_strides;
......
......@@ -45,19 +45,3 @@ void op::Broadcast::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, make_shared<op::Sum>(delta, m_broadcast_axes));
}
bool op::Broadcast::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const Broadcast& obj = dynamic_cast<const Broadcast&>(other);
rc &= m_shape == obj.m_shape;
rc &= m_broadcast_axes == obj.m_broadcast_axes;
}
else
{
rc = false;
}
return rc;
}
......@@ -73,8 +73,6 @@ namespace ngraph
/// \return A set containing the indices of the broadcast axes (0-based).
const AxisSet& get_broadcast_axes() const { return m_broadcast_axes; }
const Shape& get_broadcast_shape() const { return m_shape; }
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......
......@@ -51,10 +51,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Ceiling>(new_args.at(0));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -102,18 +102,3 @@ void op::Concat::generate_adjoints(autodiff::Adjoints& adjoints, const std::shar
pos = next_pos;
}
}
bool op::Concat::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const Concat& concat = dynamic_cast<const Concat&>(other);
rc &= m_concatenation_axis == concat.m_concatenation_axis;
}
else
{
rc = false;
}
return rc;
}
......@@ -74,8 +74,6 @@ namespace ngraph
/// \return The concatenation axis.
size_t get_concatenation_axis() const { return m_concatenation_axis; }
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......
......@@ -32,18 +32,3 @@ void ngraph::op::Convert::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, std::make_shared<op::Convert>(delta, x->get_element_type()));
}
bool op::Convert::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const Convert& obj = dynamic_cast<const Convert&>(other);
rc &= m_element_type == obj.m_element_type;
}
else
{
rc = false;
}
return rc;
}
......@@ -61,8 +61,6 @@ namespace ngraph
}
const element::Type& get_convert_element_type() const { return m_element_type; }
bool is_functionally_identical(const Node&) const override;
protected:
const ngraph::element::Type m_element_type;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -369,30 +369,6 @@ std::shared_ptr<Node>
m_data_dilation_strides);
}
bool op::Convolution::is_functionally_identical(const Node& other) const
{
// TODO: temporary workaround for MKLDNN issue
// remove 'return false' and uncomment below when fixed
return false;
/*
bool rc = true;
if (Node::test_identical(other))
{
const Convolution& rhs = dynamic_cast<const Convolution&>(other);
rc &= m_window_movement_strides == rhs.m_window_movement_strides;
rc &= m_window_dilation_strides == rhs.m_window_dilation_strides;
rc &= m_padding_below == rhs.m_padding_below;
rc &= m_padding_above == rhs.m_padding_above;
rc &= m_data_dilation_strides == rhs.m_data_dilation_strides;
}
else
{
rc = false;
}
return rc;
*/
}
void op::Convolution::generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta)
{
......@@ -523,28 +499,6 @@ std::shared_ptr<Node> op::ConvolutionBackpropData::copy_with_new_args(
m_data_dilation_strides_forward);
}
bool op::ConvolutionBackpropData::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const ConvolutionBackpropData& rhs = dynamic_cast<const ConvolutionBackpropData&>(other);
rc &= m_data_batch_shape == rhs.m_data_batch_shape;
rc &= m_window_movement_strides_forward == rhs.m_window_movement_strides_forward;
rc &= m_window_dilation_strides_forward == rhs.m_window_dilation_strides_forward;
rc &= m_padding_below_forward == rhs.m_padding_below_forward;
rc &= m_padding_above_forward == rhs.m_padding_above_forward;
rc &= m_data_dilation_strides_forward == rhs.m_data_dilation_strides_forward;
// The _backward fields do not need to be tested here since they are derived from the
// _forward ones.
}
else
{
rc = false;
}
return rc;
}
op::ConvolutionBackpropFilters::ConvolutionBackpropFilters(
const std::shared_ptr<Node>& data_batch,
const Shape& filters_shape,
......@@ -642,26 +596,3 @@ std::shared_ptr<Node> op::ConvolutionBackpropFilters::copy_with_new_args(
m_padding_above_forward,
m_data_dilation_strides_forward);
}
bool op::ConvolutionBackpropFilters::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const ConvolutionBackpropFilters& rhs =
dynamic_cast<const ConvolutionBackpropFilters&>(other);
rc &= m_filters_shape == rhs.m_filters_shape;
rc &= m_window_movement_strides_forward == rhs.m_window_movement_strides_forward;
rc &= m_window_dilation_strides_forward == rhs.m_window_dilation_strides_forward;
rc &= m_padding_below_forward == rhs.m_padding_below_forward;
rc &= m_padding_above_forward == rhs.m_padding_above_forward;
rc &= m_data_dilation_strides_forward == rhs.m_data_dilation_strides_forward;
// The _backward fields do not need to be tested here since they are derived from the
// _forward ones.
}
else
{
rc = false;
}
return rc;
}
......@@ -116,7 +116,6 @@ namespace ngraph
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override;
bool is_functionally_identical(const Node&) const override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......@@ -167,7 +166,6 @@ namespace ngraph
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override;
bool is_functionally_identical(const Node&) const override;
/// \return The data batch shape.
const Shape& get_data_batch_shape() const { return m_data_batch_shape; }
......@@ -263,7 +261,6 @@ namespace ngraph
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override;
bool is_functionally_identical(const Node&) const override;
/// \return The filters tensor shape.
const Shape& get_filters_shape() const { return m_filters_shape; }
......
......@@ -24,8 +24,3 @@ void ngraph::op::Cos::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, -delta * (std::make_shared<op::Sin>(x)));
}
bool ngraph::op::Cos::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -51,7 +51,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Cos>(new_args.at(0));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -23,8 +23,3 @@ void ngraph::op::Cosh::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta * (std::make_shared<op::Sinh>(x)));
}
bool ngraph::op::Cosh::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -51,7 +51,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Cosh>(new_args.at(0));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -25,8 +25,3 @@ void ngraph::op::Divide::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta * shared_from_this() / x);
adjoints.add_delta(y, -delta * shared_from_this() / y);
}
bool ngraph::op::Divide::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -56,7 +56,6 @@ namespace ngraph
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
bool is_functionally_identical(const Node&) const override;
};
}
inline std::shared_ptr<ngraph::Node> operator/(const std::shared_ptr<ngraph::Node> arg0,
......
......@@ -141,18 +141,3 @@ void op::Dot::generate_adjoints(autodiff::Adjoints& adjoints, const std::shared_
auto x_reshaped_dot_delta = make_shared<Dot>(x_reshaped, delta, I_shape.size()); // JK
adjoints.add_delta(y, x_reshaped_dot_delta);
}
bool op::Dot::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const Dot& rhs = dynamic_cast<const Dot&>(other);
rc &= m_reduction_axes_count == rhs.m_reduction_axes_count;
}
else
{
rc = false;
}
return rc;
}
......@@ -85,7 +85,6 @@ namespace ngraph
return std::make_shared<Dot>(
new_args.at(0), new_args.at(1), m_reduction_axes_count);
}
bool is_functionally_identical(const Node&) const override;
protected:
size_t m_reduction_axes_count;
......
......@@ -53,10 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Equal>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -22,8 +22,3 @@ void ngraph::op::Exp::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta * shared_from_this());
}
bool ngraph::op::Exp::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -54,7 +54,6 @@ namespace ngraph
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
bool is_functionally_identical(const Node&) const override;
};
}
}
......@@ -51,10 +51,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Floor>(new_args.at(0));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -53,10 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Greater>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -53,10 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<GreaterEq>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -53,10 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Less>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -53,10 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<LessEq>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -22,8 +22,3 @@ void ngraph::op::Log::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta / x);
}
bool ngraph::op::Log::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -54,7 +54,6 @@ namespace ngraph
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
bool is_functionally_identical(const Node&) const override;
};
}
}
......@@ -157,27 +157,6 @@ op::MaxPool::MaxPool(const std::shared_ptr<Node>& arg, const Shape& window_shape
{
}
bool op::MaxPool::is_functionally_identical(const Node& other) const
{
// TODO: temporary workaround for MKLDNN issue
// remove 'return false' and uncomment below when fixed
return false;
/*
bool rc = true;
if (Node::test_identical(other))
{
const MaxPool& rhs = dynamic_cast<const MaxPool&>(other);
rc &= m_window_shape == rhs.m_window_shape;
rc &= m_window_movement_strides == rhs.m_window_movement_strides;
}
else
{
rc = false;
}
return rc;
*/
}
void op::MaxPool::generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta)
{
......
......@@ -63,7 +63,6 @@ namespace ngraph
return std::make_shared<MaxPool>(
new_args.at(0), m_window_shape, m_window_movement_strides);
}
bool is_functionally_identical(const Node&) const override;
/// \return The window shape.
const Shape& get_window_shape() const { return m_window_shape; }
......
......@@ -33,8 +33,3 @@ void ngraph::op::Maximum::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(
y, delta * make_shared<op::Convert>(make_shared<op::Greater>(y, x), y->get_element_type()));
}
bool ngraph::op::Maximum::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -53,7 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Maximum>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -34,8 +34,3 @@ void ngraph::op::Minimum::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(
y, delta * make_shared<op::Convert>(make_shared<op::Less>(y, x), y->get_element_type()));
}
bool ngraph::op::Minimum::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -53,7 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Minimum>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -23,8 +23,3 @@ void ngraph::op::Multiply::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta * y);
adjoints.add_delta(y, x * delta);
}
bool ngraph::op::Multiply::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -53,7 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Multiply>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -21,8 +21,3 @@ void ngraph::op::Negative::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, -delta);
}
bool ngraph::op::Negative::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -54,7 +54,6 @@ namespace ngraph
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
bool is_functionally_identical(const Node&) const override;
};
}
inline std::shared_ptr<ngraph::Node> operator-(const std::shared_ptr<ngraph::Node> arg0)
......
......@@ -22,8 +22,3 @@ op::Not::Not(const shared_ptr<Node>& arg)
: op::UnaryElementwise("Not", arg->get_element_type(), arg)
{
}
bool ngraph::op::Not::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -48,7 +48,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Not>(new_args.at(0));
}
bool is_functionally_identical(const Node&) const override;
};
}
}
......@@ -53,10 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<NotEqual>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -41,19 +41,3 @@ op::OneHot::OneHot(const std::shared_ptr<Node>& arg, const Shape& shape, size_t
set_value_type_checked(make_shared<TensorViewType>(input_element_type, shape));
}
bool op::OneHot::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const OneHot& rhs = dynamic_cast<const OneHot&>(other);
rc &= m_shape == rhs.m_shape;
rc &= m_one_hot_axis == rhs.m_one_hot_axis;
}
else
{
rc = false;
}
return rc;
}
......@@ -60,8 +60,6 @@ namespace ngraph
/// \return The index of the one-hot axis.
size_t get_one_hot_axis() const { return m_one_hot_axis; }
bool is_functionally_identical(const Node&) const override;
protected:
Shape m_shape;
size_t m_one_hot_axis;
......
......@@ -79,23 +79,6 @@ std::shared_ptr<Node>
new_args.at(0), new_args.at(1), m_padding_below, m_padding_above, m_padding_interior);
}
bool op::Pad::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::is_functionally_identical(other))
{
const Pad& rhs = dynamic_cast<const Pad&>(other);
rc &= m_padding_below == rhs.m_padding_below;
rc &= m_padding_above == rhs.m_padding_above;
rc &= m_padding_interior == rhs.m_padding_interior;
}
else
{
rc = false;
}
return rc;
}
/* The "y" half of this is going to be a bit tricky... best way to handle it, I think,
is to ReplaceSlice the non-padded values in the incoming delta tensor with a zero
broadcasted to x's shape; then sum that and backprop the result to y.
......
......@@ -78,8 +78,6 @@ namespace ngraph
const Shape& get_padding_above() const { return m_padding_above; }
/// \return The interior padding sizes.
const Shape& get_padding_interior() const { return m_padding_interior; }
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......
......@@ -28,8 +28,3 @@ void ngraph::op::Power::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta * y * shared_from_this() / x);
adjoints.add_delta(y, delta * shared_from_this() * log_x);
}
bool ngraph::op::Power::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -53,7 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Power>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -127,8 +127,3 @@ op::ReduceWindow::ReduceWindow(const std::shared_ptr<Node>& arg_reductee,
set_value_type_checked(input_reductee.get_element_type(), result_shape);
}
bool op::ReduceWindow::is_functionally_identical(const Node& other) const
{
return false;
}
......@@ -85,8 +85,6 @@ namespace ngraph
const Shape& get_window_shape() const { return m_window_shape; }
/// \return The window movement strides.
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
bool is_functionally_identical(const Node&) const override;
protected:
std::shared_ptr<Function> m_reduction_function;
Shape m_window_shape;
......
......@@ -55,10 +55,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Remainder>(new_args.at(0), new_args.at(1));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -132,20 +132,3 @@ void op::ReplaceSlice::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(
y, std::make_shared<op::Slice>(delta, m_lower_bounds, m_upper_bounds, m_strides));
}
bool op::ReplaceSlice::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const ReplaceSlice& slice = dynamic_cast<const ReplaceSlice&>(other);
rc &= m_lower_bounds == slice.m_lower_bounds;
rc &= m_upper_bounds == slice.m_upper_bounds;
rc &= m_strides == slice.m_strides;
}
else
{
rc = false;
}
return rc;
}
......@@ -87,8 +87,6 @@ namespace ngraph
const Coordinate& get_upper_bounds() const { return m_upper_bounds; }
/// \return The slicing strides.
const Strides& get_strides() const { return m_strides; }
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......
......@@ -99,19 +99,3 @@ void op::Reshape::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(get_input_op(0), reshape);
}
bool op::Reshape::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const Reshape& reshape = dynamic_cast<const Reshape&>(other);
rc &= m_input_order == reshape.m_input_order;
rc &= m_output_shape == reshape.m_output_shape;
}
else
{
rc = false;
}
return rc;
}
......@@ -79,8 +79,6 @@ namespace ngraph
const AxisVector& get_input_order() const { return m_input_order; }
/// \return The shape of the output tensor.
const Shape& get_output_shape() const { return m_output_shape; }
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......
......@@ -50,18 +50,3 @@ void op::Reverse::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, make_shared<op::Reverse>(delta, m_reversed_axes));
}
bool op::Reverse::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const Reverse& obj = dynamic_cast<const Reverse&>(other);
rc &= m_reversed_axes == obj.m_reversed_axes;
}
else
{
rc = false;
}
return rc;
}
......@@ -60,8 +60,6 @@ namespace ngraph
/// \return The set of axes to reverse.
const AxisSet& get_reversed_axes() const { return m_reversed_axes; }
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......
......@@ -63,8 +63,3 @@ void ngraph::op::Select::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta * p_as_x_type);
adjoints.add_delta(y, delta * not_p_as_y_type);
}
bool ngraph::op::Select::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -54,7 +54,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Select>(new_args.at(0), new_args.at(1), new_args.at(2));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -53,10 +53,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Sign>(new_args.at(0));
}
bool is_functionally_identical(const Node& other) const override
{
return test_identical(other);
}
};
}
}
......@@ -23,8 +23,3 @@ void ngraph::op::Sin::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta * (std::make_shared<op::Cos>(x)));
}
bool ngraph::op::Sin::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -51,7 +51,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Sin>(new_args.at(0));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -23,8 +23,3 @@ void ngraph::op::Sinh::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta * (std::make_shared<op::Cosh>(x)));
}
bool ngraph::op::Sinh::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -51,7 +51,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Sinh>(new_args.at(0));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -97,20 +97,3 @@ void op::Slice::generate_adjoints(autodiff::Adjoints& adjoints, const std::share
adjoints.add_delta_to_slice(x, delta, m_lower_bounds, m_upper_bounds, m_strides);
}
bool op::Slice::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const Slice& slice = dynamic_cast<const Slice&>(other);
rc &= m_lower_bounds == slice.m_lower_bounds;
rc &= m_upper_bounds == slice.m_upper_bounds;
rc &= m_strides == slice.m_strides;
}
else
{
rc = false;
}
return rc;
}
......@@ -85,8 +85,6 @@ namespace ngraph
const Coordinate& get_upper_bounds() const { return m_upper_bounds; }
/// \return The slicing strides.
const Strides& get_strides() const { return m_strides; }
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......
......@@ -23,8 +23,3 @@ void ngraph::op::Sqrt::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta / (shared_from_this() + shared_from_this()));
}
bool ngraph::op::Sqrt::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -51,7 +51,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Sqrt>(new_args.at(0));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -24,8 +24,3 @@ void ngraph::op::Subtract::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta);
adjoints.add_delta(y, -delta);
}
bool ngraph::op::Subtract::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -56,7 +56,6 @@ namespace ngraph
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
bool is_functionally_identical(const Node&) const override;
};
}
inline std::shared_ptr<ngraph::Node> operator-(const std::shared_ptr<ngraph::Node> arg0,
......
......@@ -60,18 +60,3 @@ void op::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const std::shared_
adjoints.add_delta(x, make_shared<op::Broadcast>(delta, x_shape, m_reduction_axes));
}
bool op::Sum::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::test_identical(other))
{
const Sum& slice = dynamic_cast<const Sum&>(other);
rc &= m_reduction_axes == slice.m_reduction_axes;
}
else
{
rc = false;
}
return rc;
}
......@@ -93,8 +93,6 @@ namespace ngraph
/// \return The axis positions (0-based) to be eliminated through summation.
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......
......@@ -26,8 +26,3 @@ void ngraph::op::Tan::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta / (c * c));
}
bool ngraph::op::Tan::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -51,7 +51,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Tan>(new_args.at(0));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -26,8 +26,3 @@ void ngraph::op::Tanh::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x, delta / (c * c));
}
bool ngraph::op::Tanh::is_functionally_identical(const Node& other) const
{
return test_identical(other);
}
......@@ -51,7 +51,6 @@ namespace ngraph
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Tanh>(new_args.at(0));
}
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
......
......@@ -384,6 +384,17 @@ using namespace ngraph::runtime;
continue;
}
vector<shared_ptr<Node>> op_list{tmp.begin(), tmp.end()};
unordered_map<const Node*, string> node_cache;
for (size_t i = 0; i < op_list.size(); i++)
{
if (op_list[i]->is_constant() || op_list[i]->is_parameter())
{
continue;
}
Node& node = *op_list[i];
string s = emit_op_as_function(node, "f");
node_cache.insert({&node, s});
}
for (size_t i = 0; i < op_list.size() - 1; i++)
{
if (op_list[i]->is_constant() || op_list[i]->is_parameter())
......@@ -397,64 +408,21 @@ using namespace ngraph::runtime;
string match_function_name;
for (size_t j = i + 1; j < op_list.size(); j++)
{
if (op_list[i]->is_functionally_identical(*op_list[j]))
Node* op1 = op_list[i].get();
Node* op2 = op_list[j].get();
if (is_functionally_identical(*op1, *op2, node_cache))
{
if (match_function_name.empty())
{
match_function_name = "func_" + op_list[i]->get_name();
match_functions.insert({op_list[i].get(), match_function_name});
match_function_name = "func_" + op1->get_name();
match_functions.insert({op1, match_function_name});
}
match_functions.insert({op_list[j].get(), match_function_name});
match_functions.insert({op2, match_function_name});
}
}
if (!match_function_name.empty())
{
writer << "static void " << match_function_name << "(";
writer.indent++;
// Work around a compiler warning (*node inside typeid may have effects
// with shared pointers, which is fine here but clang doesn't like it.)
auto& n = *op_list[i];
auto handler = dispatcher.find(type_index(typeid(n)));
vector<TensorViewWrapper> in;
size_t arg_index = 0;
set<string> arg_names;
for (const descriptor::Input& input : n.get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
TensorViewWrapper tvw{tv, "_arg" + to_string(arg_index)};
if (!contains(arg_names, tvw.get_name()))
{
arg_names.insert(tvw.get_name());
if (arg_index++ > 0)
{
writer << ",";
}
writer << "\n";
writer << tvw.get_type() << "* " << tvw.get_name();
}
in.push_back(tvw);
}
vector<TensorViewWrapper> out;
for (const descriptor::Output& output : n.get_outputs())
{
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
TensorViewWrapper tvw{tv, "_out" + to_string(arg_index)};
if (arg_index++ > 0)
{
writer << ",";
}
writer << "\n";
writer << tvw.get_type() << "* " << tvw.get_name();
out.push_back(tvw);
}
writer.indent--;
writer << "\n)\n";
writer << "{\n";
writer.indent++;
handler->second(writer, &n, in, out);
writer.indent--;
writer << "}\n";
writer << emit_op_as_function(*op_list[i], match_function_name);
}
}
}
......@@ -821,3 +789,107 @@ void runtime::cpu::CPU_ExternalFunction::emit_debug_function_exit(
{
writer << "timer_" << node->get_name() << ".stop();\n";
}
bool runtime::cpu::CPU_ExternalFunction::is_functionally_identical(
const Node& n1, const Node& n2, const unordered_map<const Node*, string>& node_cache)
{
return node_cache.at(&n1) == node_cache.at(&n2);
}
string runtime::cpu::CPU_ExternalFunction::emit_op_as_function(const Node& node,
const string& function_name)
{
codegen::CodeWriter writer;
writer << "static void " << function_name << "(";
writer.indent++;
// Work around a compiler warning (*node inside typeid may have effects
// with shared pointers, which is fine here but clang doesn't like it.)
auto handler = dispatcher.find(type_index(typeid(node)));
vector<TensorViewWrapper> in;
size_t arg_index = 0;
set<string> arg_names;
for (const descriptor::Input& input : node.get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
TensorViewWrapper tvw{tv, "_arg" + to_string(arg_index)};
if (!contains(arg_names, tvw.get_name()))
{
arg_names.insert(tvw.get_name());
if (arg_index++ > 0)
{
writer << ",";
}
writer << "\n";
writer << tvw.get_type() << "* " << tvw.get_name();
}
in.push_back(tvw);
}
vector<TensorViewWrapper> out;
for (const descriptor::Output& output : node.get_outputs())
{
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
TensorViewWrapper tvw{tv, "_out" + to_string(arg_index)};
if (arg_index++ > 0)
{
writer << ",";
}
writer << "\n";
writer << tvw.get_type() << "* " << tvw.get_name();
out.push_back(tvw);
}
writer.indent--;
writer << "\n)\n";
writer << "{\n";
writer.indent++;
handler->second(writer, &node, in, out);
writer.indent--;
writer << "}\n";
string rc = writer.get_code();
if (function_name == "f")
{
rc = strip_comments(rc);
}
return rc;
}
string runtime::cpu::CPU_ExternalFunction::strip_comments(const string& s)
{
stringstream out;
for (size_t i = 0; i < s.size(); i++)
{
if (i < s.size() - 2)
{
if (s[i] == '/' && s[i + 1] == '/')
{
// line comment
i += 2;
while (s[i] != '\n')
{
i++;
}
out << '\n';
}
else if (s[i] == '/' && s[i + 1] == '*')
{
// multi-line comment
i += 2;
while (!(s[i] == '*' && s[i + 1] == '/'))
{
i++;
}
i++;
}
else
{
out << s[i];
}
}
else
{
out << s[i];
}
}
return out.str();
}
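Note on the design: the cached strings are emitted under the throwaway name "f" and passed through `strip_comments` before being stored, while the real shared function emitted under `match_function_name` keeps its comments. A plausible reason (not stated in the commit) is that emitted bodies may carry node-specific text only in comments, which would make otherwise-identical kernels compare unequal; stripping comments from the comparison keys avoids that while leaving the generated source readable.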
......@@ -74,6 +74,13 @@ namespace ngraph
const Node&,
const std::unordered_map<descriptor::TensorView*, std::vector<size_t>>&);
bool is_functionally_identical(
const Node&,
const Node&,
const std::unordered_map<const Node*, std::string>& node_cache);
std::string emit_op_as_function(const Node&, const std::string& function_name);
std::string strip_comments(const std::string&);
std::unique_ptr<codegen::Compiler> m_compiler;
std::unique_ptr<codegen::ExecutionEngine> m_execution_engine;
bool m_emit_timing;
......