Unverified commit e05a356e authored by Jayaram Bobba, committed by GitHub

Merge branch 'master' into jmenon/mkldnn-compile

parents d8199edc 96cabff0
@@ -76,6 +76,7 @@ set (SRC
     ops/sum.cpp
     ops/tan.cpp
     ops/tanh.cpp
+    ops/util/arithmetic_reduction.cpp
     ops/util/binary_elementwise_arithmetic.cpp
     ops/util/binary_elementwise_comparison.cpp
     ops/util/binary_elementwise.cpp
...
@@ -90,8 +90,10 @@
 #include "ngraph/ops/less.hpp"
 #include "ngraph/ops/less_eq.hpp"
 #include "ngraph/ops/log.hpp"
+#include "ngraph/ops/max.hpp"
 #include "ngraph/ops/max_pool.hpp"
 #include "ngraph/ops/maximum.hpp"
+#include "ngraph/ops/min.hpp"
 #include "ngraph/ops/minimum.hpp"
 #include "ngraph/ops/multiply.hpp"
 #include "ngraph/ops/negative.hpp"
@@ -102,6 +104,7 @@
 #include "ngraph/ops/pad.hpp"
 #include "ngraph/ops/parameter.hpp"
 #include "ngraph/ops/power.hpp"
+#include "ngraph/ops/product.hpp"
 #include "ngraph/ops/reduce.hpp"
 #include "ngraph/ops/reduce_window.hpp"
 #include "ngraph/ops/remainder.hpp"
...
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/ops/util/arithmetic_reduction.hpp"
namespace ngraph
{
namespace op
{
/// \brief Max-reduction operation.
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the maximum element.
///
/// This is equivalent to Reduce where `arg_init` = -inf and `reduction_function` is \f$f(x,y) = \max(x,y)\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | -------------------------------------------- |
/// | `reduction_axes` | The axes to eliminate through max-reduction. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by taking the maximum element. |
class Max : public util::ArithmeticReduction
{
public:
/// \brief Constructs a max-reduction operation.
///
/// \param arg The tensor view to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Max(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
: ArithmeticReduction("Max", arg, reduction_axes)
{
}
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override
{
if (new_args.size() != 1)
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<Max>(new_args.at(0), m_reduction_axes);
}
};
}
}
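For orientation, a minimal sketch of constructing this op and the result shape the base class deduces; it mirrors the sum_deduce type-prop test near the end of this diff, and the parameter shape chosen here is illustrative rather than taken from the change itself:

// Hedged sketch, assuming the usual nGraph headers and namespaces are in scope.
auto param = std::make_shared<op::Parameter>(element::f32, Shape{3, 2});
auto m = std::make_shared<op::Max>(param, AxisSet{0}); // reduce out dimension 0
// ArithmeticReduction deduces element type f32 and result shape {2}.
assert(m->get_element_type() == element::f32);
assert(m->get_shape() == (Shape{2}));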
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/ops/util/arithmetic_reduction.hpp"
namespace ngraph
{
namespace op
{
/// \brief Min-reduction operation.
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the minimum element.
///
/// This is equivalent to Reduce where `arg_init` = +inf and `reduction_function` is \f$f(x,y) = \min(x,y)\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | -------------------------------------------- |
/// | `reduction_axes` | The axes to eliminate through min-reduction. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by taking the minimum element. |
class Min : public util::ArithmeticReduction
{
public:
/// \brief Constructs a min-reduction operation.
///
/// \param arg The tensor view to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Min(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
: ArithmeticReduction("Min", arg, reduction_axes)
{
}
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override
{
if (new_args.size() != 1)
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<Min>(new_args.at(0), m_reduction_axes);
}
};
}
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/ops/util/arithmetic_reduction.hpp"
namespace ngraph
{
namespace op
{
/// \brief Product reduction operation.
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the product.
///
/// \f[
/// \mathit{product}\left(\{0\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 * 3 * 5), (2 * 4 * 6) \right] =
/// \left[ 15, 48 \right]~~~\text{(dimension 0 (rows) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{product}\left(\{1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 * 2), (3 * 4), (5 * 6) \right] =
/// \left[ 2, 12, 30 \right]~~~\text{(dimension 1 (columns) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{product}\left(\{0,1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// (1 * 2) * (3 * 4) * (5 * 6) =
/// 720~~~\text{(both dimensions (rows and columns) are eliminated)}
/// \f]
///
/// This is equivalent to Reduce where `arg_init` = 1 and `reduction_function` is \f$f(x,y) = x*y\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | -------------------------------------- |
/// | `reduction_axes` | The axes to eliminate through product. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by product. |
class Product : public util::ArithmeticReduction
{
public:
/// \brief Constructs a product reduction operation.
///
/// \param arg The tensor view to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Product(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
: ArithmeticReduction("Product", arg, reduction_axes)
{
}
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override
{
if (new_args.size() != 1)
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<Product>(new_args.at(0), m_reduction_axes);
}
};
}
}
@@ -15,46 +15,11 @@
  *******************************************************************************/
 #include "ngraph/ops/sum.hpp"
-#include "ngraph/function.hpp"
 #include "ngraph/ops/broadcast.hpp"
 using namespace std;
 using namespace ngraph;
-op::Sum::Sum(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
-    : RequiresTensorViewArgs("Sum", {arg})
-    , m_reduction_axes(reduction_axes)
-{
-    auto& input = get_inputs().at(0);
-    auto& input_element_type = input.get_element_type();
-    if (input_element_type == element::boolean)
-    {
-        throw ngraph_error("Argument for sum must have numeric element type");
-    }
-    auto input_shape = input.get_shape();
-    for (auto axis : m_reduction_axes)
-    {
-        if (axis >= input_shape.size())
-        {
-            throw ngraph_error("Reduction axis for sum is out of bounds");
-        }
-    }
-    Shape result_shape;
-    for (size_t i = 0; i < input_shape.size(); i++)
-    {
-        if (m_reduction_axes.count(i) == 0)
-        {
-            result_shape.push_back(input_shape.at(i));
-        }
-    }
-    set_value_type_checked(input.get_element_type(), result_shape);
-}
 void op::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const std::shared_ptr<Node>& delta)
 {
     auto x = get_inputs().at(0).get_output().get_node();
...
@@ -16,7 +16,7 @@
 #pragma once
-#include "ngraph/ops/util/requires_tensor_view_args.hpp"
+#include "ngraph/ops/util/arithmetic_reduction.hpp"
 namespace ngraph
 {
@@ -76,14 +76,17 @@ namespace ngraph
         /// | Type | Description |
         /// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
         /// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
-        class Sum : public util::RequiresTensorViewArgs
+        class Sum : public util::ArithmeticReduction
         {
         public:
             /// \brief Constructs a summation operation.
             ///
             /// \param arg The tensor view to be summed.
             /// \param reduction_axes The axis positions (0-based) to be eliminated.
-            Sum(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes);
+            Sum(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
+                : ArithmeticReduction("Sum", arg, reduction_axes)
+            {
+            }
             virtual std::shared_ptr<Node> copy_with_new_args(
                 const std::vector<std::shared_ptr<Node>>& new_args) const override
@@ -95,13 +98,9 @@ namespace ngraph
                 return std::make_shared<Sum>(new_args.at(0), m_reduction_axes);
             }
-            /// \return The axis positions (0-based) to be eliminated through summation.
-            const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
         protected:
             virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                            const std::shared_ptr<Node>& delta) override;
-            AxisSet m_reduction_axes;
         };
     }
 }
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "ngraph/ops/util/arithmetic_reduction.hpp"
using namespace std;
using namespace ngraph;
op::util::ArithmeticReduction::ArithmeticReduction(const std::string& node_type,
const std::shared_ptr<Node>& arg,
const AxisSet& reduction_axes)
: RequiresTensorViewArgs(node_type, {arg})
, m_reduction_axes(reduction_axes)
{
auto& input = get_inputs().at(0);
auto input_shape = input.get_shape();
for (auto axis : m_reduction_axes)
{
if (axis >= input_shape.size())
{
throw ngraph_error("Reduction axis for arithmetic reduction operator is out of bounds");
}
}
Shape result_shape;
for (size_t i = 0; i < input_shape.size(); i++)
{
if (m_reduction_axes.count(i) == 0)
{
result_shape.push_back(input_shape.at(i));
}
}
set_value_type_checked(input.get_element_type(), result_shape);
}
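The result-shape rule above is simply "keep every input dimension whose index is not a reduction axis". A self-contained illustration with simplified stand-ins for ngraph::Shape and ngraph::AxisSet (the shapes here are invented for the example, not taken from this diff):

#include <cstddef>
#include <iostream>
#include <set>
#include <vector>

int main()
{
    std::vector<std::size_t> input_shape{2, 4, 6};
    std::set<std::size_t> reduction_axes{0, 2};
    std::vector<std::size_t> result_shape;
    for (std::size_t i = 0; i < input_shape.size(); i++)
    {
        // Same logic as the constructor above: skip reduced-out axes.
        if (reduction_axes.count(i) == 0)
        {
            result_shape.push_back(input_shape.at(i));
        }
    }
    for (std::size_t d : result_shape)
    {
        std::cout << d << "\n"; // prints 4; reducing all axes would leave shape {}
    }
}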
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/ops/util/requires_tensor_view_args.hpp"
namespace ngraph
{
namespace op
{
namespace util
{
/// \brief Abstract base class for arithmetic reduction operations, i.e., operations where chosen axes of the input tensors
/// are eliminated (reduced out) by repeated application of a particular binary arithmetic operation.
class ArithmeticReduction : public RequiresTensorViewArgs
{
public:
/// \brief Constructs an arithmetic reduction operation.
///
/// \param node_type The node type name of the concrete reduction operation (e.g. "Max").
/// \param arg Node that produces the input tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
ArithmeticReduction(const std::string& node_type,
const std::shared_ptr<Node>& arg,
const AxisSet& reduction_axes);
/// \return The axis positions (0-based) to be eliminated through reduction.
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
protected:
AxisSet m_reduction_axes;
};
}
}
}
@@ -61,8 +61,10 @@
 #include "ngraph/ops/less.hpp"
 #include "ngraph/ops/less_eq.hpp"
 #include "ngraph/ops/log.hpp"
+#include "ngraph/ops/max.hpp"
 #include "ngraph/ops/max_pool.hpp"
 #include "ngraph/ops/maximum.hpp"
+#include "ngraph/ops/min.hpp"
 #include "ngraph/ops/minimum.hpp"
 #include "ngraph/ops/multiply.hpp"
 #include "ngraph/ops/negative.hpp"
@@ -73,6 +75,7 @@
 #include "ngraph/ops/pad.hpp"
 #include "ngraph/ops/parameter.hpp"
 #include "ngraph/ops/power.hpp"
+#include "ngraph/ops/product.hpp"
 #include "ngraph/ops/reduce.hpp"
 #include "ngraph/ops/reduce_window.hpp"
 #include "ngraph/ops/remainder.hpp"
@@ -223,6 +226,9 @@ static const runtime::cpu::OpMap dispatcher{
     {TI(ngraph::op::Pad), &runtime::cpu::CPU_Emitter::emit<op::Pad>},
     {TI(ngraph::op::BatchNorm), &runtime::cpu::CPU_Emitter::emit<op::BatchNorm>},
     {TI(ngraph::op::MaxPoolBackprop), &runtime::cpu::CPU_Emitter::emit<op::MaxPoolBackprop>},
+    {TI(ngraph::op::Product), &runtime::cpu::CPU_Emitter::emit<op::Product>},
+    {TI(ngraph::op::Max), &runtime::cpu::CPU_Emitter::emit<op::Max>},
+    {TI(ngraph::op::Min), &runtime::cpu::CPU_Emitter::emit<op::Min>},
 };
 runtime::cpu::CPU_ExternalFunction::CPU_ExternalFunction(
@@ -286,10 +292,13 @@ void runtime::cpu::CPU_ExternalFunction::compile()
 #include "ngraph/runtime/kernel/concat.hpp"
 #include "ngraph/runtime/kernel/convolution.hpp"
 #include "ngraph/runtime/kernel/dot.hpp"
+#include "ngraph/runtime/kernel/max.hpp"
 #include "ngraph/runtime/kernel/max_pool.hpp"
+#include "ngraph/runtime/kernel/min.hpp"
 #include "ngraph/runtime/kernel/not.hpp"
 #include "ngraph/runtime/kernel/one_hot.hpp"
 #include "ngraph/runtime/kernel/pad.hpp"
+#include "ngraph/runtime/kernel/product.hpp"
 #include "ngraph/runtime/kernel/reduce.hpp"
 #include "ngraph/runtime/kernel/reduce_window.hpp"
 #include "ngraph/runtime/kernel/replace_slice.hpp"
...
@@ -29,9 +29,12 @@
 #include "ngraph/ops/constant.hpp"
 #include "ngraph/ops/convolution.hpp"
 #include "ngraph/ops/dot.hpp"
+#include "ngraph/ops/max.hpp"
 #include "ngraph/ops/max_pool.hpp"
+#include "ngraph/ops/min.hpp"
 #include "ngraph/ops/one_hot.hpp"
 #include "ngraph/ops/pad.hpp"
+#include "ngraph/ops/product.hpp"
 #include "ngraph/ops/reduce.hpp"
 #include "ngraph/ops/reduce_window.hpp"
 #include "ngraph/ops/replace_slice.hpp"
@@ -67,8 +70,10 @@
 #include "ngraph/runtime/kernel/less.hpp"
 #include "ngraph/runtime/kernel/less_eq.hpp"
 #include "ngraph/runtime/kernel/log.hpp"
+#include "ngraph/runtime/kernel/max.hpp"
 #include "ngraph/runtime/kernel/max_pool.hpp"
 #include "ngraph/runtime/kernel/maximum.hpp"
+#include "ngraph/runtime/kernel/min.hpp"
 #include "ngraph/runtime/kernel/minimum.hpp"
 #include "ngraph/runtime/kernel/multiply.hpp"
 #include "ngraph/runtime/kernel/negate.hpp"
@@ -77,6 +82,7 @@
 #include "ngraph/runtime/kernel/one_hot.hpp"
 #include "ngraph/runtime/kernel/pad.hpp"
 #include "ngraph/runtime/kernel/power.hpp"
+#include "ngraph/runtime/kernel/product.hpp"
 #include "ngraph/runtime/kernel/reduce.hpp"
 #include "ngraph/runtime/kernel/reduce_window.hpp"
 #include "ngraph/runtime/kernel/replace_slice.hpp"
@@ -489,6 +495,15 @@ private:
                 reinterpret_cast<T*>(out[0]->get_data_ptr()),
                 out[0]->get_element_count());
         }
+        else if (node_op == "Max")
+        {
+            const op::Max* max = static_cast<const op::Max*>(&node);
+            kernel::max<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
+                           reinterpret_cast<T*>(out[0]->get_data_ptr()),
+                           args[0]->get_shape(),
+                           out[0]->get_shape(),
+                           max->get_reduction_axes());
+        }
         else if (node_op == "Maximum")
         {
             kernel::maximum<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
@@ -524,6 +539,15 @@ private:
                 max_pool_backprop->get_padding_below(),
                 max_pool_backprop->get_padding_above());
         }
+        else if (node_op == "Min")
+        {
+            const op::Min* min = static_cast<const op::Min*>(&node);
+            kernel::min<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
+                           reinterpret_cast<T*>(out[0]->get_data_ptr()),
+                           args[0]->get_shape(),
+                           out[0]->get_shape(),
+                           min->get_reduction_axes());
+        }
         else if (node_op == "Minimum")
         {
             kernel::minimum<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
@@ -589,6 +613,15 @@ private:
                 reinterpret_cast<T*>(out[0]->get_data_ptr()),
                 out[0]->get_element_count());
         }
+        else if (node_op == "Product")
+        {
+            const op::Product* product = static_cast<const op::Product*>(&node);
+            kernel::product<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
+                               reinterpret_cast<T*>(out[0]->get_data_ptr()),
+                               args[0]->get_shape(),
+                               out[0]->get_shape(),
+                               product->get_reduction_axes());
+        }
         else if (node_op == "Reduce")
         {
             ngraph::op::Reduce* reduce = dynamic_cast<ngraph::op::Reduce*>(&node);
...
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <cmath>
#include <limits>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void max(T* arg,
T* out,
const Shape& in_shape,
const Shape& out_shape,
const AxisSet& reduction_axes)
{
// Identity element for max-reduction: -infinity when T can represent it,
// otherwise the lowest representable value (std::numeric_limits<T>::min()
// is the lowest value for the integer types that reach this branch).
T minval = std::numeric_limits<T>::has_infinity
               ? -std::numeric_limits<T>::infinity()
               : std::numeric_limits<T>::min();
CoordinateTransform output_transform(out_shape);
for (const Coordinate& output_coord : output_transform)
{
out[output_transform.index(output_coord)] = minval;
}
CoordinateTransform input_transform(in_shape);
for (const Coordinate& input_coord : input_transform)
{
Coordinate output_coord = project_coordinate(input_coord, reduction_axes);
T x = arg[input_transform.index(input_coord)];
T max = out[output_transform.index(output_coord)];
if (x > max)
{
out[output_transform.index(output_coord)] = x;
}
}
}
}
}
}
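A small worked example of this kernel (a hedged sketch: it assumes compilation within the nGraph source tree and the row-major layout used by CoordinateTransform). Reducing the 3x2 input [[1,2],[3,4],[5,6]] over axis 0 leaves shape {2} and picks the column maxima:

// Illustrative driver, not part of this change.
float in[] = {1, 2, 3, 4, 5, 6}; // shape {3, 2}, row-major
float out[2];                    // shape {2} after reducing axis 0
ngraph::runtime::kernel::max<float>(
    in, out, ngraph::Shape{3, 2}, ngraph::Shape{2}, ngraph::AxisSet{0});
// out is now {5, 6}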
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <cmath>
#include <limits>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void min(T* arg,
T* out,
const Shape& in_shape,
const Shape& out_shape,
const AxisSet& reduction_axes)
{
// Identity element for min-reduction: +infinity when T can represent it,
// otherwise the largest representable value of T.
T minval = std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity()
                                                : std::numeric_limits<T>::max();
CoordinateTransform output_transform(out_shape);
for (const Coordinate& output_coord : output_transform)
{
out[output_transform.index(output_coord)] = minval;
}
CoordinateTransform input_transform(in_shape);
for (const Coordinate& input_coord : input_transform)
{
Coordinate output_coord = project_coordinate(input_coord, reduction_axes);
T x = arg[input_transform.index(input_coord)];
T min = out[output_transform.index(output_coord)];
if (x < min)
{
out[output_transform.index(output_coord)] = x;
}
}
}
}
}
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void product(T* arg,
T* out,
const Shape& in_shape,
const Shape& out_shape,
const AxisSet& reduction_axes)
{
CoordinateTransform output_transform(out_shape);
for (const Coordinate& output_coord : output_transform)
{
out[output_transform.index(output_coord)] = 1; // multiplicative identity
}
CoordinateTransform input_transform(in_shape);
for (const Coordinate& input_coord : input_transform)
{
Coordinate output_coord = project_coordinate(input_coord, reduction_axes);
out[output_transform.index(output_coord)] *=
arg[input_transform.index(input_coord)];
}
}
}
}
}
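The same pattern applies to the product kernel; this sketch reproduces the worked example from the op documentation above (again hedged: it assumes the nGraph tree, and the values are from that documentation example):

// Illustrative driver, not part of this change.
float in[] = {1, 2, 3, 4, 5, 6}; // shape {3, 2}, row-major
float out[1];                    // shape {} (scalar) after reducing both axes
ngraph::runtime::kernel::product<float>(
    in, out, ngraph::Shape{3, 2}, ngraph::Shape{}, ngraph::AxisSet{0, 1});
// out[0] is now 720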
@@ -43,8 +43,10 @@
 #include "ngraph/ops/less.hpp"
 #include "ngraph/ops/less_eq.hpp"
 #include "ngraph/ops/log.hpp"
+#include "ngraph/ops/max.hpp"
 #include "ngraph/ops/max_pool.hpp"
 #include "ngraph/ops/maximum.hpp"
+#include "ngraph/ops/min.hpp"
 #include "ngraph/ops/minimum.hpp"
 #include "ngraph/ops/multiply.hpp"
 #include "ngraph/ops/negative.hpp"
@@ -53,6 +55,7 @@
 #include "ngraph/ops/one_hot.hpp"
 #include "ngraph/ops/pad.hpp"
 #include "ngraph/ops/power.hpp"
+#include "ngraph/ops/product.hpp"
 #include "ngraph/ops/reduce.hpp"
 #include "ngraph/ops/reduce_window.hpp"
 #include "ngraph/ops/remainder.hpp"
@@ -552,6 +555,11 @@ static shared_ptr<ngraph::Function>
     {
         node = make_shared<op::Log>(args[0]);
     }
+    else if (node_op == "Max")
+    {
+        auto reduction_axes = node_js.at("reduction_axes").get<set<size_t>>();
+        node = make_shared<op::Max>(args[0], reduction_axes);
+    }
     else if (node_op == "MaxPool")
     {
         auto window_shape = node_js.at("window_shape").get<vector<size_t>>();
@@ -601,6 +609,11 @@ static shared_ptr<ngraph::Function>
     {
         node = make_shared<op::Maximum>(args[0], args[1]);
     }
+    else if (node_op == "Min")
+    {
+        auto reduction_axes = node_js.at("reduction_axes").get<set<size_t>>();
+        node = make_shared<op::Min>(args[0], reduction_axes);
+    }
     else if (node_op == "Minimum")
     {
         node = make_shared<op::Minimum>(args[0], args[1]);
@@ -647,6 +660,11 @@ static shared_ptr<ngraph::Function>
     {
         node = make_shared<op::Power>(args[0], args[1]);
     }
+    else if (node_op == "Product")
+    {
+        auto reduction_axes = node_js.at("reduction_axes").get<set<size_t>>();
+        node = make_shared<op::Product>(args[0], reduction_axes);
+    }
     else if (node_op == "Reduce")
     {
         auto reduction_axes = node_js.at("reduction_axes").get<set<size_t>>();
@@ -951,6 +969,11 @@ static json write(const Node& n)
     else if (node_op == "Log")
     {
     }
+    else if (node_op == "Max")
+    {
+        auto tmp = dynamic_cast<const op::Max*>(&n);
+        node["reduction_axes"] = tmp->get_reduction_axes();
+    }
     else if (node_op == "MaxPool")
     {
         auto tmp = dynamic_cast<const op::MaxPool*>(&n);
@@ -970,6 +993,11 @@ static json write(const Node& n)
     else if (node_op == "Maximum")
     {
     }
+    else if (node_op == "Min")
+    {
+        auto tmp = dynamic_cast<const op::Min*>(&n);
+        node["reduction_axes"] = tmp->get_reduction_axes();
+    }
     else if (node_op == "Minimum")
     {
     }
@@ -1004,6 +1032,11 @@ static json write(const Node& n)
         node["shape"] = tmp->get_shape();
         node["element_type"] = write_element_type(tmp->get_element_type());
     }
+    else if (node_op == "Product")
+    {
+        auto tmp = dynamic_cast<const op::Product*>(&n);
+        node["reduction_axes"] = tmp->get_reduction_axes();
+    }
     else if (node_op == "Power")
     {
     }
...
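For concreteness, a serialized Max node written by the branch above would carry its axes as a JSON array. This fragment is illustrative only: the node and tensor names are hypothetical, and the exact field set comes from serializer code not shown in this diff; only the "reduction_axes" key is confirmed by the handlers above.

{
    "name": "Max_3",
    "op": "Max",
    "inputs": ["Parameter_0"],
    "outputs": ["Max_3_0"],
    "reduction_axes": [0]
}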
@@ -546,4 +546,4 @@ TEST(pattern, variance)
     auto var_graph = construct_variance_graph();
     ASSERT_TRUE(n.match(var_graph, variance));
     ASSERT_EQ(n.get_pattern_map()[var_graph], variance);
-}
\ No newline at end of file
+}
@@ -631,7 +631,7 @@ TEST(type_prop, reduce_nonscalar)
     {
         auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
         // Should have thrown, so fail if it didn't
-        FAIL() << "Did not detect incorrect element types for arithmetic operator";
+        FAIL() << "Did not detect non-scalar initial value for reduce";
     }
     catch (const ngraph_error& error)
     {
@@ -656,7 +656,7 @@ TEST(type_prop, reduce_elem_type_mismatch)
     {
         auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
         // Should have thrown, so fail if it didn't
-        FAIL() << "Did not detect incorrect element types for arithmetic operator";
+        FAIL() << "Did not detect element type mismatch for reduce";
     }
     catch (const ngraph_error& error)
     {
@@ -816,7 +816,7 @@ TEST(type_prop, reduce_axis_oob)
     {
         auto r = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0, 2, 1});
         // Should have thrown, so fail if it didn't
-        FAIL() << "Did not detect incorrect element types for arithmetic operator";
+        FAIL() << "Did not detect out-of-bound axis for reduce";
     }
     catch (const ngraph_error& error)
     {
@@ -6048,3 +6048,45 @@ TEST(type_prop, pad_deduce_interior_padding_wrong_rank)
         FAIL() << "Deduced type check failed for unexpected reason";
     }
 }
+
+TEST(type_prop, sum_deduce)
+{
+    auto param_0 = make_shared<op::Parameter>(element::f32, Shape{2, 4});
+
+    auto r0 = make_shared<op::Sum>(param_0, AxisSet{0});
+    ASSERT_EQ(r0->get_element_type(), element::f32);
+    ASSERT_EQ(r0->get_shape(), (Shape{4}));
+
+    auto r1 = make_shared<op::Sum>(param_0, AxisSet{1});
+    ASSERT_EQ(r1->get_element_type(), element::f32);
+    ASSERT_EQ(r1->get_shape(), (Shape{2}));
+
+    auto r01 = make_shared<op::Sum>(param_0, AxisSet{0, 1});
+    ASSERT_EQ(r01->get_element_type(), element::f32);
+    ASSERT_EQ(r01->get_shape(), (Shape{}));
+
+    auto r_none = make_shared<op::Sum>(param_0, AxisSet{});
+    ASSERT_EQ(r_none->get_element_type(), element::f32);
+    ASSERT_EQ(r_none->get_shape(), (Shape{2, 4}));
+}
+
+TEST(type_prop, sum_axis_oob)
+{
+    auto param_0 = make_shared<op::Parameter>(element::f32, Shape{2, 4});
+    try
+    {
+        auto r = make_shared<op::Sum>(param_0, AxisSet{0, 2, 1});
+        // Should have thrown, so fail if it didn't
+        FAIL() << "Did not detect out-of-bound axis for sum";
+    }
+    catch (const ngraph_error& error)
+    {
+        EXPECT_EQ(error.what(),
+                  std::string("Reduction axis for arithmetic reduction operator is out of bounds"));
+    }
+    catch (...)
+    {
+        FAIL() << "Deduced type check failed for unexpected reason";
+    }
+}
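Because Max, Min, and Product share the same base class, the sum_axis_oob checks above apply to them verbatim. A hypothetical analogue for Max (not present in this diff, shown only to illustrate the shared behavior):

TEST(type_prop, max_deduce)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{2, 4});
    auto m = make_shared<op::Max>(param, AxisSet{0});
    // Shape deduction comes from ArithmeticReduction, exactly as for Sum.
    ASSERT_EQ(m->get_element_type(), element::f32);
    ASSERT_EQ(m->get_shape(), (Shape{4}));
}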