Unverified commit 26740bf5 authored by Robert Kimball, committed by GitHub

Merge branch 'master' into examples

parents 621df65a 96cabff0
......@@ -76,6 +76,7 @@ set (SRC
ops/sum.cpp
ops/tan.cpp
ops/tanh.cpp
ops/util/arithmetic_reduction.cpp
ops/util/binary_elementwise_arithmetic.cpp
ops/util/binary_elementwise_comparison.cpp
ops/util/binary_elementwise.cpp
......
......@@ -90,8 +90,10 @@
#include "ngraph/ops/less.hpp"
#include "ngraph/ops/less_eq.hpp"
#include "ngraph/ops/log.hpp"
#include "ngraph/ops/max.hpp"
#include "ngraph/ops/max_pool.hpp"
#include "ngraph/ops/maximum.hpp"
#include "ngraph/ops/min.hpp"
#include "ngraph/ops/minimum.hpp"
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/negative.hpp"
......@@ -102,6 +104,7 @@
#include "ngraph/ops/pad.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/product.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reduce_window.hpp"
#include "ngraph/ops/remainder.hpp"
......
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/ops/util/arithmetic_reduction.hpp"
namespace ngraph
{
namespace op
{
/// \brief Max-reduction operation.
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the maximum element.
///
/// This is equivalent to Reduce where `arg_init` = -inf and `reduction_function` is \f$f(x,y) = \max(x,y)\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | -------------------------------------------- |
/// | `reduction_axes` | The axes to eliminate through max-reduction. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by taking the maximum element. |
class Max : public util::ArithmeticReduction
{
public:
/// \brief Constructs a max-reduction operation.
///
/// \param arg The tensor view to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Max(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
: ArithmeticReduction("Max", arg, reduction_axes)
{
}
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override
{
if (new_args.size() != 1)
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<Max>(new_args.at(0), m_reduction_axes);
}
};
}
}
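A minimal usage sketch for op::Max, following the Parameter/Function API used in the tests later in this diff (the shapes and names here are illustrative, not part of the change):

#include "ngraph/function.hpp"
#include "ngraph/ops/max.hpp"
#include "ngraph/ops/parameter.hpp"
using namespace ngraph;
std::shared_ptr<Function> make_max_graph()
{
    // Reduce a 3x2 float tensor along axis 0; the result has shape {2}.
    auto arg = std::make_shared<op::Parameter>(element::f32, Shape{3, 2});
    auto max = std::make_shared<op::Max>(arg, AxisSet{0});
    return std::make_shared<Function>(max, op::Parameters{arg});
}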
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/ops/util/arithmetic_reduction.hpp"
namespace ngraph
{
namespace op
{
/// \brief Min-reduction operation.
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the minimum element.
///
/// This is equivalent to Reduce where `arg_init` = +inf and `reduction_function` is \f$f(x,y) = \min(x,y)\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | -------------------------------------------- |
/// | `reduction_axes` | The axes to eliminate through min-reduction. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by taking the minimum element. |
class Min : public util::ArithmeticReduction
{
public:
/// \brief Constructs a min-reduction operation.
///
/// \param arg The tensor view to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Min(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
: ArithmeticReduction("Min", arg, reduction_axes)
{
}
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override
{
if (new_args.size() != 1)
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<Min>(new_args.at(0), m_reduction_axes);
}
};
}
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/ops/util/arithmetic_reduction.hpp"
namespace ngraph
{
namespace op
{
/// \brief Product reduction operation.
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the product.
///
/// \f[
/// \mathit{product}\left(\{0\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 * 3 * 5), (2 * 4 * 6) \right] =
/// \left[ 15, 48 \right]~~~\text{(dimension 0 (rows) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{product}\left(\{1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 * 2), (3 * 4), (5 * 6) \right] =
/// \left[ 2, 12, 30 \right]~~~\text{(dimension 1 (columns) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{product}\left(\{0,1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// (1 * 2) * (3 * 4) * (5 * 6) =
/// 720~~~\text{(both dimensions (rows and columns) are eliminated)}
/// \f]
///
/// This is equivalent to Reduce where `arg_init` = 1 and `reduction_function` is \f$f(x,y) = x*y\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | -------------------------------------- |
/// | `reduction_axes` | The axes to eliminate through product. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by product. |
class Product : public util::ArithmeticReduction
{
public:
/// \brief Constructs a product reduction operation.
///
/// \param arg The tensor view to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Product(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
: ArithmeticReduction("Product", arg, reduction_axes)
{
}
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override
{
if (new_args.size() != 1)
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<Product>(new_args.at(0), m_reduction_axes);
}
};
}
}
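The worked examples above can be checked by building a small graph; a sketch using the same API as the tests in this diff (shapes and names are illustrative):

#include "ngraph/function.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/product.hpp"
using namespace ngraph;
std::shared_ptr<Function> make_product_graph()
{
    // With input {{1, 2}, {3, 4}, {5, 6}} and reduction_axes {0}, the
    // result has shape {2} and values {15, 48}, matching the first
    // worked example above.
    auto arg = std::make_shared<op::Parameter>(element::f32, Shape{3, 2});
    auto prod = std::make_shared<op::Product>(arg, AxisSet{0});
    return std::make_shared<Function>(prod, op::Parameters{arg});
}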
......@@ -15,46 +15,11 @@
*******************************************************************************/
#include "ngraph/ops/sum.hpp"
#include "ngraph/function.hpp"
#include "ngraph/ops/broadcast.hpp"
using namespace std;
using namespace ngraph;
op::Sum::Sum(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
: RequiresTensorViewArgs("Sum", {arg})
, m_reduction_axes(reduction_axes)
{
auto& input = get_inputs().at(0);
auto& input_element_type = input.get_element_type();
if (input_element_type == element::boolean)
{
throw ngraph_error("Argument for sum must have numeric element type");
}
auto input_shape = input.get_shape();
for (auto axis : m_reduction_axes)
{
if (axis >= input_shape.size())
{
throw ngraph_error("Reduction axis for sum is out of bounds");
}
}
Shape result_shape;
for (size_t i = 0; i < input_shape.size(); i++)
{
if (m_reduction_axes.count(i) == 0)
{
result_shape.push_back(input_shape.at(i));
}
}
set_value_type_checked(input.get_element_type(), result_shape);
}
void op::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const std::shared_ptr<Node>& delta)
{
auto x = get_inputs().at(0).get_output().get_node();
......
......@@ -16,7 +16,7 @@
#pragma once
#include "ngraph/ops/util/requires_tensor_view_args.hpp"
#include "ngraph/ops/util/arithmetic_reduction.hpp"
namespace ngraph
{
......@@ -76,14 +76,17 @@ namespace ngraph
/// | Type | Description |
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
class Sum : public util::RequiresTensorViewArgs
class Sum : public util::ArithmeticReduction
{
public:
/// \brief Constructs a summation operation.
///
/// \param arg The tensor view to be summed.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Sum(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes);
Sum(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
: ArithmeticReduction("Sum", arg, reduction_axes)
{
}
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override
......@@ -95,13 +98,9 @@ namespace ngraph
return std::make_shared<Sum>(new_args.at(0), m_reduction_axes);
}
/// \return The axis positions (0-based) to be eliminated through summation.
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
AxisSet m_reduction_axes;
};
}
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "ngraph/ops/util/arithmetic_reduction.hpp"
using namespace std;
using namespace ngraph;
op::util::ArithmeticReduction::ArithmeticReduction(const std::string& node_type,
const std::shared_ptr<Node>& arg,
const AxisSet& reduction_axes)
: RequiresTensorViewArgs(node_type, {arg})
, m_reduction_axes(reduction_axes)
{
auto& input = get_inputs().at(0);
auto input_shape = input.get_shape();
for (auto axis : m_reduction_axes)
{
if (axis >= input_shape.size())
{
throw ngraph_error("Reduction axis for arithmetic reduction operator is out of bounds");
}
}
Shape result_shape;
for (size_t i = 0; i < input_shape.size(); i++)
{
if (m_reduction_axes.count(i) == 0)
{
result_shape.push_back(input_shape.at(i));
}
}
set_value_type_checked(input.get_element_type(), result_shape);
}
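To make the shape deduction concrete, the loop above keeps exactly the axes not named in reduction_axes:

// Illustrative trace of the result-shape loop above (not part of the change):
// input_shape = {2, 4}, m_reduction_axes = {0}
//   i = 0: in reduction_axes -> dropped
//   i = 1: kept              -> result_shape = {4}
// This matches the sum_deduce expectations in the tests below.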
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/ops/util/requires_tensor_view_args.hpp"
namespace ngraph
{
namespace op
{
namespace util
{
/// \brief Abstract base class for arithmetic reduction operations, i.e., operations where chosen axes of the input tensors
/// are eliminated (reduced out) by repeated application of a particular binary arithmetic operation.
class ArithmeticReduction : public RequiresTensorViewArgs
{
public:
/// \brief Constructs an arithmetic reduction operation.
///
/// \param arg Node that produces the first input tensor.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
ArithmeticReduction(const std::string& node_type,
const std::shared_ptr<Node>& arg,
const AxisSet& reduction_axes);
/// \return The axis positions (0-based) to be eliminated through reduction.
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
protected:
AxisSet m_reduction_axes;
};
}
}
}
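New reductions follow the same pattern as Max, Min, and Product above; a hypothetical subclass sketch (the op name and its semantics are invented for illustration):

class MyReduction : public util::ArithmeticReduction
{
public:
    MyReduction(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
        : ArithmeticReduction("MyReduction", arg, reduction_axes)
    {
    }
    virtual std::shared_ptr<Node> copy_with_new_args(
        const std::vector<std::shared_ptr<Node>>& new_args) const override
    {
        if (new_args.size() != 1)
        {
            throw ngraph_error("Incorrect number of new arguments");
        }
        return std::make_shared<MyReduction>(new_args.at(0), m_reduction_axes);
    }
};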
......@@ -61,8 +61,10 @@
#include "ngraph/ops/less.hpp"
#include "ngraph/ops/less_eq.hpp"
#include "ngraph/ops/log.hpp"
#include "ngraph/ops/max.hpp"
#include "ngraph/ops/max_pool.hpp"
#include "ngraph/ops/maximum.hpp"
#include "ngraph/ops/min.hpp"
#include "ngraph/ops/minimum.hpp"
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/negative.hpp"
......@@ -73,6 +75,7 @@
#include "ngraph/ops/pad.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/product.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reduce_window.hpp"
#include "ngraph/ops/remainder.hpp"
......@@ -223,6 +226,9 @@ static const runtime::cpu::OpMap dispatcher{
{TI(ngraph::op::Pad), &runtime::cpu::CPU_Emitter::emit<op::Pad>},
{TI(ngraph::op::BatchNorm), &runtime::cpu::CPU_Emitter::emit<op::BatchNorm>},
{TI(ngraph::op::MaxPoolBackprop), &runtime::cpu::CPU_Emitter::emit<op::MaxPoolBackprop>},
{TI(ngraph::op::Product), &runtime::cpu::CPU_Emitter::emit<op::Product>},
{TI(ngraph::op::Max), &runtime::cpu::CPU_Emitter::emit<op::Max>},
{TI(ngraph::op::Min), &runtime::cpu::CPU_Emitter::emit<op::Min>},
};
runtime::cpu::CPU_ExternalFunction::CPU_ExternalFunction(
......@@ -283,10 +289,13 @@ void runtime::cpu::CPU_ExternalFunction::compile()
#include "ngraph/runtime/kernel/concat.hpp"
#include "ngraph/runtime/kernel/convolution.hpp"
#include "ngraph/runtime/kernel/dot.hpp"
#include "ngraph/runtime/kernel/max.hpp"
#include "ngraph/runtime/kernel/max_pool.hpp"
#include "ngraph/runtime/kernel/min.hpp"
#include "ngraph/runtime/kernel/not.hpp"
#include "ngraph/runtime/kernel/one_hot.hpp"
#include "ngraph/runtime/kernel/pad.hpp"
#include "ngraph/runtime/kernel/product.hpp"
#include "ngraph/runtime/kernel/reduce.hpp"
#include "ngraph/runtime/kernel/reduce_window.hpp"
#include "ngraph/runtime/kernel/replace_slice.hpp"
......
......@@ -56,7 +56,16 @@ runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_
throw ngraph_error("Error allocating CPU Tensor View memory");
}
buffer = static_cast<char*>(ptr);
// GCC major versions below 5 do not implement C++11 std::align
#if !defined(__GNUC__) || __GNUC__ >= 5
std::align(BufferAlignment, buffer_size, ptr, allocation_size);
#else
ptr = static_cast<char*>(ptr) + (BufferAlignment - 1);
ptr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) &
~(uintptr_t(BufferAlignment - 1)));
#endif
aligned_buffer = static_cast<char*>(ptr);
}
}
......
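The pre-GCC-5 branch above aligns manually by bumping the pointer and masking off the low bits; a standalone sketch of the same computation (assumes the alignment is a power of two, which the mask trick requires, and that the allocation reserved alignment - 1 extra bytes so the bumped pointer stays in bounds):

#include <cstddef>
#include <cstdint>
// Round ptr up to the next multiple of alignment.
void* align_up(void* ptr, std::size_t alignment)
{
    std::uintptr_t p = reinterpret_cast<std::uintptr_t>(ptr);
    p = (p + alignment - 1) & ~static_cast<std::uintptr_t>(alignment - 1);
    return reinterpret_cast<void*>(p);
}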
......@@ -40,11 +40,12 @@ namespace ngraph
const std::unordered_set<std::type_index> s_op_registry{
TI(ngraph::op::AvgPool),
TI(ngraph::op::AvgPoolBackprop),
TI(ngraph::op::BatchNorm),
TI(ngraph::op::Convolution),
TI(ngraph::op::ConvolutionBackpropData),
TI(ngraph::op::ConvolutionBackpropFilters),
TI(ngraph::op::MaxPool),
TI(ngraph::op::BatchNorm)};
TI(ngraph::op::MaxPoolBackprop)};
bool IsMKLDNNOp(ngraph::Node& op)
{
......
......@@ -29,9 +29,12 @@
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convolution.hpp"
#include "ngraph/ops/dot.hpp"
#include "ngraph/ops/max.hpp"
#include "ngraph/ops/max_pool.hpp"
#include "ngraph/ops/min.hpp"
#include "ngraph/ops/one_hot.hpp"
#include "ngraph/ops/pad.hpp"
#include "ngraph/ops/product.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reduce_window.hpp"
#include "ngraph/ops/replace_slice.hpp"
......@@ -67,8 +70,10 @@
#include "ngraph/runtime/kernel/less.hpp"
#include "ngraph/runtime/kernel/less_eq.hpp"
#include "ngraph/runtime/kernel/log.hpp"
#include "ngraph/runtime/kernel/max.hpp"
#include "ngraph/runtime/kernel/max_pool.hpp"
#include "ngraph/runtime/kernel/maximum.hpp"
#include "ngraph/runtime/kernel/min.hpp"
#include "ngraph/runtime/kernel/minimum.hpp"
#include "ngraph/runtime/kernel/multiply.hpp"
#include "ngraph/runtime/kernel/negate.hpp"
......@@ -77,6 +82,7 @@
#include "ngraph/runtime/kernel/one_hot.hpp"
#include "ngraph/runtime/kernel/pad.hpp"
#include "ngraph/runtime/kernel/power.hpp"
#include "ngraph/runtime/kernel/product.hpp"
#include "ngraph/runtime/kernel/reduce.hpp"
#include "ngraph/runtime/kernel/reduce_window.hpp"
#include "ngraph/runtime/kernel/replace_slice.hpp"
......@@ -489,6 +495,15 @@ private:
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Max")
{
const op::Max* max = static_cast<const op::Max*>(&node);
kernel::max<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
args[0]->get_shape(),
out[0]->get_shape(),
max->get_reduction_axes());
}
else if (node_op == "Maximum")
{
kernel::maximum<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
......@@ -524,6 +539,15 @@ private:
max_pool_backprop->get_padding_below(),
max_pool_backprop->get_padding_above());
}
else if (node_op == "Min")
{
const op::Min* min = static_cast<const op::Min*>(&node);
kernel::min<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
args[0]->get_shape(),
out[0]->get_shape(),
min->get_reduction_axes());
}
else if (node_op == "Minimum")
{
kernel::minimum<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
......@@ -589,6 +613,15 @@ private:
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Product")
{
const op::Product* product = static_cast<const op::Product*>(&node);
kernel::product<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
args[0]->get_shape(),
out[0]->get_shape(),
product->get_reduction_axes());
}
else if (node_op == "Reduce")
{
ngraph::op::Reduce* reduce = dynamic_cast<ngraph::op::Reduce*>(&node);
......
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <cmath>
#include <limits>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void max(T* arg,
T* out,
const Shape& in_shape,
const Shape& out_shape,
const AxisSet& reduction_axes)
{
T minval = std::numeric_limits<T>::has_infinity
? -std::numeric_limits<T>::infinity()
: std::numeric_limits<T>::min();
CoordinateTransform output_transform(out_shape);
for (const Coordinate& output_coord : output_transform)
{
out[output_transform.index(output_coord)] = minval;
}
CoordinateTransform input_transform(in_shape);
for (const Coordinate& input_coord : input_transform)
{
Coordinate output_coord = project_coordinate(input_coord, reduction_axes);
T x = arg[input_transform.index(input_coord)];
T max = out[output_transform.index(output_coord)];
if (x > max)
{
out[output_transform.index(output_coord)] = x;
}
}
}
}
}
}
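For the common 2-D, axis-0 case, the kernel above is equivalent to the following plain-loop form (a hypothetical restatement for clarity, not code from this change):

#include <cstddef>
#include <limits>
// out[j] = max over i of arg[i * cols + j], seeded with the same
// identity element the kernel above uses (-inf, or the type's minimum
// for types without an infinity).
template <typename T>
void max_axis0(const T* arg, T* out, std::size_t rows, std::size_t cols)
{
    T minval = std::numeric_limits<T>::has_infinity
                   ? -std::numeric_limits<T>::infinity()
                   : std::numeric_limits<T>::min();
    for (std::size_t j = 0; j < cols; j++)
    {
        out[j] = minval;
    }
    for (std::size_t i = 0; i < rows; i++)
    {
        for (std::size_t j = 0; j < cols; j++)
        {
            if (arg[i * cols + j] > out[j])
            {
                out[j] = arg[i * cols + j];
            }
        }
    }
}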
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <cmath>
#include <limits>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void min(T* arg,
T* out,
const Shape& in_shape,
const Shape& out_shape,
const AxisSet& reduction_axes)
{
T minval = std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity()
: std::numeric_limits<T>::max();
CoordinateTransform output_transform(out_shape);
for (const Coordinate& output_coord : output_transform)
{
out[output_transform.index(output_coord)] = minval;
}
CoordinateTransform input_transform(in_shape);
for (const Coordinate& input_coord : input_transform)
{
Coordinate output_coord = project_coordinate(input_coord, reduction_axes);
T x = arg[input_transform.index(input_coord)];
T min = out[output_transform.index(output_coord)];
if (x < min)
{
out[output_transform.index(output_coord)] = x;
}
}
}
}
}
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void product(T* arg,
T* out,
const Shape& in_shape,
const Shape& out_shape,
const AxisSet& reduction_axes)
{
CoordinateTransform output_transform(out_shape);
for (const Coordinate& output_coord : output_transform)
{
out[output_transform.index(output_coord)] = 1;
}
CoordinateTransform input_transform(in_shape);
for (const Coordinate& input_coord : input_transform)
{
Coordinate output_coord = project_coordinate(input_coord, reduction_axes);
out[output_transform.index(output_coord)] *=
arg[input_transform.index(input_coord)];
}
}
}
}
}
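A direct invocation sketch for the kernel above, checking the 3x2 example from the Product docs (illustrative only; real callers go through the interpreter dispatch shown earlier in this diff):

#include "ngraph/runtime/kernel/product.hpp"
using namespace ngraph;
void product_example()
{
    // Input laid out row-major as {{1, 2}, {3, 4}, {5, 6}}.
    float in[] = {1, 2, 3, 4, 5, 6};
    float out[2];
    runtime::kernel::product<float>(in, out, Shape{3, 2}, Shape{2}, AxisSet{0});
    // out is now {15, 48}: each column's product, with axis 0 eliminated.
}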
......@@ -43,8 +43,10 @@
#include "ngraph/ops/less.hpp"
#include "ngraph/ops/less_eq.hpp"
#include "ngraph/ops/log.hpp"
#include "ngraph/ops/max.hpp"
#include "ngraph/ops/max_pool.hpp"
#include "ngraph/ops/maximum.hpp"
#include "ngraph/ops/min.hpp"
#include "ngraph/ops/minimum.hpp"
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/negative.hpp"
......@@ -53,6 +55,7 @@
#include "ngraph/ops/one_hot.hpp"
#include "ngraph/ops/pad.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/product.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reduce_window.hpp"
#include "ngraph/ops/remainder.hpp"
......@@ -552,6 +555,11 @@ static shared_ptr<ngraph::Function>
{
node = make_shared<op::Log>(args[0]);
}
else if (node_op == "Max")
{
auto reduction_axes = node_js.at("reduction_axes").get<set<size_t>>();
node = make_shared<op::Max>(args[0], reduction_axes);
}
else if (node_op == "MaxPool")
{
auto window_shape = node_js.at("window_shape").get<vector<size_t>>();
......@@ -601,6 +609,11 @@ static shared_ptr<ngraph::Function>
{
node = make_shared<op::Maximum>(args[0], args[1]);
}
else if (node_op == "Min")
{
auto reduction_axes = node_js.at("reduction_axes").get<set<size_t>>();
node = make_shared<op::Min>(args[0], reduction_axes);
}
else if (node_op == "Minimum")
{
node = make_shared<op::Minimum>(args[0], args[1]);
......@@ -647,6 +660,11 @@ static shared_ptr<ngraph::Function>
{
node = make_shared<op::Power>(args[0], args[1]);
}
else if (node_op == "Product")
{
auto reduction_axes = node_js.at("reduction_axes").get<set<size_t>>();
node = make_shared<op::Product>(args[0], reduction_axes);
}
else if (node_op == "Reduce")
{
auto reduction_axes = node_js.at("reduction_axes").get<set<size_t>>();
......@@ -951,6 +969,11 @@ static json write(const Node& n)
else if (node_op == "Log")
{
}
else if (node_op == "Max")
{
auto tmp = dynamic_cast<const op::Max*>(&n);
node["reduction_axes"] = tmp->get_reduction_axes();
}
else if (node_op == "MaxPool")
{
auto tmp = dynamic_cast<const op::MaxPool*>(&n);
......@@ -970,6 +993,11 @@ static json write(const Node& n)
else if (node_op == "Maximum")
{
}
else if (node_op == "Min")
{
auto tmp = dynamic_cast<const op::Min*>(&n);
node["reduction_axes"] = tmp->get_reduction_axes();
}
else if (node_op == "Minimum")
{
}
......@@ -1004,6 +1032,11 @@ static json write(const Node& n)
node["shape"] = tmp->get_shape();
node["element_type"] = write_element_type(tmp->get_element_type());
}
else if (node_op == "Product")
{
auto tmp = dynamic_cast<const op::Product*>(&n);
node["reduction_axes"] = tmp->get_reduction_axes();
}
else if (node_op == "Power")
{
}
......
......@@ -1405,3 +1405,88 @@ TEST(${BACKEND_NAME}, backwards_reverse_3d_02)
};
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x}, .01f, .01f));
}
TEST(${BACKEND_NAME}, backwards_maxpool_n4c1h4w4_kh2kw2_sh1sw1)
{
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
Shape shape_a{4, 1, 4, 4}; // in NCHW
Shape maxpool_shape{4, 1, 3, 3};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape window_shape{2, 2};
auto window_movement_strides = Strides{1, 1};
auto maxpool = make_shared<op::MaxPool>(A, window_shape, window_movement_strides);
auto f = make_shared<Function>(maxpool, op::Parameters{A});
shared_ptr<runtime::TensorView> ep =
backend->make_primary_tensor_view(element::f32, maxpool_shape);
vector<float> dataEp(shape_size(maxpool_shape), 4);
shared_ptr<runtime::TensorView> input =
backend->make_primary_tensor_view(element::f32, shape_a);
shared_ptr<runtime::TensorView> output =
backend->make_primary_tensor_view(element::f32, shape_a);
vector<float> dataInput{11, 65, 44, 28, 31, 33, 21, 66, 40, 49, 69, 57, 47, 30, 24, 27,
13, 56, 46, 60, 61, 41, 25, 42, 48, 53, 51, 43, 59, 58, 29, 71,
17, 22, 72, 18, 39, 35, 15, 38, 64, 52, 73, 67, 62, 50, 10, 68,
45, 63, 16, 14, 55, 54, 37, 20, 36, 12, 70, 34, 19, 26, 32, 23};
vector<float> expected{//delta
0, 8, 0, 0, 0, 0, 0, 4, 0, 8, 16, 0, 0, 0, 0, 0, 0, 4, 0, 4, 8, 0,
0, 0, 0, 4, 4, 0, 4, 4, 0, 4, 0, 0, 8, 0, 4, 0, 0, 0, 8, 0, 16, 0,
0, 0, 0, 0, 0, 8, 0, 0, 4, 0, 4, 0, 4, 0, 16, 0, 0, 0, 0, 0};
copy_data(ep, dataEp);
copy_data(input, dataInput);
auto C = make_shared<op::Parameter>(element::f32, maxpool_shape);
auto df = autodiff::backprop_function(f);
auto external = manager->compile(df);
auto cf = backend->make_call_frame(external);
cf->tensor_call({input, ep}, {output});
ASSERT_TRUE(read_vector<float>(output) == expected);
}
TEST(${BACKEND_NAME}, backwards_maxpool_n2c1h5w5_kh3kw3_sh2sw2)
{
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
Shape shape_a{1, 2, 5, 5}; // in NCHW
Shape maxpool_shape{1, 2, 2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape window_shape{3, 3};
auto window_movement_strides = Strides{2, 2};
auto maxpool = make_shared<op::MaxPool>(A, window_shape, window_movement_strides);
auto f = make_shared<Function>(maxpool, op::Parameters{A});
shared_ptr<runtime::TensorView> ep =
backend->make_primary_tensor_view(element::f32, maxpool_shape);
vector<float> dataEp(shape_size(maxpool_shape), 4);
shared_ptr<runtime::TensorView> input =
backend->make_primary_tensor_view(element::f32, shape_a);
shared_ptr<runtime::TensorView> output =
backend->make_primary_tensor_view(element::f32, shape_a);
vector<float> dataInput{58, 15, 51, 35, 18, 47, 31, 32, 52, 21, 36, 38, 57, 54, 25, 45, 23,
30, 16, 27, 48, 20, 41, 37, 43, 39, 22, 28, 33, 29, 12, 17, 44, 42,
19, 40, 10, 46, 34, 53, 26, 55, 50, 13, 24, 14, 49, 56, 59, 11};
vector<float> expected{//delta
4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 4, 0, 4, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0};
copy_data(ep, dataEp);
copy_data(input, dataInput);
auto C = make_shared<op::Parameter>(element::f32, maxpool_shape);
auto df = autodiff::backprop_function(f);
auto external = manager->compile(df);
auto cf = backend->make_call_frame(external);
cf->tensor_call({input, ep}, {output});
ASSERT_TRUE(read_vector<float>(output) == expected);
}
\ No newline at end of file
......@@ -76,13 +76,15 @@ TEST(benchmark, mxnet_seq2seq_backward)
TEST(benchmark, mxnet_sockeye_seq2seq_forward)
{
const string json_path = file_util::path_join(SERIALIZED_ZOO, "mxnet/Sockeye_Seq2Seq_forward.json");
const string json_path =
file_util::path_join(SERIALIZED_ZOO, "mxnet/Sockeye_Seq2Seq_forward.json");
run_benchmark(json_path, "CPU", 10);
}
TEST(benchmark, mxnet_sockeye_seq2seq_backward)
{
const string json_path = file_util::path_join(SERIALIZED_ZOO, "mxnet/Sockeye_Seq2Seq_backward.json");
const string json_path =
file_util::path_join(SERIALIZED_ZOO, "mxnet/Sockeye_Seq2Seq_backward.json");
run_benchmark(json_path, "CPU", 10);
}
......
......@@ -546,4 +546,4 @@ TEST(pattern, variance)
auto var_graph = construct_variance_graph();
ASSERT_TRUE(n.match(var_graph, variance));
ASSERT_EQ(n.get_pattern_map()[var_graph], variance);
}
\ No newline at end of file
}
......@@ -631,7 +631,7 @@ TEST(type_prop, reduce_nonscalar)
{
auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect incorrect element types for arithmetic operator";
FAIL() << "Did not detect non-scalar initial value for reduce";
}
catch (const ngraph_error& error)
{
......@@ -656,7 +656,7 @@ TEST(type_prop, reduce_elem_type_mismatch)
{
auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect incorrect element types for arithmetic operator";
FAIL() << "Did not detect element type mismatch for reduce";
}
catch (const ngraph_error& error)
{
......@@ -816,7 +816,7 @@ TEST(type_prop, reduce_axis_oob)
{
auto r = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0, 2, 1});
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect incorrect element types for arithmetic operator";
FAIL() << "Did not detect out-of-bound axis for reduce";
}
catch (const ngraph_error& error)
{
......@@ -6048,3 +6048,45 @@ TEST(type_prop, pad_deduce_interior_padding_wrong_rank)
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, sum_deduce)
{
auto param_0 = make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto r0 = make_shared<op::Sum>(param_0, AxisSet{0});
ASSERT_EQ(r0->get_element_type(), element::f32);
ASSERT_EQ(r0->get_shape(), (Shape{4}));
auto r1 = make_shared<op::Sum>(param_0, AxisSet{1});
ASSERT_EQ(r1->get_element_type(), element::f32);
ASSERT_EQ(r1->get_shape(), (Shape{2}));
auto r01 = make_shared<op::Sum>(param_0, AxisSet{0, 1});
ASSERT_EQ(r01->get_element_type(), element::f32);
ASSERT_EQ(r01->get_shape(), (Shape{}));
auto r_none = make_shared<op::Sum>(param_0, AxisSet{});
ASSERT_EQ(r_none->get_element_type(), element::f32);
ASSERT_EQ(r_none->get_shape(), (Shape{2, 4}));
}
TEST(type_prop, sum_axis_oob)
{
auto param_0 = make_shared<op::Parameter>(element::f32, Shape{2, 4});
try
{
auto r = make_shared<op::Sum>(param_0, AxisSet{0, 2, 1});
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect out-of-bound axis for sum";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(),
std::string("Reduction axis for arithmetic reduction operator is out of bounds"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}