Commit df4d896b authored by Pruthvi, committed by Scott Cyphers

Merge cumulative sum to r0.25.1 for Waymo (#3977)

* - Cherry-pick CumSum implementation [ https://github.com/NervanaSystems/ngraph/pull/3873/ ] to r0.25.1

* - Style fixes
- Fix unit test failures

* - update dummy function for CumSum in cpu_builder_registry header file

* - Fix build failures
- Remove explicit Op versioning

* Move export
parent 4f7d8fa9
......@@ -128,6 +128,8 @@ set (SRC
op/cos.hpp
op/cosh.cpp
op/cosh.hpp
op/cum_sum.cpp
op/cum_sum.hpp
op/crop_and_resize.cpp
op/crop_and_resize.hpp
op/dequantize.cpp
......
......@@ -82,6 +82,7 @@
#include "ngraph/op/cos.hpp"
#include "ngraph/op/cosh.hpp"
#include "ngraph/op/crop_and_resize.hpp"
#include "ngraph/op/cum_sum.hpp"
#include "ngraph/op/dequantize.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/dot.hpp"
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/cum_sum.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
const string op::CumSum::type_name{"CumSum"};
op::CumSum::CumSum(const Output<Node>& arg,
const Output<Node>& axis,
const bool exclusive,
const bool reverse)
: Op({arg, axis})
, m_exclusive(exclusive)
, m_reverse(reverse)
{
NODE_VALIDATION_CHECK(this,
axis.get_element_type() == element::i32 ||
axis.get_element_type() == element::i64,
"axis element type must be either int64_t or int32_t but got (",
axis.get_element_type(),
").");
set_output_type(0, arg.get_element_type(), arg.get_shape());
}
shared_ptr<Node> op::CumSum::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<op::CumSum>(new_args.at(0), new_args.at(1), m_exclusive, m_reverse);
}
shared_ptr<Node> op::CumSum::get_default_value() const
{
return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/axis_set.hpp"
#include "ngraph/op/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Tensor cumulative sum operation.
///
/// Compute the cumulative sum of the input tensor along the axis specified.
///
/// ## Parameters
///
/// |             | Description |
/// | ----------- | ----------- |
/// | `exclusive` | If set to 1, returns the exclusive sum, in which the top element is not included; i.e. the j-th output element is the sum of the first (j-1) elements. Otherwise it is the sum of the first j elements. |
/// | `reverse`   | If set to 1, performs the sum in the reverse direction. |
///
/// ## Inputs
///
/// |        | Description |
/// | ------ | ----------- |
/// | `arg`  | An input tensor of any shape and numeric element type. |
/// | `axis` | A zero-dimensional tensor specifying the axis along which the cumulative sum is computed. |
///
/// ## Output
///
/// | Description |
/// | ----------- |
/// | An output tensor of the same type as `arg`, containing the cumulative sums of `arg`'s elements along the specified axis. |
class CumSum : public Op
{
public:
NGRAPH_API
static const std::string type_name;
const std::string& description() const override { return type_name; }
/// \brief Constructs a cumulative summation operation.
CumSum() = default;
/// \brief Constructs a cumulative summation operation.
///
/// \param arg The tensor to be summed.
/// \param axis A zero-dimensional tensor specifying the axis along which the
/// cumulative sum is computed.
/// \param exclusive If true, computes the exclusive cumulative sum: the j-th output
/// element is the sum of the first (j-1) elements.
/// \param reverse If true, performs the cumulative sum in the reverse direction.
CumSum(const Output<Node>& arg,
const Output<Node>& axis,
const bool exclusive = false,
const bool reverse = false);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
/// \return The default value for CumSum.
virtual std::shared_ptr<Node> get_default_value() const override;
bool is_exclusive() const { return m_exclusive; }
bool is_reverse() const { return m_reverse; }
private:
bool m_exclusive;
bool m_reverse;
};
}
}
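
For orientation, a minimal sketch of how the new op is wired into an nGraph Function; the values in the comments are illustrative, and the unit tests added at the end of this diff show how such a function is compiled and run on a backend:

#include "ngraph/ngraph.hpp"

using namespace ngraph;
using namespace std;

// Builds f(A, axis) = CumSum(A, axis, exclusive=true, reverse=true).
// For A = {1, 2, 3, 4} and axis = 0 the expected output is {9, 7, 4, 0}:
// each element is replaced by the sum of the elements that follow it.
shared_ptr<Function> make_cum_sum_function()
{
    auto A = make_shared<op::Parameter>(element::f32, Shape{4});
    auto axis = make_shared<op::Parameter>(element::i32, Shape{1});
    auto cum_sum = make_shared<op::CumSum>(A, axis, /*exclusive=*/true, /*reverse=*/true);
    return make_shared<Function>(cum_sum, ParameterVector{A, axis});
}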
......@@ -80,6 +80,7 @@ NGRAPH_OP(ConvolutionBackpropData, ngraph::op)
NGRAPH_OP(ConvolutionBackpropFilters, ngraph::op)
NGRAPH_OP(Cos, ngraph::op)
NGRAPH_OP(Cosh, ngraph::op)
NGRAPH_OP(CumSum, ngraph::op)
NGRAPH_OP(CropAndResize, ngraph::op)
NGRAPH_OP(Dequantize, ngraph::op)
NGRAPH_OP(Divide, ngraph::op)
......
......@@ -52,6 +52,7 @@ set(SRC
builder/convert.cpp
builder/convert_layout.cpp
builder/convolution.cpp
builder/cum_sum.cpp
builder/dot.cpp
builder/dropout.cpp
builder/embedding_lookup.cpp
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/cpu/kernel/cum_sum.hpp"
#include "ngraph/op/cum_sum.hpp"
#include "ngraph/runtime/cpu/cpu_builder.hpp"
using namespace std;
using namespace ngraph;
namespace ngraph
{
namespace runtime
{
namespace cpu
{
template <>
void Builder::BUILDER_DECL(ngraph::op::CumSum)
{
#define FUNCTOR_CUMSUM(T, M) \
do \
{ \
auto functor = [&, \
kernel, \
arg0_buffer_index, \
arg1_buffer_index, \
out0_buffer_index, \
tensor_shape, \
cumsum_op](CPURuntimeContext* ctx, CPUExecutionContext* /* ectx */) { \
runtime::cpu::kernel::reference_cumsum<T, M>(ctx->buffer_data[arg0_buffer_index], \
ctx->buffer_data[arg1_buffer_index], \
ctx->buffer_data[out0_buffer_index], \
tensor_shape, \
cumsum_op->is_exclusive(), \
cumsum_op->is_reverse()); \
}; \
functors.emplace_back(functor); \
} while (0)
(void)node;
auto cumsum_op = static_cast<const ngraph::op::CumSum*>(node);
auto tensor_shape = args[0].get_shape();
auto arg0_buffer_index = external_function->get_buffer_index(args[0].get_name());
auto arg1_buffer_index = external_function->get_buffer_index(args[1].get_name());
auto out0_buffer_index = external_function->get_buffer_index(out[0].get_name());
auto& functors = external_function->get_functors();
if (args[0].get_element_type() == element::f32 &&
args[1].get_element_type() == element::i32)
{
std::function<decltype(runtime::cpu::kernel::reference_cumsum<float, int32_t>)>
kernel;
FUNCTOR_CUMSUM(float, int32_t);
}
else if (args[0].get_element_type() == element::f32 &&
args[1].get_element_type() == element::i64)
{
std::function<decltype(runtime::cpu::kernel::reference_cumsum<float, int64_t>)>
kernel;
FUNCTOR_CUMSUM(float, int64_t);
}
else if (args[0].get_element_type() == element::f64 &&
args[1].get_element_type() == element::i32)
{
std::function<decltype(runtime::cpu::kernel::reference_cumsum<double, int32_t>)>
kernel;
FUNCTOR_CUMSUM(double, int32_t);
}
else if (args[0].get_element_type() == element::f64 &&
args[1].get_element_type() == element::i64)
{
std::function<decltype(runtime::cpu::kernel::reference_cumsum<double, int64_t>)>
kernel;
FUNCTOR_CUMSUM(double, int64_t);
}
else if (args[0].get_element_type() == element::i32 &&
args[1].get_element_type() == element::i32)
{
std::function<decltype(
runtime::cpu::kernel::reference_cumsum<int32_t, int32_t>)>
kernel;
FUNCTOR_CUMSUM(int32_t, int32_t);
}
else if (args[0].get_element_type() == element::i32 &&
args[1].get_element_type() == element::i64)
{
std::function<decltype(
runtime::cpu::kernel::reference_cumsum<int32_t, int64_t>)>
kernel;
FUNCTOR_CUMSUM(int32_t, int64_t);
}
else if (args[0].get_element_type() == element::i64 &&
args[1].get_element_type() == element::i32)
{
std::function<decltype(
runtime::cpu::kernel::reference_cumsum<int64_t, int32_t>)>
kernel;
FUNCTOR_CUMSUM(int64_t, int32_t);
}
else if (args[0].get_element_type() == element::i64 &&
args[1].get_element_type() == element::i64)
{
std::function<decltype(
runtime::cpu::kernel::reference_cumsum<int64_t, int64_t>)>
kernel;
FUNCTOR_CUMSUM(int64_t, int64_t);
}
else if (args[0].get_element_type() == element::u32 &&
args[1].get_element_type() == element::i32)
{
std::function<decltype(
runtime::cpu::kernel::reference_cumsum<uint32_t, int32_t>)>
kernel;
FUNCTOR_CUMSUM(uint32_t, int32_t);
}
else if (args[0].get_element_type() == element::u32 &&
args[1].get_element_type() == element::i64)
{
std::function<decltype(
runtime::cpu::kernel::reference_cumsum<uint32_t, int64_t>)>
kernel;
FUNCTOR_CUMSUM(uint32_t, int64_t);
}
else if (args[0].get_element_type() == element::u64 &&
args[1].get_element_type() == element::i32)
{
std::function<decltype(
runtime::cpu::kernel::reference_cumsum<uint64_t, int32_t>)>
kernel;
FUNCTOR_CUMSUM(uint64_t, int32_t);
}
else if (args[0].get_element_type() == element::u64 &&
args[1].get_element_type() == element::i64)
{
std::function<decltype(
runtime::cpu::kernel::reference_cumsum<uint64_t, int64_t>)>
kernel;
FUNCTOR_CUMSUM(uint64_t, int64_t);
}
}
REGISTER_OP_BUILDER(CumSum);
#ifdef NGRAPH_CPU_STATIC_LIB_ENABLE
void register_builders_cumsum_cpp() {}
#endif
}
}
}
......@@ -36,6 +36,7 @@
#include "ngraph/op/constant.hpp"
#include "ngraph/op/cos.hpp"
#include "ngraph/op/cosh.hpp"
#include "ngraph/op/cum_sum.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/equal.hpp"
#include "ngraph/op/exp.hpp"
......
......@@ -37,6 +37,7 @@ namespace ngraph
register_builders_convert_cpp();
register_builders_convert_layout_cpp();
register_builders_convolution_cpp();
register_builders_cumsum_cpp();
register_builders_dot_cpp();
register_builders_dropout_cpp();
register_builders_embedding_lookup_cpp();
......
......@@ -36,6 +36,7 @@ namespace ngraph
void register_builders_convert_cpp();
void register_builders_convert_layout_cpp();
void register_builders_convolution_cpp();
void register_builders_cumsum_cpp();
void register_builders_dot_cpp();
void register_builders_dropout_cpp();
void register_builders_embedding_lookup_cpp();
......
......@@ -46,6 +46,7 @@
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/cos.hpp"
#include "ngraph/op/cosh.hpp"
#include "ngraph/op/cum_sum.hpp"
#include "ngraph/op/dequantize.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/dot.hpp"
......@@ -4331,6 +4332,22 @@ namespace ngraph
}
}
template <>
void CPU_Emitter::EMITTER_DECL(ngraph::op::CumSum)
{
const ngraph::op::CumSum* cumsum = static_cast<const ngraph::op::CumSum*>(node);
writer.block_begin();
writer << "reference::cumsum<" << args[0].get_element_type().c_type_string();
writer << ", " << args[1].get_element_type().c_type_string() << ">(";
writer << " " << args[0].get_name() << ",\n";
writer << " " << args[1].get_name() << ",\n";
writer << " " << out[0].get_name() << ",\n";
writer << " {" << join(args[0].get_shape()) << "},\n";
writer << " " << cumsum->is_exclusive() << ",\n";
writer << " " << cumsum->is_reverse() << ");\n";
writer.block_end();
}
#undef TI
} // namespace cpu
} // namespace runtime
......
......@@ -68,6 +68,7 @@
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/cos.hpp"
#include "ngraph/op/cosh.hpp"
#include "ngraph/op/cum_sum.hpp"
#include "ngraph/op/dequantize.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/dot.hpp"
......@@ -358,6 +359,7 @@ static const runtime::cpu::OpMap dispatcher{
{TI(ngraph::op::Sinh), &runtime::cpu::CPU_Emitter::emit<op::Sinh>},
{TI(ngraph::op::Cos), &runtime::cpu::CPU_Emitter::emit<op::Cos>},
{TI(ngraph::op::Cosh), &runtime::cpu::CPU_Emitter::emit<op::Cosh>},
{TI(ngraph::op::CumSum), &runtime::cpu::CPU_Emitter::emit<op::CumSum>},
{TI(ngraph::op::Tan), &runtime::cpu::CPU_Emitter::emit<op::Tan>},
{TI(ngraph::op::Tanh), &runtime::cpu::CPU_Emitter::emit<op::Tanh>},
{TI(ngraph::op::TopK), &runtime::cpu::CPU_Emitter::emit<op::TopK>},
......@@ -551,6 +553,7 @@ void runtime::cpu::CPU_ExternalFunction::compile(ngraph::pass::PassConfig& pass_
#include "ngraph/runtime/reference/broadcast.hpp"
#include "ngraph/runtime/reference/concat.hpp"
#include "ngraph/runtime/reference/convolution.hpp"
#include "ngraph/runtime/reference/cum_sum.hpp"
#include "ngraph/runtime/reference/dequantize.hpp"
#include "ngraph/runtime/reference/dot.hpp"
#include "ngraph/runtime/reference/embedding_lookup.hpp"
......
......@@ -272,6 +272,14 @@ namespace ngraph
const double value,
const std::vector<std::minstd_rand>& vmsr,
const bool use_seed);
template <typename InputElementType, typename AxisElementType>
void reference_cumsum(void* input_tensor,
void* axis_tensor,
void* out,
const Shape& tensor_shape,
const bool exclusive,
const bool reverse);
}
}
}
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/runtime/cpu/cpu_executor.hpp"
#include "ngraph/runtime/reference/cum_sum.hpp"
namespace ngraph
{
namespace runtime
{
namespace cpu
{
namespace kernel
{
template <typename InputElementType, typename AxisElementType>
void reference_cumsum(void* input_tensor,
void* axis_tensor,
void* out,
const Shape& tensor_shape,
const bool exclusive,
const bool reverse)
{
reference::cumsum<InputElementType, AxisElementType>(
static_cast<const InputElementType*>(input_tensor),
static_cast<const AxisElementType*>(axis_tensor),
static_cast<InputElementType*>(out),
tensor_shape,
exclusive,
reverse);
}
}
}
}
}
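
For illustration only, a hedged sketch of invoking this type-erased wrapper directly on host buffers, outside the CPU backend's builder/functor machinery (the builder above is the real entry point):

#include <cstdint>
#include <vector>

#include "ngraph/runtime/cpu/kernel/cum_sum.hpp"

// Computes the cumulative sum of a 2x4 float tensor along axis 1 by calling the
// kernel wrapper with raw host pointers; `output` ends up as {0, 1, 3, 6, 4, 9, 15, 22}.
void cum_sum_on_host()
{
    std::vector<float> input{0, 1, 2, 3, 4, 5, 6, 7};
    std::vector<int32_t> axis{1};
    std::vector<float> output(input.size(), 0.0f);
    ngraph::runtime::cpu::kernel::reference_cumsum<float, int32_t>(
        input.data(),
        axis.data(),
        output.data(),
        ngraph::Shape{2, 4},
        /*exclusive=*/false,
        /*reverse=*/false);
}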
......@@ -35,6 +35,7 @@
#include "ngraph/op/concat.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/cum_sum.hpp"
#include "ngraph/op/dequantize.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/dot.hpp"
......@@ -99,6 +100,7 @@
#include "ngraph/runtime/reference/copy.hpp"
#include "ngraph/runtime/reference/cos.hpp"
#include "ngraph/runtime/reference/cosh.hpp"
#include "ngraph/runtime/reference/cum_sum.hpp"
#include "ngraph/runtime/reference/dequantize.hpp"
#include "ngraph/runtime/reference/divide.hpp"
#include "ngraph/runtime/reference/dot.hpp"
......@@ -1641,6 +1643,30 @@ private:
element_count);
break;
}
case OP_TYPEID::CumSum:
{
const op::CumSum* cumsum = static_cast<const op::CumSum*>(&node);
auto axis_et = node.get_input_element_type(1);
if (axis_et == element::i32)
{
reference::cumsum<T, int32_t>(args[0]->get_data_ptr<const T>(),
args[1]->get_data_ptr<const int32_t>(),
out[0]->get_data_ptr<T>(),
node.get_input_shape(0),
cumsum->is_exclusive(),
cumsum->is_reverse());
}
else if (axis_et == element::i64)
{
reference::cumsum<T, int64_t>(args[0]->get_data_ptr<const T>(),
args[1]->get_data_ptr<const int64_t>(),
out[0]->get_data_ptr<T>(),
node.get_input_shape(0),
cumsum->is_exclusive(),
cumsum->is_reverse());
}
break;
}
case OP_TYPEID::DynBroadcast:
case OP_TYPEID::Transpose:
case OP_TYPEID::DynPad:
......
......@@ -284,6 +284,12 @@ lstm_cell_activaction_functions
divide_python_rounding_int32
backwards_batchmatmul_tensor2_tensor2
# unsupported op: `CumSum`
cum_sum_default
cum_sum_2dim
cum_sum_3d
cum_sum_2dim_allmodes
# unsupported ops: 'QuantizedConvolution', 'QuantizedDot', 'TopK', 'Erf', 'EmbeddingLookup'
model_quant_conv_linear
model_conv_integer_no_zero_point
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cmath>
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/type/bfloat16.hpp"
#include "ngraph/type/float16.hpp"
namespace ngraph
{
namespace runtime
{
namespace reference
{
template <typename T, typename P>
void cumsum(const T* arg,
const P* axis_tensor,
T* out,
const Shape& tensor_shape,
const bool exclusive,
const bool reverse)
{
CoordinateTransform temp_transform(tensor_shape);
for (const Coordinate& output_coord : temp_transform)
{
out[temp_transform.index(output_coord)] = 0;
}
P axis = axis_tensor[0];
P rank = tensor_shape.size();
if (axis < -rank || axis > rank)
{
throw ngraph_error("axis must be in the range [-rank, rank]");
}
axis = axis < 0 ? rank + axis : axis;
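// get_key maps a coordinate to the coordinate of the first element of its run:
// the component along the summation axis is zeroed out, all other components are kept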
auto get_key = [&, axis](const Coordinate& coord) -> Coordinate {
Coordinate result(coord.size(), 0);
result[axis] = coord[axis];
for (size_t i = 0; i < coord.size(); i++)
{
result[i] = coord[i] - result[i];
}
return result;
};
auto update_output_buffer =
[&](size_t input_index,
size_t output_index,
T& prev,
std::vector<std::pair<size_t, T>>& tensor_vec) -> void {
tensor_vec[input_index].second = prev + tensor_vec[input_index].second;
out[tensor_vec[output_index].first] = tensor_vec[input_index].second;
// update prev to hold the last result value, to compute the running sum for
// subsequent iterations
prev = out[tensor_vec[output_index].first];
};
auto cum_sum =
[&, exclusive, reverse](std::vector<std::pair<size_t, T>>& tensor_vec) {
if (!reverse)
{
T prev = 0;
for (size_t i = 0; i < tensor_vec.size(); i++)
{
if (exclusive && i == 0)
{
out[tensor_vec[i].first] = prev;
continue;
}
// we compute the running sum of (j-1) elements if exclusive == 1, or of
// j elements if exclusive == 0
size_t arg_index = exclusive == 1 ? i - 1 : i;
update_output_buffer(arg_index, i, prev, tensor_vec);
}
}
else // reverse == true
{
T prev = 0;
for (size_t i = tensor_vec.size(); i-- > 0;)
{
if (exclusive && i == tensor_vec.size() - 1)
{
out[tensor_vec[i].first] = prev;
continue;
}
// we compute the running sum of (j-1) elements if exclusive == 1, or of
// j elements if exclusive == 0
size_t arg_index = exclusive == 1 ? i + 1 : i;
update_output_buffer(arg_index, i, prev, tensor_vec);
}
}
};
// Map collecting, for each run along the summation axis, the (flat index, value)
// pairs of the tensor elements belonging to that run
std::map<Coordinate, std::vector<std::pair<size_t, T>>> map_coord_to_val;
CoordinateTransform input_transform(tensor_shape);
for (const Coordinate& input_coord : input_transform)
{
// the current element of the input tensor
T current = arg[input_transform.index(input_coord)];
auto key = get_key(input_coord);
auto index = input_transform.index(input_coord);
if (map_coord_to_val.find(key) != map_coord_to_val.end())
{
map_coord_to_val[key].push_back(std::make_pair(index, current));
}
else
{
map_coord_to_val.insert({key, std::vector<std::pair<size_t, T>>()});
map_coord_to_val[key].push_back(std::make_pair(index, current));
}
}
// iterate over the map and perform the cumulative sum along the given axis
for (auto& it : map_coord_to_val)
{
cum_sum(it.second);
}
}
}
}
}
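
To make the exclusive/reverse combinations concrete, here is a self-contained sketch that mirrors the 1-D behaviour of the kernel above without using nGraph at all (purely illustrative; not part of this change):

#include <cstddef>
#include <iostream>
#include <vector>

// 1-D cumulative sum with the same exclusive/reverse semantics as the reference
// kernel: exclusive drops the current element from each partial sum, and reverse
// accumulates from the end of the vector towards the beginning.
std::vector<float> cumsum_1d(const std::vector<float>& in, bool exclusive, bool reverse)
{
    std::vector<float> out(in.size(), 0.0f);
    float running = 0.0f;
    if (!reverse)
    {
        for (size_t i = 0; i < in.size(); ++i)
        {
            out[i] = exclusive ? running : running + in[i];
            running += in[i];
        }
    }
    else
    {
        for (size_t i = in.size(); i-- > 0;)
        {
            out[i] = exclusive ? running : running + in[i];
            running += in[i];
        }
    }
    return out;
}

int main()
{
    // {1, 2, 3, 4} -> {1, 3, 6, 10}  (default)
    // {1, 2, 3, 4} -> {0, 1, 3, 6}   (exclusive)
    // {1, 2, 3, 4} -> {10, 9, 7, 4}  (reverse)
    // {1, 2, 3, 4} -> {9, 7, 4, 0}   (exclusive + reverse)
    for (float v : cumsum_1d({1.0f, 2.0f, 3.0f, 4.0f}, /*exclusive=*/true, /*reverse=*/true))
    {
        std::cout << v << " ";
    }
    std::cout << std::endl;
    return 0;
}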
......@@ -46,6 +46,7 @@
#include "ngraph/op/cos.hpp"
#include "ngraph/op/cosh.hpp"
#include "ngraph/op/crop_and_resize.hpp"
#include "ngraph/op/cum_sum.hpp"
#include "ngraph/op/dequantize.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/dot.hpp"
......@@ -1118,6 +1119,13 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
node = make_shared<op::Cosh>(args[0]);
break;
}
case OP_TYPEID::CumSum:
{
auto exclusive = node_js.at("exclusive");
auto reverse = node_js.at("reverse");
node = make_shared<op::CumSum>(args[0], args[1], exclusive, reverse);
break;
}
case OP_TYPEID::CropAndResize:
{
auto resize_method =
......@@ -2373,6 +2381,13 @@ json JSONSerializer::serialize_node(const Node& n)
}
case OP_TYPEID::Cosh: { break;
}
case OP_TYPEID::CumSum:
{
auto tmp = static_cast<const op::CumSum*>(&n);
node["exclusive"] = tmp->is_exclusive();
node["reverse"] = tmp->is_reverse();
break;
}
case OP_TYPEID::CropAndResize:
{
auto tmp = static_cast<const op::CropAndResize*>(&n);
......
......@@ -233,6 +233,7 @@ set(MULTI_TEST_SRC
backend/convert.in.cpp
backend/convolution.in.cpp
backend/convolution_reference.in.cpp
backend/cum_sum.in.cpp
backend/dot.in.cpp
backend/dyn_broadcast.in.cpp
backend/dyn_replace_slice_reference.in.cpp
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
static std::mt19937_64 random_generator;
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, cum_sum_default)
{
Shape shape{1, 4};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto axis = make_shared<op::Parameter>(element::i32, Shape{1});
auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{1, 2, 3, 4});
auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
copy_data(axis_tensor, vector<int32_t>{1});
auto result = backend->create_tensor(element::f32, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, axis_tensor});
EXPECT_TRUE(test::all_close_f((vector<float>{1, 3, 6, 10}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim)
{
Shape shape{2, 4};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto axis = make_shared<op::Parameter>(element::i64, Shape{1});
auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
copy_data(axis_tensor, vector<int64_t>{0});
auto result = backend->create_tensor(element::f32, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, axis_tensor});
EXPECT_TRUE(
test::all_close_f((vector<float>{0, 1, 2, 3, 4, 6, 8, 10}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, cum_sum_3d)
{
auto test_cumsum_3d = [](const int32_t axis_val) -> void {
Shape shape{3, 2, 4};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto axis = make_shared<op::Parameter>(element::i32, Shape{1});
auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
copy_data(axis_tensor, vector<int32_t>{axis_val});
auto result = backend->create_tensor(element::f32, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, axis_tensor});
if (axis_val == 0)
{
EXPECT_TRUE(
test::all_close_f((vector<float>{0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14,
16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 42, 45}),
read_vector<float>(result)));
}
else if (axis_val == 1)
{
EXPECT_TRUE(
test::all_close_f((vector<float>{0, 1, 2, 3, 4, 6, 8, 10, 8, 9, 10, 11,
20, 22, 24, 26, 16, 17, 18, 19, 36, 38, 40, 42}),
read_vector<float>(result)));
}
else if (axis_val == 2)
{
EXPECT_TRUE(
test::all_close_f((vector<float>{0, 1, 3, 6, 4, 9, 15, 22, 8, 17, 27, 38,
12, 25, 39, 54, 16, 33, 51, 70, 20, 41, 63, 86}),
read_vector<float>(result)));
}
};
test_cumsum_3d(0);
test_cumsum_3d(1);
test_cumsum_3d(2);
}
NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim_allmodes)
{
auto test_cum_sum_allmodes = [](const int64_t axis_val, int exclusive, int reverse) {
Shape shape{2, 4};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto axis = make_shared<op::Parameter>(element::i64, Shape{1});
auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis, exclusive, reverse),
ParameterVector{A, axis});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
copy_data(axis_tensor, vector<int64_t>{axis_val});
auto result = backend->create_tensor(element::f32, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, axis_tensor});
if (axis_val == 1 && exclusive == 1 && reverse == 0)
{
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 1, 3, 0, 4, 9, 15}),
read_vector<float>(result)));
}
else if (axis_val == 1 && exclusive == 0 && reverse == 1)
{
EXPECT_TRUE(test::all_close_f((vector<float>{6, 6, 5, 3, 22, 18, 13, 7}),
read_vector<float>(result)));
}
else if (axis_val == 1 && exclusive == 1 && reverse == 1)
{
EXPECT_TRUE(test::all_close_f((vector<float>{6, 5, 3, 0, 18, 13, 7, 0}),
read_vector<float>(result)));
}
else if (axis_val == 0 && exclusive == 0 && reverse == 0)
{
EXPECT_TRUE(test::all_close_f((vector<float>{0, 1, 2, 3, 4, 6, 8, 10}),
read_vector<float>(result)));
}
else if (axis_val == 0 && exclusive == 1 && reverse == 1)
{
EXPECT_TRUE(test::all_close_f((vector<float>{4, 5, 6, 7, 0, 0, 0, 0}),
read_vector<float>(result)));
}
else if (axis_val == 0 && exclusive == 0 && reverse == 1)
{
EXPECT_TRUE(test::all_close_f((vector<float>{4, 6, 8, 10, 4, 5, 6, 7}),
read_vector<float>(result)));
}
};
test_cum_sum_allmodes(1, 1, 0);
test_cum_sum_allmodes(-1, 0, 1);
test_cum_sum_allmodes(-1, 1, 1);
test_cum_sum_allmodes(0, 0, 0);
test_cum_sum_allmodes(0, 1, 1);
test_cum_sum_allmodes(0, 0, 1);
}