Commit 7b305e3e authored by Adam Procter, committed by Scott Cyphers

Finish de-Eigenization (#282)

* Simpler kernel for broadcast

* Fixed behavior for integer divide-by-zero, added unit tests

* Strided and higher-dimensional slice (just tested to 3D)

* Higher-dimensional sum

* Replace-slice de-Eigenized; NOT TESTED AT HIGHER DIMENSIONS YET

* Correct sum behavior when eliminating zero-length axes; add unit tests; also, add higher-dim unit tests for replace-slice

* Higher-dimensional reduce, 'cause hey, why not?

* Remove BroadcastScalarInstruction

* Adding test for an observed failure at trivial sum on 5-tensors

* De-Eigenized and higher-dimmified concat

* Replace 'auto' in the kernels

* temporary delete to ease merge

* Re-insert tests that were deleted to ease merge

* Refactor view-iteration

* De-Eigenize reshape

* Rework divide kernel to use std::enable_if to distinguish between floating and non-floating types (see the sketch following this list)

* Update docs to reflect newly implemented cases in several ops

* Rename parameters to View for more clarity; remove axis_walk_order (it's redundant)

* Formatting

* More terminological rejiggering

* De-Eigenize scalar-tensor product

* De-Eigenize dot

* Update docstrings

* Remove 'implementation status' tables from docstrings

* Change step -> strides everywhere for consistent terminology

* Formatting

* Replace asserts in view.cpp with exceptions

* Fix typo

* Fix incorrect result type in dot1d test (ouch...)

* Add missing support for Float64 to ngvm/external_function

* Add int16 and uint16 (how was this missing?)

* A few more additions relative to the missing element types

* Disable tests that will not pass on CPU; they can still be run with test/unit-test --gtest_also_run_disabled_tests --gtest_filter='DISABLED_NGVM.*'

* Move project_ and inject_ functions to common.[ch]pp, not view.[ch]pp

* Rename View to CoordinateTransform

* Add prefix ++ and += to CoordinateIterator
parent 84ec0dad
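The divide-kernel rework in the list above dispatches on the element type at compile time. Below is a minimal sketch of the technique, not the committed kernel: the free function name divide, the raw-pointer calling convention, and the exception type are all illustrative assumptions.

#include <cstddef>
#include <stdexcept>
#include <type_traits>

// Floating-point case: rely on IEEE 754 semantics, where x/0 yields +/-inf or NaN.
template <typename T>
typename std::enable_if<std::is_floating_point<T>::value>::type
    divide(const T* arg0, const T* arg1, T* out, std::size_t count)
{
    for (std::size_t i = 0; i < count; i++)
    {
        out[i] = arg0[i] / arg1[i];
    }
}

// Integral case: division by zero is undefined behavior in C++, so test and throw.
template <typename T>
typename std::enable_if<std::is_integral<T>::value>::type
    divide(const T* arg0, const T* arg1, T* out, std::size_t count)
{
    for (std::size_t i = 0; i < count; i++)
    {
        if (arg1[i] == 0)
        {
            throw std::domain_error("integer division by zero"); // illustrative error type
        }
        out[i] = arg0[i] / arg1[i];
    }
}

Because std::is_floating_point and std::is_integral are mutually exclusive, exactly one overload survives substitution for any arithmetic T, so the call site needs no runtime branch or tag dispatch.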
......@@ -16,7 +16,8 @@ set (SRC
builder/autobroadcast.cpp
builder/numpy_transpose.cpp
builder/reduce_ops.cpp
coordinate_iterator.cpp
common.cpp
coordinate_transform.cpp
descriptor/input.cpp
descriptor/layout/dense_tensor_view_layout.cpp
descriptor/layout/tensor_view_layout.cpp
......
......@@ -139,7 +139,7 @@ void autodiff::Adjoints::add_delta_to_slice(const std::shared_ptr<Node>& x,
const std::shared_ptr<Node>& delta,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Shape& step)
const Strides& strides)
{
auto x_tensor_view_type = std::dynamic_pointer_cast<const TensorViewType>(x->get_value_type());
auto delta_tensor_view_type =
......@@ -153,18 +153,18 @@ void autodiff::Adjoints::add_delta_to_slice(const std::shared_ptr<Node>& x,
if (m_adjoint_map.end() == adjoint_it)
{
auto zeros = make_zero(x->get_outputs().at(0).get_tensor_view_type());
m_adjoint_map.insert(
{x.get(),
std::make_shared<op::ReplaceSlice>(zeros, delta, lower_bounds, upper_bounds, step)});
m_adjoint_map.insert({x.get(),
std::make_shared<op::ReplaceSlice>(
zeros, delta, lower_bounds, upper_bounds, strides)});
}
else
{
adjoint_it->second = std::make_shared<op::ReplaceSlice>(
adjoint_it->second,
std::make_shared<op::Slice>(adjoint_it->second, lower_bounds, upper_bounds, step) +
std::make_shared<op::Slice>(adjoint_it->second, lower_bounds, upper_bounds, strides) +
delta,
lower_bounds,
upper_bounds,
step);
strides);
}
}
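In effect, when x has no adjoint yet, the delta is scattered into a zero tensor via ReplaceSlice; otherwise the update is adjoint(x) <- ReplaceSlice(adjoint(x), Slice(adjoint(x), l, u, s) + delta, l, u, s), which adds delta elementwise to the selected strided slice and leaves every other element of adjoint(x) unchanged.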
......@@ -62,12 +62,12 @@ namespace ngraph
/// @param delta A backprop contribution
/// @param lower_bounds Lower bounds of slice to add to
/// @param upper_bounds Upper bounds of slice to add to
/// @param step Step (or stride) of slice to add to
/// @param strides Strides of slice to add to
void add_delta_to_slice(const std::shared_ptr<Node>& x,
const std::shared_ptr<Node>& delta,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Shape& step);
const Strides& strides);
protected:
std::unordered_map<Node*, std::shared_ptr<Node>> m_adjoint_map;
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include "ngraph/common.hpp"
using namespace ngraph;
// TODO: check validity, i.e. that all deleted_axes are valid
Coordinate ngraph::project_coordinate(const Coordinate& coord, const AxisSet& deleted_axes)
{
Coordinate result;
for (size_t i = 0; i < coord.size(); i++)
{
if (deleted_axes.find(i) == deleted_axes.end())
{
result.push_back(coord[i]);
}
}
return result;
}
Shape ngraph::project_shape(const Shape& shape, const AxisSet& deleted_axes)
{
return project_coordinate(shape, deleted_axes);
}
// TODO: for the moment, just one axis at a time, please. Later could pass in an std::map from axis positions to axis lengths.
// TODO: check validity, i.e. that the new axis is < coord_size+1.
Coordinate
ngraph::inject_coordinate(const Coordinate& coord, size_t new_axis_pos, size_t new_axis_val)
{
Coordinate result;
size_t original_pos = 0;
for (size_t result_pos = 0; result_pos < coord.size() + 1; result_pos++)
{
if (result_pos == new_axis_pos)
{
result.push_back(new_axis_val);
}
else
{
result.push_back(coord[original_pos++]);
}
}
return result;
}
Shape ngraph::inject_shape(const Shape& shape, size_t new_axis_pos, size_t new_axis_length)
{
return inject_coordinate(shape, new_axis_pos, new_axis_length);
}
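A hypothetical usage sketch (not part of the commit) showing what the two helpers compute, assuming Coordinate and AxisSet are the std::vector<size_t> and std::set<size_t> aliases from common.hpp:

#include <cassert>
#include "ngraph/common.hpp"

using namespace ngraph;

int main()
{
    // Deleting axis 1 drops the middle element of the coordinate.
    assert((project_coordinate(Coordinate{2, 4, 8}, AxisSet{1}) == Coordinate{2, 8}));

    // Injecting the value 5 at axis position 1 re-inserts a middle element.
    assert((inject_coordinate(Coordinate{2, 8}, 1, 5) == Coordinate{2, 5, 8}));
}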
......@@ -51,4 +51,10 @@ namespace ngraph
/// @brief Strides of a tensor
using Strides = std::vector<size_t>;
Coordinate project_coordinate(const Coordinate& coord, const AxisSet& deleted_axes);
Shape project_shape(const Shape& shape, const AxisSet& deleted_axes);
Coordinate inject_coordinate(const Coordinate& coord, size_t new_axis_pos, size_t new_axis_val);
Shape inject_shape(const Shape& shape, size_t new_axis_pos, size_t new_axis_length);
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include <cstdio>
#include <iostream>
#include <vector>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_iterator.hpp"
#include "ngraph/except.hpp"
using namespace ngraph;
CoordinateIterator::CoordinateIterator(const Shape& space_shape,
const Strides& strides,
const Coordinate& window_outer_corner,
const Coordinate& window_inner_corner)
: m_space_shape(space_shape)
, m_strides(strides)
, m_window_outer_corner(window_outer_corner)
, m_window_inner_corner(window_inner_corner)
, m_current_coordinate(window_inner_corner)
{
if (space_shape.size() != window_inner_corner.size())
{
throw ngraph_error("Coordinate iterator inner corner rank does not match space shape rank");
}
if (space_shape.size() != window_outer_corner.size())
{
throw ngraph_error("Coordinate iterator outer corner rank does not match space shape rank");
}
if (space_shape.size() != strides.size())
{
throw ngraph_error("Coordinate iterator stride rank does not match space shape rank");
}
for (size_t i = 0; i < space_shape.size(); i++)
{
if (window_inner_corner[i] > window_outer_corner[i])
{
throw ngraph_error("Coordinate iterator inner corner is outside outer corner");
}
if (window_inner_corner[i] >= m_space_shape[i])
{
throw ngraph_error("Coordinate iterator inner corner is out of bounds");
}
if (window_outer_corner[i] > m_space_shape[i])
{
throw ngraph_error("Coordinate iterator outer corner is out of bounds");
}
if (m_strides[i] == 0)
{
throw ngraph_error("Coordinate iterator stride is zero");
}
}
}
CoordinateIterator::CoordinateIterator(const Shape& space_shape)
: CoordinateIterator(space_shape,
Strides(space_shape.size(), 1),
space_shape,
Coordinate(space_shape.size(), 0))
{
}
CoordinateIterator::CoordinateIterator(const Shape& space_shape, const Strides& strides)
: CoordinateIterator(space_shape, strides, space_shape, Coordinate(space_shape.size(), 0))
{
}
size_t CoordinateIterator::get_current_index() const
{
size_t index = 0;
size_t stride = 1;
for (size_t i = m_space_shape.size(); i-- > 0;)
{
index += m_current_coordinate[i] * stride;
stride *= m_space_shape[i];
}
return index;
}
bool CoordinateIterator::increment()
{
bool overflow = true;
for (size_t i = m_space_shape.size(); i-- > 0;)
{
m_current_coordinate[i] += m_strides[i];
if (m_current_coordinate[i] >= m_window_outer_corner[i])
{
m_current_coordinate[i] = m_window_inner_corner[i];
}
else
{
overflow = false;
break;
}
}
return !overflow;
}
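For concreteness: with m_space_shape = {2,3,4}, get_current_index computes the row-major index i0*12 + i1*4 + i2, so the coordinate {1,2,3} maps to 1*12 + 2*4 + 3 = 23. increment advances the innermost axis by its stride and carries outward like an odometer, resetting each wrapped axis to the inner corner; it returns false exactly when every axis has wrapped, i.e. when the window has been exhausted.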
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include <cassert>
#include <cstdio>
#include <iostream>
#include <vector>
#include "ngraph/common.hpp"
namespace ngraph
{
class CoordinateTransform
{
public:
CoordinateTransform(const Shape& source_shape,
const Coordinate& source_start_corner,
const Coordinate& source_end_corner,
const Strides& source_strides,
const AxisVector& source_axis_order);
CoordinateTransform(const Shape& source_shape,
const Coordinate& source_start_corner,
const Coordinate& source_end_corner,
const Strides& source_strides);
CoordinateTransform(const Shape& source_shape,
const Coordinate& source_start_corner,
const Coordinate& source_end_corner);
CoordinateTransform(const Shape& source_space_shape);
size_t index(const Coordinate& c) const;
bool in_bounds(const Coordinate& c) const;
Coordinate get_target_shape() const;
class Iterator
{
public:
Iterator(const Shape& target_shape, bool is_end = false);
void operator++();
Iterator operator++(int);
void operator+=(size_t n);
Coordinate operator*();
bool operator!=(const Iterator& it);
bool operator==(const Iterator& it);
private:
Shape m_target_shape;
Shape m_axis_walk_order;
Coordinate m_coordinate;
bool m_oob;
bool m_empty;
};
Iterator begin() noexcept { return Iterator(m_target_shape); }
Iterator end() noexcept { return Iterator(m_target_shape, true); }
private:
Coordinate to_source_coordinate(const Coordinate& c) const;
size_t index_source(const Coordinate& c) const;
Shape m_source_space_shape;
Shape m_source_start_corner;
Shape m_source_end_corner;
Strides m_source_strides;
AxisVector m_source_axis_order;
Shape m_target_shape;
size_t m_n_axes;
};
}
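A minimal usage sketch for the renamed class. The semantics assumed here, namely that iteration visits coordinates of the target shape and that index() maps a target coordinate to the flattened index in the source buffer, are inferred from the interface rather than stated in the commit:

// Visit every second element of a 4x4 row-major source buffer `in` (assumed float*).
Shape source_shape{4, 4};
CoordinateTransform transform(source_shape,
                              Coordinate{0, 0}, // start corner (inclusive)
                              Coordinate{4, 4}, // end corner (exclusive)
                              Strides{2, 2});

for (const Coordinate& c : transform) // c ranges over the 2x2 target shape
{
    float element = in[transform.index(c)]; // flattened index into the source
}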
......@@ -45,7 +45,7 @@
#include "ngraph/builder/numpy_transpose.hpp"
#include "ngraph/builder/reduce_ops.hpp"
#include "ngraph/common.hpp"
#include "ngraph/coordinate_iterator.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/descriptor/buffer.hpp"
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
......
......@@ -35,12 +35,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = |\texttt{arg}[i_1,\dots,i_n]|\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------------------------- |
/// | NGVM | Implemented for signed types only. |
class Abs : public UnaryElementwiseArithmetic
{
public:
......
......@@ -35,12 +35,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \arccos(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Acos : public UnaryElementwiseArithmetic
{
public:
......
......@@ -36,12 +36,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] + \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Add : public BinaryElementwiseArithmetic
{
public:
......
......@@ -35,12 +35,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \arcsin(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Asin : public UnaryElementwiseArithmetic
{
public:
......
......@@ -35,12 +35,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \arctan(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Atan : public UnaryElementwiseArithmetic
{
public:
......
......@@ -49,13 +49,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T'\f$, where \f$T'[i_1,\dots,i_n] = T[del([i_1,\dots,i_n],A)]\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------- |
/// | NGVM | Implemented for scalars, matrices, and vectors. |
class Broadcast : public RequiresTensorViewArgs
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \lceil \texttt{arg}[i_1,\dots,i_n] \rceil\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Ceiling : public UnaryElementwiseArithmetic
{
public:
......
......@@ -84,7 +84,7 @@ void op::Concat::generate_adjoints(autodiff::Adjoints& adjoints, const std::shar
Coordinate arg_delta_slice_lower = Coordinate(concat_result_shape.size(), 0);
Coordinate arg_delta_slice_upper = concat_result_shape;
Coordinate arg_delta_slice_step = Coordinate(concat_result_shape.size(), 1);
Coordinate arg_delta_slice_strides = Coordinate(concat_result_shape.size(), 1);
size_t pos = 0;
......@@ -105,7 +105,7 @@ void op::Concat::generate_adjoints(autodiff::Adjoints& adjoints, const std::shar
adjoints.add_delta(
arg,
make_shared<op::Slice>(
delta, arg_delta_slice_lower, arg_delta_slice_upper, arg_delta_slice_step));
delta, arg_delta_slice_lower, arg_delta_slice_upper, arg_delta_slice_strides));
pos = next_pos;
}
......
......@@ -57,12 +57,6 @@ namespace ngraph
/// | Type | Description |
/// | ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_{a-1},\Sigma_i(d^i_a),d_{a+1},\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T\f$ is the concatenation of the input tensors along axis \f$a\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------------------------- |
/// | NGVM | Implemented for vectors and matrices. |
class Concat : public RequiresTensorViewArgs
{
public:
......
......@@ -37,9 +37,11 @@ namespace
((et) == element::Bool::element_type()) ? (f<element::Bool>(__VA_ARGS__)) : \
((et) == element::Float32::element_type()) ? (f<element::Float32>(__VA_ARGS__)) : \
((et) == element::Int8::element_type()) ? (f<element::Int8>(__VA_ARGS__)) : \
((et) == element::Int16::element_type()) ? (f<element::Int16>(__VA_ARGS__)) : \
((et) == element::Int32::element_type()) ? (f<element::Int32>(__VA_ARGS__)) : \
((et) == element::Int64::element_type()) ? (f<element::Int64>(__VA_ARGS__)) : \
((et) == element::UInt8::element_type()) ? (f<element::UInt8>(__VA_ARGS__)) : \
((et) == element::UInt16::element_type()) ? (f<element::UInt16>(__VA_ARGS__)) : \
((et) == element::UInt32::element_type()) ? (f<element::UInt32>(__VA_ARGS__)) : \
((et) == element::UInt64::element_type()) ? (f<element::UInt64>(__VA_ARGS__)) : \
(throw ngraph_error(err_msg)) \
......
......@@ -62,12 +62,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | A constant tensor with the specified element type, shape, and values. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
template <typename T>
class ParameterizedConstant : public ConstantBase
{
......@@ -110,8 +104,12 @@ namespace ngraph
/// \brief A 32-bit floating-point tensor constant.
using Float32Constant = ParameterizedConstant<element::Float32>;
/// \brief A 64-bit floating-point tensor constant.
using Float64Constant = ParameterizedConstant<element::Float64>;
/// \brief An 8-bit signed integer tensor constant.
using Int8Constant = ParameterizedConstant<element::Int8>;
/// \brief A 16-bit signed integer tensor constant.
using Int16Constant = ParameterizedConstant<element::Int16>;
/// \brief A 32-bit signed integer tensor constant.
using Int32Constant = ParameterizedConstant<element::Int32>;
/// \brief A 64-bit signed integer tensor constant.
......@@ -119,6 +117,8 @@ namespace ngraph
/// \brief An 8-bit unsigned integer tensor constant.
using UInt8Constant = ParameterizedConstant<element::UInt8>;
/// \brief A 16-bit unsigned integer tensor constant.
using UInt16Constant = ParameterizedConstant<element::UInt16>;
/// \brief A 32-bit unsigned integer tensor constant.
using UInt32Constant = ParameterizedConstant<element::UInt32>;
/// \brief A 64-bit unsigned integer tensor constant.
using UInt64Constant = ParameterizedConstant<element::UInt64>;
......@@ -140,12 +140,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | A constant tensor with the specified element type, shape, and values. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Constant : public ConstantBase
{
public:
......
......@@ -42,12 +42,6 @@ namespace ngraph
/// | Type | Description |
/// | ----------------------- | --------------------------------------------------------------------------------------------------------- |
/// | \f$E'[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathit{convert}_{(E,E')}(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Convert : public UnaryElementwise
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \cos(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Cos : public UnaryElementwiseArithmetic
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \cosh(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Cosh : public UnaryElementwiseArithmetic
{
public:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \mathbin{/} \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Divide : public BinaryElementwiseArithmetic
{
public:
......
......@@ -34,7 +34,7 @@ namespace ngraph
/// (Example: arg0 has shape {3,4} and arg1 has shape {4,3}; then the result will have shape {3,3}.)
///
///
/// = Case 1: Scalar-tensor product =
/// # Case 1: Scalar-tensor product
///
/// ## Inputs
///
......@@ -51,13 +51,7 @@ namespace ngraph
/// | ---------------------- | ---------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathtt{arg0} \cdot \mathtt{arg1}[i_1,\dots,i_n]\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
///
/// = Case 2: Vector-tensor product =
/// # Case 2: Vector-tensor product
///
/// ## Inputs
///
......@@ -67,20 +61,14 @@ namespace ngraph
/// | `arg1` | \f$E[d_1,\dots,d_n,d]~(n \geq 0)\f$ | A tensor of any shape whose innermost dimension matches `arg0`'s size, with the same element type as `arg0`. |
///
/// <i>(Note: in the particular case where \f$n = 0\f$, this is a vector dot product; when \f$n = 1\f$, this is a vector-matrix product.)</i>
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ---------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathtt{arg0} \cdot \mathtt{arg1}[i_1,\dots,i_n]\f$. |
///
/// ## Implementation Status
/// ## Output
///
/// | Backend | Status |
/// | ------- | ------------------------------------------- |
/// | NGVM | Implemented for `arg1` with rank 2 or less. |
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \Sigma_{0 \le k < d}(\mathtt{arg0}[k] \cdot \mathtt{arg1}[i_1,\dots,i_n,k])\f$. |
///
/// = Case 3: Tensor-tensor product =
/// # Case 3: Tensor-tensor product
///
/// ## Inputs
///
......@@ -93,15 +81,9 @@ namespace ngraph
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------|
/// | \f$E[d_1,\dots,d_{n-1},d'_1,\dots,d'_{m-2},d'_{m}]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_{n-1},j_1,\dots,j_{m-2},j_m] = \dots\f$ TODO: FIXME: finish this; but it's like numpy. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------------------------------------- |
/// | NGVM | Implemented for `arg1` with rank of exactly 2. |
/// | Type | Description |
/// | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
/// | \f$E[d_1,\dots,d_{n-1},d'_1,\dots,d'_{m-2},d'_{m}]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_{n-1},j_1,\dots,j_{m-2},j_m] = \Sigma_{0 \le k < d_n}(\texttt{arg0}[i_1,\dots,i_{n-1},k] \cdot \texttt{arg1}[j_1,\dots,j_{m-2},k,j_m])\f$ |
class Dot : public RequiresTensorViewArgs
{
public:
......
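For orientation, with \f$n = m = 2\f$ the Case 3 formula specializes to ordinary matrix multiplication, \f$T[i,j] = \Sigma_{0 \le k < d_2}(\texttt{arg0}[i,k] \cdot \texttt{arg1}[k,j])\f$, which is exactly the shape \f$\{3,4\}\f$ times shape \f$\{4,3\}\f$ gives shape \f$\{3,3\}\f$ example quoted at the top of this docstring.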
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Equal : public BinaryElementwiseComparison
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \exp(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Exp : public UnaryElementwiseArithmetic
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ---------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \lfloor \texttt{arg}[i_1,\dots,i_n] \rfloor\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Floor : public UnaryElementwiseArithmetic
{
public:
......
......@@ -39,12 +39,6 @@ namespace ngraph
/// | Type | Description |
/// | --------- | -------------------------------------------------------- |
/// | \f$T_R\f$ | The tensor returned by `function` when called on `args`. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class FunctionCall : public Node
{
public:
......
......@@ -39,12 +39,6 @@ namespace ngraph
/// | Type | Description |
/// | --------- | ------------------------------------- |
/// | \f$T_n\f$ | The `n`th element of the input tuple. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class GetTupleElement : public Node
{
public:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \gt \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Greater : public BinaryElementwiseComparison
{
public:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \geq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class GreaterEq : public BinaryElementwiseComparison
{
public:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \lt \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Less : public BinaryElementwiseComparison
{
public:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \leq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class LessEq : public BinaryElementwiseComparison
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \ln(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Log : public UnaryElementwiseArithmetic
{
public:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \max(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Maximum : public BinaryElementwiseArithmetic
{
public:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \min(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Minimum : public BinaryElementwiseArithmetic
{
public:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \cdot \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Multiply : public BinaryElementwiseArithmetic
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = -(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Negative : public UnaryElementwiseArithmetic
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg}[i_1,\dots,i_n] = 0\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Not : public UnaryElementwise
{
public:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class NotEqual : public BinaryElementwiseComparison
{
public:
......
......@@ -38,15 +38,8 @@ namespace ngraph
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------- |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T'\f$, where \f$T'[i_1,\dots,i_{m-1},i_m,i_{m+1},\dots,i_n] = 1\f$ if \f$T[i_1,\dots,i_{m-1},i_{m+1},\dots,i_n] = i_m\f$, else \f$0\f$. However, \f$T'\f$ is undefined if any non-integral value or any out-of-bounds value is detected in the input tensor. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
/// | NGVM | Fully implemented. NOTE: Execution throws `std::range_error` if either a non-integral value or an out-of-bounds value is detected in the input tensor. |
class OneHot : public RequiresTensorViewArgs
{
public:
......
......@@ -39,12 +39,6 @@ namespace ngraph
/// | Type | Description |
/// | ------- | --------------------------------------------------------------------------------------------------------------------------- |
/// | \f$T\f$ | The value of the parameter, supplied by the `FunctionCall` to this function or in the initial `ngraph::runtime::CallFrame`. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Parameter : public Node
{
protected:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n]^{\texttt{arg1}[i_1,\dots,i_n]}\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Power : public BinaryElementwiseArithmetic
{
public:
......
......@@ -80,13 +80,6 @@ namespace ngraph
/// | Type | Description |
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
/// | \f$E[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by reduction. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------------- |
/// | NGVM | Fully implemented for scalars, vectors, and matrices. |
class Reduce : public RequiresTensorViewArgs
{
public:
......
......@@ -20,7 +20,7 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise remainder operation.
/// \brief (NOT IMPLEMENTED) Elementwise remainder operation.
///
/// (TODO: Get a bit more clarity on this: is it just "mod"? What about negative numbers and floats?)
///
......@@ -36,12 +36,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \mod \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------- |
/// | NGVM | Not implemented. |
class Remainder : public BinaryElementwiseArithmetic
{
public:
......
......@@ -23,11 +23,11 @@ op::ReplaceSlice::ReplaceSlice(const std::shared_ptr<Node>& arg0,
const std::shared_ptr<Node>& arg1,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Shape& step)
const Strides& strides)
: RequiresTensorViewArgs("ReplaceSlice", {arg0, arg1})
, m_lower_bounds(lower_bounds)
, m_upper_bounds(upper_bounds)
, m_step(step)
, m_strides(strides)
{
check_args();
}
......@@ -39,7 +39,7 @@ op::ReplaceSlice::ReplaceSlice(const std::shared_ptr<Node>& arg0,
: RequiresTensorViewArgs("ReplaceSlice", {arg0, arg1})
, m_lower_bounds(lower_bounds)
, m_upper_bounds(upper_bounds)
, m_step(Shape(lower_bounds.size(), 1))
, m_strides(Strides(lower_bounds.size(), 1))
{
check_args();
}
......@@ -76,10 +76,10 @@ void op::ReplaceSlice::check_args()
"Number of upper bounds provided for slice does not match number of input axes");
}
if (m_step.size() != arg0_shape.size())
if (m_strides.size() != arg0_shape.size())
{
throw ngraph_error(
"Number of step axes provided for slice does not match number of input axes");
"Number of strides provided for slice does not match number of input axes");
}
Shape slice_shape;
......@@ -96,14 +96,14 @@ void op::ReplaceSlice::check_args()
throw ngraph_error("Lower bound for slice is greater than upper bound");
}
if (0 == m_step[i])
if (0 == m_strides[i])
{
throw ngraph_error("Step distance for slice is zero");
throw ngraph_error("Stride for slice is zero");
}
size_t slice_axis_size = m_upper_bounds[i] - m_lower_bounds[i];
slice_axis_size =
slice_axis_size / m_step[i] + ((slice_axis_size % m_step[i] == 0) ? 0 : 1);
slice_axis_size / m_strides[i] + ((slice_axis_size % m_strides[i] == 0) ? 0 : 1);
slice_shape.push_back(slice_axis_size);
}
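A worked instance of the axis-size computation above: with bounds [1,7) and stride 2 the slice takes indices 1, 3, and 5, and the formula gives 6/2 + 0 = 3; with stride 4 it takes indices 1 and 5, giving 6/4 + 1 = 2. In both cases this equals ceil((u_i - l_i) / s_i), the quantity the ReplaceSlice and Slice docstrings use for the expected shape of the slice.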
......@@ -128,7 +128,7 @@ void op::ReplaceSlice::generate_adjoints(autodiff::Adjoints& adjoints,
adjoints.add_delta(x,
std::make_shared<op::ReplaceSlice>(
delta, zeros_shaped_like_y, m_lower_bounds, m_upper_bounds, m_step));
adjoints.add_delta(y,
std::make_shared<op::Slice>(delta, m_lower_bounds, m_upper_bounds, m_step));
delta, zeros_shaped_like_y, m_lower_bounds, m_upper_bounds, m_strides));
adjoints.add_delta(
y, std::make_shared<op::Slice>(delta, m_lower_bounds, m_upper_bounds, m_strides));
}
......@@ -25,30 +25,24 @@ namespace ngraph
///
/// ## Parameters
///
/// | | Description |
/// | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | `lower_bounds` | The (inclusive) lower-bound coordinates \f$l_i\f$ for the tensor slice to be overwritten. For example, a lower-bound of \f$(1,2)\f$ means to start the slice at row 1 and column 2. |
/// | `upper_bounds` | The (non-inclusive) upper-bound coordinates \f$u_i\f$ for the tensor slice to be overwritten. For example, an upper-bound of \f$(5,4)\f$ means to end the slice before row 4 and column 3. |
/// | `step` | The "step" or "stride" \f$s_i\f$ for the tensor slice to be overwritten. For example, a stride of \f$(1,3)\f$ means to take every row, and every third column (starting at the lower bound). |
/// | | Description |
/// | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | `lower_bounds` | The (inclusive) lower-bound coordinates \f$l_i\f$ for the tensor slice to be overwritten. For example, a lower-bound of \f$(1,2)\f$ means to start the slice at row 1 and column 2. |
/// | `upper_bounds` | The (non-inclusive) upper-bound coordinates \f$u_i\f$ for the tensor slice to be overwritten. For example, an upper-bound of \f$(5,4)\f$ means to end the slice before row 5 and column 4. |
/// | `strides` | The strides \f$s_i\f$ for the tensor slice to be overwritten. For example, in the matrix case, strides of \f$(1,3)\f$ means to take every row, and every third column (starting at the lower bound). |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | ------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------- |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d'_1,\dots,d'_n]\f$ where \f$(d'_i = \lceil(u_i - l_i)\, /\, s_i\rceil\f$ | A tensor of the same element type and rank as `arg0`, whose shape is determined by the lower and upper slice bounds and slice step. |
/// | | Type | Description |
/// | ------ | ------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------- |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d'_1,\dots,d'_n]\f$ where \f$d'_i = \lceil(u_i - l_i)\, /\, s_i\rceil\f$ | A tensor of the same element type and rank as `arg0`, whose shape is determined by the lower and upper slice bounds and slice strides. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$ where \f$T[i_1,\dots,i_n] = \texttt{arg1}[j_1,\dots,j_n]\f$ if \f$j_1,\dots,j_n\f$ is in bounds for `arg1` and for all \f$m\f$, \f$i_m = l_m + j_m s_m\f$, otherwise \f$\texttt{arg0}[i_1,\dots,i_n]\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------- |
/// | NGVM | Implemented for scalars, matrices, and vectors. |
class ReplaceSlice : public RequiresTensorViewArgs
{
public:
......@@ -58,16 +52,16 @@ namespace ngraph
/// \param arg1 The tensor view to write into `arg0`.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param step The slicing step; for example, step of `{n,m}` means to take
/// every nth row and every mth column of `arg0` as part of the
/// slice to be replaced.
/// \param strides The slicing strides; for example, strides of `{n,m}` means to take
/// every nth row and every mth column of `arg0` as part of the
/// slice to be replaced.
ReplaceSlice(const std::shared_ptr<Node>& arg0,
const std::shared_ptr<Node>& arg1,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Shape& step);
const Strides& strides);
/// \brief Constructs a tensor slice replacement operation with unit step; i.e., every element inside the bounding box will be overwritten.
/// \brief Constructs a tensor slice replacement operation with unit strides; i.e., every element inside the bounding box will be overwritten.
///
/// \param arg0 The tensor view to overwrite into.
/// \param arg1 The tensor view to write into `arg0`.
......@@ -84,15 +78,15 @@ namespace ngraph
if (new_args.size() != 2)
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<ReplaceSlice>(
new_args.at(0), new_args.at(1), m_lower_bounds, m_upper_bounds, m_step);
new_args.at(0), new_args.at(1), m_lower_bounds, m_upper_bounds, m_strides);
}
/// \return The inclusive lower-bound coordinates.
const Coordinate& get_lower_bounds() const { return m_lower_bounds; }
/// \return The exclusive upper-bound coordinates.
const Coordinate& get_upper_bounds() const { return m_upper_bounds; }
/// \return The slicing step.
const Shape& get_step() const { return m_step; }
/// \return The slicing strides.
const Strides& get_strides() const { return m_strides; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......@@ -100,7 +94,7 @@ namespace ngraph
const Coordinate m_lower_bounds;
const Coordinate m_upper_bounds;
const Shape m_step;
const Strides m_strides;
};
}
}
......@@ -53,12 +53,6 @@ namespace ngraph
/// | Type | Description |
/// | ------------------------ | ------------------------------------------------------------------------------------------------------ |
/// | \f$E[d'_1,\dots,d'_m]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with its elements rearranged as described above. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | NGVM | Fully implemented for scalars, vectors, and matrices. Implemented for other shapes only when there is no reordering of the input axes, i.e. `input_order` is \f$(0,\dots,n-1)\f$. |
class Reshape : public RequiresTensorViewArgs
{
public:
......
......@@ -35,12 +35,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq 0\text{, else }\texttt{arg2}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Select : public RequiresTensorViewArgs
{
public:
......
......@@ -35,12 +35,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \text{sgn}(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Sign : public UnaryElementwiseArithmetic
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sin(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Sin : public UnaryElementwiseArithmetic
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sinh(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Sinh : public UnaryElementwiseArithmetic
{
public:
......
......@@ -20,11 +20,11 @@ using namespace ngraph;
op::Slice::Slice(const std::shared_ptr<Node>& arg,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Shape& step)
const Strides& strides)
: RequiresTensorViewArgs("Slice", {arg})
, m_lower_bounds(lower_bounds)
, m_upper_bounds(upper_bounds)
, m_step(step)
, m_strides(strides)
{
check_args();
}
......@@ -35,7 +35,7 @@ op::Slice::Slice(const std::shared_ptr<Node>& arg,
: RequiresTensorViewArgs("Slice", {arg})
, m_lower_bounds(lower_bounds)
, m_upper_bounds(upper_bounds)
, m_step(Shape(lower_bounds.size(), 1))
, m_strides(Strides(lower_bounds.size(), 1))
{
check_args();
}
......@@ -57,10 +57,10 @@ void op::Slice::check_args()
"Number of upper bounds provided for slice does not match number of input axes");
}
if (m_step.size() != arg_shape.size())
if (m_strides.size() != arg_shape.size())
{
throw ngraph_error(
"Number of step axes provided for slice does not match number of input axes");
"Number of strides provided for slice does not match number of input axes");
}
Shape result_shape;
......@@ -77,14 +77,14 @@ void op::Slice::check_args()
throw ngraph_error("Lower bound for slice is greater than upper bound");
}
if (0 == m_step[i])
if (0 == m_strides[i])
{
throw ngraph_error("Step distance for slice is zero");
throw ngraph_error("Strides distance for slice is zero");
}
size_t result_axis_size = m_upper_bounds[i] - m_lower_bounds[i];
result_axis_size =
result_axis_size / m_step[i] + ((result_axis_size % m_step[i] == 0) ? 0 : 1);
result_axis_size / m_strides[i] + ((result_axis_size % m_strides[i] == 0) ? 0 : 1);
result_shape.push_back(result_axis_size);
}
......@@ -96,5 +96,5 @@ void op::Slice::generate_adjoints(autodiff::Adjoints& adjoints, const std::share
{
auto x = get_inputs().at(0).get_output().get_node();
adjoints.add_delta_to_slice(x, delta, m_lower_bounds, m_upper_bounds, m_step);
adjoints.add_delta_to_slice(x, delta, m_lower_bounds, m_upper_bounds, m_strides);
}
......@@ -29,11 +29,11 @@ namespace ngraph
///
/// ## Parameters
///
/// | | Description |
/// | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | `lower_bounds` | The (inclusive) lower-bound coordinates \f$l_i\f$ for the tensor slice. For example, a lower-bound of \f$(1,2)\f$ means to start the slice at row 1 and column 2. |
/// | `upper_bounds` | The (non-inclusive) upper-bound coordinates \f$u_i\f$ for the tensor slice. For example, an upper-bound of \f$(5,4)\f$ means to end the slice before row 4 and column 3. |
/// | `step` | The "step" or "stride" \f$s_i\f$ for the tensor slice. For example, a stride of \f$(1,3)\f$ means to take every row, and every third column (starting at the lower bound). |
/// | | Description |
/// | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | `lower_bounds` | The (inclusive) lower-bound coordinates \f$l_i\f$ for the tensor slice. For example, a lower-bound of \f$(1,2)\f$ means to start the slice at row 1 and column 2. |
/// | `upper_bounds` | The (non-inclusive) upper-bound coordinates \f$u_i\f$ for the tensor slice. For example, an upper-bound of \f$(5,4)\f$ means to end the slice before row 5 and column 4. |
/// | `strides` | The strides \f$s_i\f$ for the tensor slice. For example, in the matrix case, strides of \f$(1,3)\f$ means to take every row, and every third column (starting at the lower bound). |
///
/// ## Inputs
///
......@@ -46,12 +46,6 @@ namespace ngraph
/// | Type | Description |
/// | ------------------------------------------------------------------------------ | --------------------------------- |
/// | \f$E[d'_1,\dots,d'_n]\f$ where \f$d'_i = \lceil(u_i - l_i)\, /\, s_i\rceil\f$. | The tensor sliced from the input. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------- |
/// | NGVM | Implemented for scalars, matrices, and vectors. |
class Slice : public RequiresTensorViewArgs
{
public:
......@@ -60,14 +54,14 @@ namespace ngraph
/// \param arg The tensor view to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param step The slicing step; for example, step of `{n,m}` means to take
/// every nth row and every mth column of the input matrix.
/// \param strides The slicing strides; for example, strides of `{n,m}` means to take
/// every nth row and every mth column of the input matrix.
Slice(const std::shared_ptr<Node>& arg,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Shape& step);
const Strides& strides);
/// \brief Constructs a tensor slice operation with unit step; i.e., every element inside the bounding box will be copied to the output slice.
/// \brief Constructs a tensor slice operation with unit strides; i.e., every element inside the bounding box will be copied to the output slice.
///
/// \param arg The tensor view to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
......@@ -82,15 +76,15 @@ namespace ngraph
if (new_args.size() != 1)
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Slice>(
new_args.at(0), m_lower_bounds, m_upper_bounds, m_step);
new_args.at(0), m_lower_bounds, m_upper_bounds, m_strides);
}
/// \return The inclusive lower-bound coordinates.
const Coordinate& get_lower_bounds() const { return m_lower_bounds; }
/// \return The exclusive upper-bound coordinates.
const Coordinate& get_upper_bounds() const { return m_upper_bounds; }
/// \return The slicing step.
const Shape& get_step() const { return m_step; }
/// \return The slicing strides.
const Strides& get_strides() const { return m_strides; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
......@@ -98,7 +92,7 @@ namespace ngraph
const Coordinate m_lower_bounds;
const Coordinate m_upper_bounds;
const Shape m_step;
const Strides m_strides;
};
}
}
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sqrt{\texttt{arg}[i_1,\dots,i_n]}\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Sqrt : public UnaryElementwiseArithmetic
{
public:
......
......@@ -34,12 +34,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] - \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Subtract : public BinaryElementwiseArithmetic
{
public:
......
......@@ -74,12 +74,6 @@ namespace ngraph
/// | Type | Description |
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------------- |
/// | NGVM | Fully implemented for scalars, vectors, and matrices. |
class Sum : public RequiresTensorViewArgs
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tan(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Tan : public UnaryElementwiseArithmetic
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tanh(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Tanh : public UnaryElementwiseArithmetic
{
public:
......
......@@ -33,12 +33,6 @@ namespace ngraph
/// | Type | Description |
/// | --------------------- | ---------------------------------------------------------- |
/// | \f$(T_1,\dots,T_n)\f$ | The tuple \f$(\texttt{args}[0],\dots,\texttt{args}[n])\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Tuple : public Node
{
public:
......
......@@ -41,6 +41,10 @@ std::shared_ptr<TensorView>
{
rc = std::make_shared<ParameterizedTensorView<element::Int8>>(shape);
}
else if (element_type == element::Int16::element_type())
{
rc = std::make_shared<ParameterizedTensorView<element::Int16>>(shape);
}
else if (element_type == element::Int32::element_type())
{
rc = std::make_shared<ParameterizedTensorView<element::Int32>>(shape);
......@@ -53,6 +57,10 @@ std::shared_ptr<TensorView>
{
rc = std::make_shared<ParameterizedTensorView<element::UInt8>>(shape);
}
else if (element_type == element::UInt16::element_type())
{
rc = std::make_shared<ParameterizedTensorView<element::UInt16>>(shape);
}
else if (element_type == element::UInt32::element_type())
{
rc = std::make_shared<ParameterizedTensorView<element::UInt32>>(shape);
......
......@@ -1189,11 +1189,11 @@ void Emitter::EmitSlice(const ngraph::Node* n,
{
auto slice = static_cast<const op::Slice*>(n);
for (auto d : slice->get_step())
for (auto d : slice->get_strides())
{
if (1 != d)
{
throw ngraph_error("Slice does not support non-unit step yet");
throw ngraph_error("Slice does not support non-unit strides yet");
}
}
......@@ -1469,11 +1469,11 @@ void Emitter::EmitReplaceSlice(const ngraph::Node* n,
{
auto replace_slice = static_cast<const op::ReplaceSlice*>(n);
for (auto d : replace_slice->get_step())
for (auto d : replace_slice->get_strides())
{
if (1 != d)
{
throw ngraph_error("Replace-slice does not support non-unit step yet");
throw ngraph_error("Replace-slice does not support non-unit strides yet");
}
}
......
......@@ -17,7 +17,7 @@
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_iterator.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
......@@ -32,45 +32,16 @@ namespace ngraph
const Shape& out_shape,
const AxisSet& broadcast_axes)
{
// For the outer loop we will walk over the entire input shape.
CoordinateIterator arg_iter(in_shape);
CoordinateTransform input_transform(in_shape);
CoordinateTransform output_transform(out_shape);
do
for (Coordinate output_coord : output_transform)
{
// For the inner loop we will walk across the entire axis for the new broadcast axes, and stay put at the current arg position for the existing axes.
Coordinate arg_coordinate = arg_iter.get_current_coordinate();
Coordinate input_coord = project_coordinate(output_coord, broadcast_axes);
Strides out_strides(out_shape.size(), 1);
Coordinate out_outer_corner(out_shape.size());
Coordinate out_inner_corner(out_shape.size());
size_t arg_pos = 0;
for (size_t i = 0; i < out_shape.size(); i++)
{
if (broadcast_axes.find(i) == broadcast_axes.end())
{
// This is an existing axis.
out_outer_corner[i] = arg_coordinate[arg_pos];
out_inner_corner[i] = arg_coordinate[arg_pos];
arg_pos++;
}
else
{
// This is a new broadcast axis.
out_outer_corner[i] = out_shape[i];
out_inner_corner[i] = 0;
}
}
CoordinateIterator out_iter(
out_shape, out_strides, out_outer_corner, out_inner_corner);
do
{
out[out_iter.get_current_index()] = arg[arg_iter.get_current_index()];
} while (out_iter.increment());
} while (arg_iter.increment());
out[output_transform.index(output_coord)] =
arg[input_transform.index(input_coord)];
}
}
}
}
......
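A minimal sketch of calling the new broadcast kernel directly. The leading parameters of the signature are truncated by the hunk above, so the (arg, out, in_shape, out_shape, broadcast_axes) order is assumed from the body:
float arg[] = {1, 2, 3};    // shape {3}
float out[6];               // shape {2, 3}
// Axis 0 is new; project_coordinate drops it from each output coordinate
// to locate the matching input element, so the row is repeated.
kernel::broadcast<float>(arg, out, Shape{3}, Shape{2, 3}, AxisSet{0});
// out == {1, 2, 3, 1, 2, 3}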
......@@ -14,50 +14,57 @@
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
namespace kernel
{
namespace eigen
template <typename T>
void concat(const std::vector<T*>& args,
T* out,
const std::vector<Shape>& in_shapes,
const Shape& out_shape,
size_t concatenation_axis)
{
template <typename ET>
class ReplaceVectorSliceInstruction : public Instruction
// We will copy the inputs to the output one at a time. As we go, we will advance along the
// output's concatenation axis, starting at position 0.
size_t concatenation_pos = 0;
for (size_t i = 0; i < args.size(); i++)
{
public:
ReplaceVectorSliceInstruction(const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out,
size_t lower,
size_t upper)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
, m_lower(lower)
, m_upper(upper)
{
}
// The start coordinate for the copy is (0,...,0) except at the concatenation axis.
Coordinate out_start_coord = Coordinate(out_shape.size(), 0);
out_start_coord[concatenation_axis] = concatenation_pos;
virtual void execute(CallFrame& call_frame) const override
// The end coordinate for the copy is the same as the output shape except at the
// concatenation axis.
Coordinate out_end_coord = out_shape;
out_end_coord[concatenation_axis] =
concatenation_pos + in_shapes[i][concatenation_axis];
CoordinateTransform input_transform(in_shapes[i]);
CoordinateTransform output_chunk_transform(
out_shape, out_start_coord, out_end_coord);
CoordinateTransform::Iterator output_chunk_it = output_chunk_transform.begin();
for (Coordinate input_coord : input_transform)
{
EigenVector<ET>(call_frame, m_out) = EigenVector<ET>(call_frame, m_arg0);
EigenVector<ET>(call_frame, m_out).segment(m_lower, m_upper - m_lower) =
EigenVector<ET>(call_frame, m_arg1);
size_t input_index = input_transform.index(input_coord);
size_t output_chunk_index = output_chunk_transform.index(*output_chunk_it);
++output_chunk_it;
out[output_chunk_index] = args[i][input_index];
}
protected:
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
size_t m_lower;
size_t m_upper;
};
concatenation_pos += in_shapes[i][concatenation_axis];
}
}
}
}
......
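A small usage sketch of the kernel above, concatenating two 2x2 matrices along axis 1 (buffers and values illustrative; the kernel header is assumed included):
float a[] = {1, 2, 3, 4};    // shape {2, 2}
float b[] = {5, 6, 7, 8};    // shape {2, 2}
float out[8];                // shape {2, 4}
std::vector<float*> args{a, b};
std::vector<Shape> in_shapes{Shape{2, 2}, Shape{2, 2}};
kernel::concat<float>(args, out, in_shapes, Shape{2, 4}, 1);
// out == {1, 2, 5, 6, 3, 4, 7, 8}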
......@@ -14,17 +14,41 @@
#pragma once
#include <cstddef>
#include <stdexcept>
#include <type_traits>
namespace ngraph
{
namespace runtime
{
namespace kernel
{
// NOTE: The integral-type overload throws `std::domain_error` if division by zero is
// attempted. The floating-point overload performs no such check.
// In English: return type is void and T must be an integral type.
template <typename T>
typename std::enable_if<std::is_integral<T>::value>::type
divide(T* arg0, T* arg1, T* out, size_t count)
{
for (size_t i = 0; i < count; i++)
{
if (arg1[i] == 0)
{
throw std::domain_error("integer division by zero");
}
out[i] = arg0[i] / arg1[i];
}
}
// In English: return type is void and T must be a floating point type.
template <typename T>
void divide(T* arg0, T* arg1, T* out, size_t count)
typename std::enable_if<std::is_floating_point<T>::value>::type
divide(T* arg0, T* arg1, T* out, size_t count)
{
for (size_t i = 0; i < count; i++)
{
// TODO: Here we do not check for div by zero, so we'll get +-inf here
// if arg1[i] == 0. Is that the right thing to do? Jury's still out.
out[i] = arg0[i] / arg1[i];
}
}
......
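A sketch of how the two std::enable_if overloads above dispatch; the try/catch harness is illustrative:
int    ia[] = {6, 7}, ib[] = {3, 0}, io[2];
double da[] = {1.0},  db[] = {0.0},  dout[1];
try
{
    kernel::divide<int>(ia, ib, io, 2);    // integral overload: throws std::domain_error at i == 1
}
catch (const std::domain_error&)
{
}
// Floating-point overload: no zero check; dout[0] becomes +inf.
kernel::divide<double>(da, db, dout, 1);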
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void dot(T* arg0,
T* arg1,
T* out,
const Shape& arg0_shape,
const Shape& arg1_shape,
const Shape& out_shape,
size_t arg0_dot_axis,
size_t arg1_dot_axis)
{
CoordinateTransform output_transform(out_shape);
for (Coordinate out_coord : output_transform)
{
out[output_transform.index(out_coord)] = 0;
}
CoordinateTransform arg0_transform(arg0_shape);
CoordinateTransform arg1_transform(arg1_shape);
CoordinateTransform arg0_projected_transform(
project_shape(arg0_shape, AxisSet{arg0_dot_axis}));
CoordinateTransform arg1_projected_transform(
project_shape(arg1_shape, AxisSet{arg1_dot_axis}));
for (Coordinate arg0_projected_coord : arg0_projected_transform)
{
for (Coordinate arg1_projected_coord : arg1_projected_transform)
{
for (size_t i = 0; i < arg0_shape[arg0_dot_axis]; i++)
{
Coordinate arg0_coord =
inject_coordinate(arg0_projected_coord, arg0_dot_axis, i);
Coordinate arg1_coord =
inject_coordinate(arg1_projected_coord, arg1_dot_axis, i);
Coordinate out_coord(arg0_projected_coord.size() +
arg1_projected_coord.size());
std::copy(arg0_projected_coord.begin(),
arg0_projected_coord.end(),
out_coord.begin());
std::copy(arg1_projected_coord.begin(),
arg1_projected_coord.end(),
out_coord.begin() + arg0_projected_coord.size());
out[output_transform.index(out_coord)] +=
arg0[arg0_transform.index(arg0_coord)] *
arg1[arg1_transform.index(arg1_coord)];
}
}
}
}
}
}
}
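A quick check of the projected-coordinate formulation above, contracting axis 1 of a 2x3 against axis 0 of a 3x2 (values illustrative):
float a[] = {1, 2, 3, 4, 5, 6};    // shape {2, 3}
float b[] = {1, 0, 0, 1, 1, 1};    // shape {3, 2}
float c[4];                        // shape {2, 2}
kernel::dot<float>(a, b, c, Shape{2, 3}, Shape{3, 2}, Shape{2, 2}, 1, 0);
// c == {4, 5, 10, 11}, i.e. the ordinary matrix product.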
......@@ -17,7 +17,7 @@
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_iterator.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
......@@ -25,71 +25,45 @@ namespace ngraph
{
namespace kernel
{
// NOTE: Execution throws `std::range_error` if either a non-integral value or an out-of-bounds
// value is detected in the input tensor.
template <typename T>
void one_hot(
T* arg, T* out, const Shape& in_shape, const Shape& out_shape, size_t one_hot_axis)
{
// For the outer loop we will walk over the entire input shape.
CoordinateIterator arg_iter(in_shape);
// Step 1: Zero out the output.
CoordinateTransform output_transform(out_shape);
do
for (Coordinate output_coord : output_transform)
{
// For the inner loop we will walk across the entire axis for the one-hot axis, and stay put at the current arg position for the existing axes.
Coordinate arg_coordinate = arg_iter.get_current_coordinate();
out[output_transform.index(output_coord)] = 0;
}
Strides out_strides(out_shape.size(), 1);
Coordinate out_outer_corner(out_shape.size());
Coordinate out_inner_corner(out_shape.size());
// Step 2: Write ones at needed positions, throwing exceptions when invalid conditions
// are encountered.
CoordinateTransform input_transform(in_shape);
size_t arg_pos = 0;
for (Coordinate input_coord : input_transform)
{
T val = arg[input_transform.index(input_coord)];
for (size_t i = 0; i < out_shape.size(); i++)
if (std::floor(val) < val || std::floor(val) > val)
{
if (i != one_hot_axis)
{
// This is an existing axis.
out_outer_corner[i] = arg_coordinate[arg_pos];
out_inner_corner[i] = arg_coordinate[arg_pos];
arg_pos++;
}
else
{
// This is the one-hot axis.
out_outer_corner[i] = out_shape[i];
out_inner_corner[i] = 0;
}
throw(std::range_error("One-hot: non-integral value in input"));
}
CoordinateIterator out_iter(
out_shape, out_strides, out_outer_corner, out_inner_corner);
bool found = false;
size_t one_hot_pos = static_cast<size_t>(val);
do
if (one_hot_pos >= out_shape[one_hot_axis])
{
auto out_index = out_iter.get_current_index();
auto one_hot_pos = out_iter.get_current_coordinate()[one_hot_axis];
auto in_index = arg_iter.get_current_index();
throw(std::range_error("One-hot: value is out of category range"));
}
// The weird test for equality here is because this template winds up being
// instantiated for floating-point types, and clang complains if you try to
// == on a float.
if (arg[in_index] <= one_hot_pos && arg[in_index] >= one_hot_pos)
{
out[out_index] = 1;
found = true;
}
else
{
out[out_index] = 0;
}
} while (out_iter.increment());
Coordinate one_hot_coord =
inject_coordinate(input_coord, one_hot_axis, one_hot_pos);
if (!found)
{
throw std::range_error("One-hot: value is out of category range");
}
} while (arg_iter.increment());
out[output_transform.index(one_hot_coord)] = 1;
}
}
}
}
......
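A usage sketch. (The floor-based comparison above stands in for floor(val) != val because, as the deleted comment noted, this template is also instantiated at floating-point types, where direct equality comparisons draw compiler warnings.)
float arg[] = {0, 1, 3};    // category indices, shape {3}
float out[12];              // shape {3, 4}, one_hot_axis = 1
kernel::one_hot<float>(arg, out, Shape{3}, Shape{3, 4}, 1);
// out == {1,0,0,0,  0,1,0,0,  0,0,0,1}
// An input of {0, 1, 4} or {0, 1.5, 3} would throw std::range_error.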
......@@ -14,43 +14,43 @@
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
namespace kernel
{
namespace eigen
template <typename T>
void reduce(T* arg0,
T* arg1, // TODO: really we should just pass a T here.
T* out,
const Shape& in_shape,
const Shape& out_shape,
const AxisSet& reduction_axes,
std::function<T(T, T)> reduction_function)
{
template <typename ET>
class MatrixMultInstruction : public Instruction
CoordinateTransform output_transform(out_shape);
for (Coordinate output_coord : output_transform)
{
out[output_transform.index(output_coord)] = *arg1;
}
CoordinateTransform input_transform(in_shape);
for (Coordinate input_coord : input_transform)
{
public:
MatrixMultInstruction(const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenMatrix<ET>(call_frame, m_out) = EigenMatrix<ET>(call_frame, m_arg0) *
EigenMatrix<ET>(call_frame, m_arg1);
}
protected:
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
Coordinate output_coord = project_coordinate(input_coord, reduction_axes);
size_t input_index = input_transform.index(input_coord);
size_t output_index = output_transform.index(output_coord);
out[output_index] = reduction_function(out[output_index], arg0[input_index]);
}
}
}
}
......
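A sketch of a max-reduction over axis 1 of a 2x3 input; arg1 supplies the initial (identity) value, per the TODO above (std::numeric_limits and std::max assumed in scope via <limits> and <algorithm>):
float arg[] = {1, 5, 2, 7, 3, 4};                      // shape {2, 3}
float init  = std::numeric_limits<float>::lowest();    // identity for max
float out[2];                                          // shape {2}
kernel::reduce<float>(arg, &init, out, Shape{2, 3}, Shape{2}, AxisSet{1},
                      [](float x, float y) { return std::max(x, y); });
// out == {5, 7}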
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void replace_slice(T* arg0, // replacement context
T* arg1, // replacement value
T* out,
const Shape& arg1_shape,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Strides& strides,
const Shape& out_shape)
{
// Step 1: Copy the entire replacement context to the output.
CoordinateTransform copy_transform(out_shape);
for (Coordinate copy_coord : copy_transform)
{
out[copy_transform.index(copy_coord)] = arg0[copy_transform.index(copy_coord)];
}
// Step 2: Overwrite the slice for replacement.
CoordinateTransform input_transform(arg1_shape);
CoordinateTransform output_transform(
out_shape, lower_bounds, upper_bounds, strides);
CoordinateTransform::Iterator output_it = output_transform.begin();
for (Coordinate input_coord : input_transform)
{
Coordinate output_coord = *output_it++;
out[output_transform.index(output_coord)] =
arg1[input_transform.index(input_coord)];
}
}
}
}
}
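A strided usage sketch; note that output_transform's iteration domain must contain exactly as many points as arg1 has elements (values illustrative):
float ctx[] = {0, 0, 0, 0, 0, 0};    // replacement context, shape {6}
float rep[] = {9, 9};                // replacement value, shape {2}
float out[6];
kernel::replace_slice<float>(ctx, rep, out, Shape{2},
                             Coordinate{1}, Coordinate{5}, Strides{2},
                             Shape{6});
// The slice visits positions 1 and 3 (ceil((5 - 1) / 2) == 2 points):
// out == {0, 9, 0, 9, 0, 0}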
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void reshape(T* arg,
T* out,
const Shape& in_shape,
const AxisVector& in_axis_order,
const Shape& out_shape)
{
// Unfortunately we don't yet have a constructor for CoordinateTransform that lets us pass
// only source_space_shape and source_axis_order, so we have to construct the defaults here.
Shape in_start_corner(in_shape.size(), 0); // (0,...,0)
Shape in_strides(in_shape.size(), 1); // (1,...,1)
CoordinateTransform input_transform(
in_shape, in_start_corner, in_shape, in_strides, in_axis_order);
CoordinateTransform output_transform(out_shape);
CoordinateTransform::Iterator output_it = output_transform.begin();
for (Coordinate input_coord : input_transform)
{
Coordinate output_coord = *output_it++;
out[output_transform.index(output_coord)] =
arg[input_transform.index(input_coord)];
}
}
}
}
}
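Because in_axis_order controls the order in which the source tensor is read, passing {1,0} makes this kernel a transpose; a sketch:
float arg[] = {1, 2, 3, 4, 5, 6};    // shape {2, 3}
float out[6];                        // shape {3, 2}
kernel::reshape<float>(arg, out, Shape{2, 3}, AxisVector{1, 0}, Shape{3, 2});
// out == {1, 4, 2, 5, 3, 6}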
......@@ -14,36 +14,28 @@
#pragma once
#include <cstdio>
#include <iostream>
#include <vector>
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
class CoordinateIterator
namespace runtime
{
public:
CoordinateIterator(const Shape& space_shape,
const Strides& strides,
const Coordinate& window_outer_corner,
const Coordinate& window_inner_corner);
CoordinateIterator(const Shape& space_shape);
CoordinateIterator(const Shape& space_shape, const Strides& strides);
Coordinate get_current_coordinate() const { return m_current_coordinate; }
size_t get_current_index() const;
bool increment();
private:
const Shape m_space_shape;
const Strides m_strides;
const Coordinate m_window_outer_corner;
const Coordinate m_window_inner_corner;
Coordinate m_current_coordinate;
};
namespace kernel
{
template <typename T>
void scalar_tensor_product(T* arg0, // the scalar (TODO: just pass as T?)
T* arg1, // the tensor
T* out,
size_t count)
{
for (size_t i = 0; i < count; i++)
{
out[i] = (*arg0) * arg1[i];
}
}
}
}
}
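Trivial as it is, a one-line usage sketch:
float s = 2, t[] = {1, 2, 3}, out[3];
kernel::scalar_tensor_product<float>(&s, t, out, 3);    // out == {2, 4, 6}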
......@@ -14,39 +14,37 @@
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
namespace kernel
{
namespace eigen
template <typename T>
void slice(T* arg,
T* out,
const Shape& arg_shape,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Strides& strides,
const Shape& out_shape)
{
template <typename ET>
class SumToScalarInstruction : public Instruction
{
public:
SumToScalarInstruction(const TensorViewInfo& arg, const TensorViewInfo& out)
: m_arg(arg)
, m_out(out)
{
}
CoordinateTransform input_transform(arg_shape, lower_bounds, upper_bounds, strides);
CoordinateTransform output_transform(out_shape);
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET>(call_frame, m_out) =
EigenArray1d<ET>(call_frame, m_arg).sum();
}
CoordinateTransform::Iterator output_it = output_transform.begin();
for (Coordinate in_coord : input_transform)
{
Coordinate out_coord = *output_it++;
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
out[output_transform.index(out_coord)] = arg[input_transform.index(in_coord)];
}
}
}
}
......
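A strided 1-D sketch matching the bounds/strides semantics documented for op::Slice earlier in this diff:
float arg[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};    // shape {10}
float out[3];                                    // shape {3}
kernel::slice<float>(arg, out, Shape{10},
                     Coordinate{2}, Coordinate{9}, Strides{3}, Shape{3});
// Visits indices 2, 5, 8 (ceil((9 - 2) / 3) == 3 points): out == {2, 5, 8}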
......@@ -14,39 +14,40 @@
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
namespace kernel
{
namespace eigen
template <typename T>
void sum(T* arg,
T* out,
const Shape& in_shape,
const Shape& out_shape,
const AxisSet& reduction_axes)
{
template <typename ET>
class SumMatrixRowsInstruction : public Instruction
CoordinateTransform output_transform(out_shape);
for (Coordinate output_coord : output_transform)
{
out[output_transform.index(output_coord)] = 0;
}
CoordinateTransform input_transform(in_shape);
for (Coordinate input_coord : input_transform)
{
public:
SumMatrixRowsInstruction(const TensorViewInfo& arg, const TensorViewInfo& out)
: m_arg(arg)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenVector<ET>(call_frame, m_out) =
EigenMatrix<ET>(call_frame, m_arg).rowwise().sum();
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
Coordinate output_coord = project_coordinate(input_coord, reduction_axes);
out[output_transform.index(output_coord)] +=
arg[input_transform.index(input_coord)];
}
}
}
}
......
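A sketch that eliminates axis 0 of a 2x3 by summation, matching the op::Sum docstring earlier in this diff:
float arg[] = {1, 2, 3, 4, 5, 6};    // shape {2, 3}
float out[3];                        // shape {3}
kernel::sum<float>(arg, out, Shape{2, 3}, Shape{3}, AxisSet{0});
// out == {5, 7, 9}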
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class ConcatMatrixInstruction : public Instruction
{
public:
ConcatMatrixInstruction(const std::vector<TensorViewInfo>& args,
size_t axis,
const TensorViewInfo& out)
: m_args(args)
, m_axis(axis)
, m_out(out)
{
size_t concat_pos[2]{0, 0};
for (auto arg : args)
{
auto& arg_shape = arg.get_tensor_view_layout()->get_shape();
m_blocks.push_back(
{concat_pos[0], concat_pos[1], arg_shape.at(0), arg_shape.at(1)});
concat_pos[axis] += arg_shape.at(axis);
}
}
virtual void execute(CallFrame& call_frame) const override
{
EigenMatrix<ET> out(call_frame, m_out);
for (size_t i = 0; i < m_args.size(); i++)
{
auto& b = m_blocks[i];
out.block(b[0], b[1], b[2], b[3])
<< EigenMatrix<ET>(call_frame, m_args.at(i));
}
}
protected:
std::vector<TensorViewInfo> m_args;
size_t m_axis;
TensorViewInfo m_out;
std::vector<std::vector<size_t>> m_blocks;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class MatrixVectorProductInstruction : public Instruction
{
public:
MatrixVectorProductInstruction(const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenVector<ET>(call_frame, m_out) = EigenMatrix<ET>(call_frame, m_arg0) *
EigenVector<ET>(call_frame, m_arg1);
}
protected:
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class ReduceMatrixColumnsInstruction : public Instruction
{
public:
ReduceMatrixColumnsInstruction(std::shared_ptr<ExternalFunction> ef,
const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out)
: m_external_function(ef)
, m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
auto ef = m_external_function;
auto f = [ef](typename ET::type x, typename ET::type y) -> typename ET::type
{
std::shared_ptr<CallFrame> cf =
std::dynamic_pointer_cast<CallFrame>(ef->make_call_frame());
auto tx = ngraph::runtime::make_tensor<ET>(Shape{}, {x});
auto ty = ngraph::runtime::make_tensor<ET>(Shape{}, {y});
auto tr = ngraph::runtime::make_tensor<ET>(Shape{});
cf->call({tx, ty}, {tr});
return tr->get_vector()[0];
};
EigenVector<ET>(call_frame, m_out) =
EigenMatrix<ET>(call_frame, m_arg0).colwise().redux(f);
}
protected:
std::shared_ptr<ExternalFunction> m_external_function;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class ReduceMatrixRowsInstruction : public Instruction
{
public:
ReduceMatrixRowsInstruction(std::shared_ptr<ExternalFunction> ef,
const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out)
: m_external_function(ef)
, m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
auto ef = m_external_function;
auto f = [ef](typename ET::type x, typename ET::type y) -> typename ET::type
{
std::shared_ptr<CallFrame> cf =
std::dynamic_pointer_cast<CallFrame>(ef->make_call_frame());
auto tx = ngraph::runtime::make_tensor<ET>(Shape{}, {x});
auto ty = ngraph::runtime::make_tensor<ET>(Shape{}, {y});
auto tr = ngraph::runtime::make_tensor<ET>(Shape{});
cf->call({tx, ty}, {tr});
return tr->get_vector()[0];
};
EigenVector<ET>(call_frame, m_out) =
EigenMatrix<ET>(call_frame, m_arg0).rowwise().redux(f);
}
protected:
std::shared_ptr<ExternalFunction> m_external_function;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class ReduceToScalarInstruction : public Instruction
{
public:
ReduceToScalarInstruction(std::shared_ptr<ExternalFunction> ef,
const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out)
: m_external_function(ef)
, m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
auto ef = m_external_function;
auto f = [ef](typename ET::type x, typename ET::type y) -> typename ET::type
{
std::shared_ptr<CallFrame> cf =
std::dynamic_pointer_cast<CallFrame>(ef->make_call_frame());
auto tx = ngraph::runtime::make_tensor<ET>(Shape{}, {x});
auto ty = ngraph::runtime::make_tensor<ET>(Shape{}, {y});
auto tr = ngraph::runtime::make_tensor<ET>(Shape{});
cf->call({tx, ty}, {tr});
return tr->get_vector()[0];
};
EigenArray1d<ET>(call_frame, m_out) =
EigenArray1d<ET>(call_frame, m_arg0).redux(f);
}
protected:
std::shared_ptr<ExternalFunction> m_external_function;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include <Eigen/Dense>
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
namespace runtime
{
class TensorViewInfo;
namespace ngvm
{
class CallFrame;
namespace eigen
{
using DynamicStrides = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
using VectorStrides = Eigen::Stride<Eigen::Dynamic, 1>;
template <typename ET>
using DynamicArray = Eigen::
Array<typename ET::type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
template <typename ET>
using EigenArrayBase = Eigen::Map<DynamicArray<ET>, 0, DynamicStrides>;
template <typename ET>
using DynamicMatrix = Eigen::
Matrix<typename ET::type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
template <typename ET>
using EigenMatrixBase = Eigen::Map<DynamicMatrix<ET>, 0, DynamicStrides>;
template <typename ET>
using DynamicVector = Eigen::Matrix<typename ET::type, Eigen::Dynamic, 1>;
template <typename ET>
using EigenVectorBase = Eigen::Map<DynamicVector<ET>, 0, VectorStrides>;
namespace fmt
{
/// @brief vector format for Eigen wrappers.
class V
{
public:
V(const TensorViewInfo& tensor_view_info)
: l0(tensor_view_info
.get_layout<
ngraph::descriptor::layout::DenseTensorViewLayout>()
->get_size())
{
}
public:
size_t l0;
size_t l1{1};
size_t s0{1};
size_t s1{1};
};
class M
{
M(const Shape& shape, const Strides& strides)
: l0(shape.at(0))
, l1(shape.at(1))
, s0(strides.at(0))
, s1(strides.at(1))
{
}
M(const std::shared_ptr<ngraph::descriptor::layout::DenseTensorViewLayout>&
layout)
: M(layout->get_shape(), layout->get_strides())
{
}
public:
M(const TensorViewInfo& tensor_view_info)
: M(tensor_view_info.get_layout<
ngraph::descriptor::layout::DenseTensorViewLayout>())
{
}
public:
size_t l0;
size_t l1;
size_t s0;
size_t s1;
};
}
// ET element type
// FMT array format (fmt::V for vector, etc.)
// BASE select array/matrix
template <typename ET,
typename FMT,
typename BASE,
typename STRIDES = DynamicStrides>
class EigenWrapper : public BASE
{
using base = BASE;
public:
EigenWrapper(typename ET::type* t, const FMT& fmt)
: base(t, fmt.l0, fmt.l1, STRIDES(fmt.s0, fmt.s1))
{
}
EigenWrapper(
typename ET::type* t,
const std::shared_ptr<ngraph::descriptor::layout::DenseTensorViewLayout>&
layout)
: base(t, layout->get_size(), 1, DynamicStrides(1, 1))
{
}
EigenWrapper(CallFrame& call_frame, const TensorViewInfo& tensor_view_info)
: EigenWrapper(
call_frame.get_tensor_view_data<ET>(tensor_view_info.get_index()),
FMT(tensor_view_info))
{
}
template <typename U>
EigenWrapper& operator=(const U& other)
{
this->base::operator=(other);
return *this;
}
};
template <typename ET, typename FMT = fmt::V>
using EigenArray1d = EigenWrapper<ET, FMT, EigenArrayBase<ET>>;
template <typename ET, typename FMT = fmt::M>
using EigenArray2d = EigenWrapper<ET, FMT, EigenArrayBase<ET>>;
template <typename ET, typename FMT = fmt::M>
using EigenMatrix = EigenWrapper<ET, FMT, EigenMatrixBase<ET>>;
template <typename ET, typename FMT = fmt::V>
using EigenVector = EigenWrapper<ET, FMT, EigenVectorBase<ET>, VectorStrides>;
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class VectorSliceInstruction : public Instruction
{
public:
VectorSliceInstruction(const TensorViewInfo& arg,
const TensorViewInfo& out,
size_t lower,
size_t upper)
: m_arg(arg)
, m_out(out)
, m_lower(lower)
, m_upper(upper)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenVector<ET>(call_frame, m_out) =
EigenVector<ET>(call_frame, m_arg).segment(m_lower, m_upper - m_lower);
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
size_t m_lower;
size_t m_upper;
};
}
}
}
}
......@@ -16,11 +16,11 @@
#include <vector>
#include "ngraph/runtime/kernel/concat.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/ngvm/utils.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
......@@ -28,41 +28,46 @@ namespace ngraph
{
namespace ngvm
{
namespace eigen
namespace instruction
{
// Would be better to just generate a sequence of copy-into-output-slice instructions
template <typename ET>
class ConcatVectorInstruction : public Instruction
class ConcatInstruction : public Instruction
{
public:
ConcatVectorInstruction(const std::vector<TensorViewInfo>& args,
const TensorViewInfo& out)
ConcatInstruction(const std::vector<TensorViewInfo>& args,
const TensorViewInfo& out,
const std::vector<Shape>& arg_shapes,
const Shape& out_shape,
size_t concatenation_axis)
: m_args(args)
, m_out(out)
, m_arg_shapes(arg_shapes)
, m_out_shape(out_shape)
, m_concatenation_axis(concatenation_axis)
{
for (auto arg : args)
{
auto& arg_shape = arg.get_tensor_view_layout()->get_shape();
m_sizes.push_back(arg_shape.at(0));
}
}
virtual void execute(CallFrame& call_frame) const override
{
EigenVector<ET> out(call_frame, m_out);
size_t concat_pos = 0;
for (size_t i = 0; i < m_args.size(); i++)
std::vector<typename ET::type*> args;
for (auto arg_tv : m_args)
{
out.segment(concat_pos, m_sizes[i])
<< EigenVector<ET>(call_frame, m_args.at(i));
concat_pos += m_sizes[i];
args.push_back(get_tensor_data_ptr<ET>(call_frame, arg_tv));
}
typename ET::type* out = get_tensor_data_ptr<ET>(call_frame, m_out);
kernel::concat<typename ET::type>(
args, out, m_arg_shapes, m_out_shape, m_concatenation_axis);
}
protected:
std::vector<TensorViewInfo> m_args;
TensorViewInfo m_out;
std::vector<size_t> m_sizes;
std::vector<Shape> m_arg_shapes;
Shape m_out_shape;
size_t m_concatenation_axis;
};
}
}
......
......@@ -14,9 +14,10 @@
#pragma once
#include "ngraph/runtime/kernel/dot.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/ngvm/utils.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......@@ -25,7 +26,7 @@ namespace ngraph
{
namespace ngvm
{
namespace eigen
namespace instruction
{
template <typename ET>
class DotInstruction : public Instruction
......@@ -33,24 +34,48 @@ namespace ngraph
public:
DotInstruction(const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out)
const TensorViewInfo& out,
const Shape& arg0_shape,
const Shape& arg1_shape,
const Shape& out_shape,
size_t arg0_dot_axis,
size_t arg1_dot_axis)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
, m_arg0_shape(arg0_shape)
, m_arg1_shape(arg1_shape)
, m_out_shape(out_shape)
, m_arg0_dot_axis(arg0_dot_axis)
, m_arg1_dot_axis(arg1_dot_axis)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET>(call_frame, m_out)
<< EigenVector<ET>(call_frame, m_arg0)
.dot(EigenVector<ET>(call_frame, m_arg1));
typename ET::type* arg0 = get_tensor_data_ptr<ET>(call_frame, m_arg0);
typename ET::type* arg1 = get_tensor_data_ptr<ET>(call_frame, m_arg1);
typename ET::type* out = get_tensor_data_ptr<ET>(call_frame, m_out);
kernel::dot<typename ET::type>(arg0,
arg1,
out,
m_arg0_shape,
m_arg1_shape,
m_out_shape,
m_arg0_dot_axis,
m_arg1_dot_axis);
}
protected:
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
Shape m_arg0_shape;
Shape m_arg1_shape;
Shape m_out_shape;
size_t m_arg0_dot_axis;
size_t m_arg1_dot_axis;
};
}
}
......
......@@ -14,9 +14,10 @@
#pragma once
#include "ngraph/runtime/kernel/reduce.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/ngvm/utils.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......@@ -25,47 +26,55 @@ namespace ngraph
{
namespace ngvm
{
namespace eigen
namespace instruction
{
template <typename ET>
class ReplaceMatrixSliceInstruction : public Instruction
class ReduceInstruction : public Instruction
{
public:
ReplaceMatrixSliceInstruction(const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out,
size_t lower_row,
size_t lower_col,
size_t upper_row,
size_t upper_col)
ReduceInstruction(
const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out,
const Shape& arg_shape,
const Shape& out_shape,
const AxisSet& reduction_axes,
std::function<typename ET::type(typename ET::type, typename ET::type)>
reduction_function)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
, m_lower_row(lower_row)
, m_lower_col(lower_col)
, m_upper_row(upper_row)
, m_upper_col(upper_col)
, m_arg_shape(arg_shape)
, m_out_shape(out_shape)
, m_reduction_axes(reduction_axes)
, m_reduction_function(reduction_function)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenMatrix<ET>(call_frame, m_out) = EigenMatrix<ET>(call_frame, m_arg0);
EigenMatrix<ET>(call_frame, m_out)
.block(m_lower_row,
m_lower_col,
m_upper_row - m_lower_row,
m_upper_col - m_lower_col) = EigenMatrix<ET>(call_frame, m_arg1);
typename ET::type* arg0 = get_tensor_data_ptr<ET>(call_frame, m_arg0);
typename ET::type* arg1 = get_tensor_data_ptr<ET>(call_frame, m_arg1);
typename ET::type* out = get_tensor_data_ptr<ET>(call_frame, m_out);
kernel::reduce<typename ET::type>(arg0,
arg1,
out,
m_arg_shape,
m_out_shape,
m_reduction_axes,
m_reduction_function);
}
protected:
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
size_t m_lower_row;
size_t m_lower_col;
size_t m_upper_row;
size_t m_upper_col;
Shape m_arg_shape;
Shape m_out_shape;
AxisSet m_reduction_axes;
std::function<typename ET::type(typename ET::type, typename ET::type)>
m_reduction_function;
};
}
}
......
......@@ -14,9 +14,10 @@
#pragma once
#include "ngraph/runtime/kernel/replace_slice.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/ngvm/utils.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......@@ -25,43 +26,56 @@ namespace ngraph
{
namespace ngvm
{
namespace eigen
namespace instruction
{
template <typename ET>
class MatrixSliceInstruction : public Instruction
class ReplaceSliceInstruction : public Instruction
{
public:
MatrixSliceInstruction(const TensorViewInfo& arg,
const TensorViewInfo& out,
size_t lower_row,
size_t lower_col,
size_t upper_row,
size_t upper_col)
: m_arg(arg)
ReplaceSliceInstruction(const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out,
const Shape& arg1_shape,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Strides& strides,
const Shape& out_shape)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
, m_lower_row(lower_row)
, m_lower_col(lower_col)
, m_upper_row(upper_row)
, m_upper_col(upper_col)
, m_arg1_shape(arg1_shape)
, m_lower_bounds(lower_bounds)
, m_upper_bounds(upper_bounds)
, m_strides(strides)
, m_out_shape(out_shape)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenMatrix<ET>(call_frame, m_out) = EigenMatrix<ET>(call_frame, m_arg)
.block(m_lower_row,
m_lower_col,
m_upper_row - m_lower_row,
m_upper_col - m_lower_col);
typename ET::type* arg0 = get_tensor_data_ptr<ET>(call_frame, m_arg0);
typename ET::type* arg1 = get_tensor_data_ptr<ET>(call_frame, m_arg1);
typename ET::type* out = get_tensor_data_ptr<ET>(call_frame, m_out);
kernel::replace_slice<typename ET::type>(arg0,
arg1,
out,
m_arg1_shape,
m_lower_bounds,
m_upper_bounds,
m_strides,
m_out_shape);
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
size_t m_lower_row;
size_t m_lower_col;
size_t m_upper_row;
size_t m_upper_col;
Shape m_arg1_shape;
Coordinate m_lower_bounds;
Coordinate m_upper_bounds;
Strides m_strides;
Shape m_out_shape;
};
}
}
......
......@@ -14,9 +14,10 @@
#pragma once
#include "ngraph/runtime/kernel/reshape.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/ngvm/utils.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......@@ -25,28 +26,40 @@ namespace ngraph
{
namespace ngvm
{
namespace eigen
namespace instruction
{
template <typename ET>
class SumMatrixColumnsInstruction : public Instruction
class ReshapeInstruction : public Instruction
{
public:
SumMatrixColumnsInstruction(const TensorViewInfo& arg,
const TensorViewInfo& out)
ReshapeInstruction(const TensorViewInfo& arg,
const TensorViewInfo& out,
const Shape& arg_shape,
const AxisVector& arg_axis_order,
const Shape& out_shape)
: m_arg(arg)
, m_out(out)
, m_arg_shape(arg_shape)
, m_arg_axis_order(arg_axis_order)
, m_out_shape(out_shape)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenVector<ET>(call_frame, m_out) =
EigenMatrix<ET>(call_frame, m_arg).colwise().sum();
typename ET::type* arg = get_tensor_data_ptr<ET>(call_frame, m_arg);
typename ET::type* out = get_tensor_data_ptr<ET>(call_frame, m_out);
kernel::reshape<typename ET::type>(
arg, out, m_arg_shape, m_arg_axis_order, m_out_shape);
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
Shape m_arg_shape;
AxisVector m_arg_axis_order;
Shape m_out_shape;
};
}
}
......
......@@ -14,9 +14,10 @@
#pragma once
#include "ngraph/runtime/kernel/scalar_tensor_product.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/ngvm/utils.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......@@ -25,7 +26,7 @@ namespace ngraph
{
namespace ngvm
{
namespace eigen
namespace instruction
{
template <typename ET>
class ScalarTensorProductInstruction : public Instruction
......@@ -42,13 +43,13 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
// This is a bit hacky: regardless of the tensor rank we
// pull it out as a vector. This works because of the way
// fmt::V computes sizes---it lumps together any higher
// dimensions---while fmt::M ignores them.
EigenVector<ET>(call_frame, m_out) =
call_frame.get_tensor_view_data<ET>(m_arg0.get_index())[0] *
EigenVector<ET>(call_frame, m_arg1);
typename ET::type* arg0 = get_tensor_data_ptr<ET>(call_frame, m_arg0);
typename ET::type* arg1 = get_tensor_data_ptr<ET>(call_frame, m_arg1);
typename ET::type* out = get_tensor_data_ptr<ET>(call_frame, m_out);
size_t count = get_tensor_element_count(call_frame, m_arg1);
kernel::scalar_tensor_product<typename ET::type>(arg0, arg1, out, count);
}
protected:
......
......@@ -14,9 +14,10 @@
#pragma once
#include "ngraph/runtime/kernel/slice.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/ngvm/utils.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......@@ -25,31 +26,51 @@ namespace ngraph
{
namespace ngvm
{
namespace eigen
namespace instruction
{
template <typename ET>
class BroadcastScalarInstruction : public Instruction
class SliceInstruction : public Instruction
{
public:
BroadcastScalarInstruction(const TensorViewInfo& arg, const TensorViewInfo& out)
SliceInstruction(const TensorViewInfo& arg,
const TensorViewInfo& out,
const Shape& arg_shape,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Strides& strides,
const Shape& out_shape)
: m_arg(arg)
, m_out(out)
, m_arg_shape(arg_shape)
, m_lower_bounds(lower_bounds)
, m_upper_bounds(upper_bounds)
, m_strides(strides)
, m_out_shape(out_shape)
{
}
virtual void execute(CallFrame& call_frame) const override
{
// This is a bit hacky: regardless of the tensor rank we
// pull it out as a vector. This works because of the way
// fmt::V computes sizes---it lumps together any higher
// dimensions---while fmt::M ignores them.
EigenArray1d<ET>(call_frame, m_out) =
EigenArray1d<ET>(call_frame, m_arg)(0, 0);
typename ET::type* arg = get_tensor_data_ptr<ET>(call_frame, m_arg);
typename ET::type* out = get_tensor_data_ptr<ET>(call_frame, m_out);
kernel::slice<typename ET::type>(arg,
out,
m_arg_shape,
m_lower_bounds,
m_upper_bounds,
m_strides,
m_out_shape);
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
Shape m_arg_shape;
Coordinate m_lower_bounds;
Coordinate m_upper_bounds;
Strides m_strides;
Shape m_out_shape;
};
}
}
......
......@@ -14,9 +14,10 @@
#pragma once
#include "ngraph/runtime/kernel/sum.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/ngvm/utils.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......@@ -25,27 +26,40 @@ namespace ngraph
{
namespace ngvm
{
namespace eigen
namespace instruction
{
template <typename ET>
class MatrixTransposeInstruction : public Instruction
class SumInstruction : public Instruction
{
public:
MatrixTransposeInstruction(const TensorViewInfo& arg, const TensorViewInfo& out)
SumInstruction(const TensorViewInfo& arg,
const TensorViewInfo& out,
const Shape& arg_shape,
const Shape& out_shape,
const AxisSet& reduction_axes)
: m_arg(arg)
, m_out(out)
, m_arg_shape(arg_shape)
, m_out_shape(out_shape)
, m_reduction_axes(reduction_axes)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenMatrix<ET>(call_frame, m_out) =
EigenMatrix<ET>(call_frame, m_arg).transpose();
typename ET::type* arg = get_tensor_data_ptr<ET>(call_frame, m_arg);
typename ET::type* out = get_tensor_data_ptr<ET>(call_frame, m_out);
kernel::sum<typename ET::type>(
arg, out, m_arg_shape, m_out_shape, m_reduction_axes);
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
Shape m_arg_shape;
Shape m_out_shape;
AxisSet m_reduction_axes;
};
}
}
......
......@@ -16,8 +16,6 @@
#include <memory>
#include <Eigen/Dense>
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
......
......@@ -427,8 +427,8 @@ shared_ptr<ngraph::Function>
{
auto lower_bounds = node_js.at("lower_bounds").get<vector<size_t>>();
auto upper_bounds = node_js.at("upper_bounds").get<vector<size_t>>();
auto step = node_js.at("step").get<vector<size_t>>();
node = make_shared<op::Slice>(args[0], lower_bounds, upper_bounds, step);
auto strides = node_js.at("strides").get<vector<size_t>>();
node = make_shared<op::Slice>(args[0], lower_bounds, upper_bounds, strides);
}
else if (node_op == "Subtract")
{
......@@ -631,7 +631,7 @@ json write(const Node& n)
auto tmp = dynamic_cast<const op::Slice*>(&n);
node["lower_bounds"] = tmp->get_lower_bounds();
node["upper_bounds"] = tmp->get_upper_bounds();
node["step"] = tmp->get_step();
node["strides"] = tmp->get_strides();
}
else if (node_op == "Subtract")
{
......
......@@ -186,6 +186,9 @@ namespace ngraph
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int8_t)
using Int8 = TraitedType<int8_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int16_t)
using Int16 = TraitedType<int16_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int32_t)
using Int32 = TraitedType<int32_t>;
......@@ -195,6 +198,9 @@ namespace ngraph
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint8_t)
using UInt8 = TraitedType<uint8_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint16_t)
using UInt16 = TraitedType<uint16_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint32_t)
using UInt32 = TraitedType<uint32_t>;
......
......@@ -26,7 +26,6 @@ set (SRC
builder.cpp
builder_autobroadcast.cpp
build_graph.cpp
coordinate_iterator.cpp
copy.cpp
eigen.cpp
element_type.cpp
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include <memory>
using namespace std;
using namespace ngraph;
TEST(coordinate_iterator, construct)
{
Shape space_shape{2, 3, 5, 6};
Strides strides{1, 1, 1, 1};
Coordinate window_outer_corner{2, 3, 5, 6};
Coordinate window_inner_corner{0, 0, 0, 0};
auto ci = CoordinateIterator(space_shape, strides, window_outer_corner, window_inner_corner);
}
TEST(coordinate_iterator, construct_defaults)
{
Shape space_shape{2, 3, 5, 6};
Strides strides{2, 2, 2, 1};
auto ci = CoordinateIterator(space_shape, strides);
}
TEST(coordinate_iterator, construct_defaults_stride)
{
Shape space_shape{2, 3, 5, 6};
auto ci = CoordinateIterator(space_shape);
}
TEST(coordinate_iterator, construct_bad_outer_oob)
{
Shape space_shape{2, 3, 5, 6};
Strides strides{1, 1, 1, 1};
Coordinate window_outer_corner{2, 4, 5, 6};
Coordinate window_inner_corner{0, 0, 0, 0};
EXPECT_ANY_THROW({
auto ci =
CoordinateIterator(space_shape, strides, window_outer_corner, window_inner_corner);
});
}
TEST(coordinate_iterator, construct_bad_inner_oob)
{
Shape space_shape{2, 3, 5, 6};
Strides strides{1, 1, 1, 1};
Coordinate window_outer_corner{2, 3, 5, 6};
Coordinate window_inner_corner{0, 3, 0, 0};
EXPECT_ANY_THROW({
auto ci =
CoordinateIterator(space_shape, strides, window_outer_corner, window_inner_corner);
});
}
TEST(coordinate_iterator, construct_bad_inner_outside_outer)
{
Shape space_shape{2, 3, 5, 6};
Strides strides{1, 1, 1, 1};
Coordinate window_outer_corner{2, 1, 5, 6};
Coordinate window_inner_corner{0, 2, 0, 0};
EXPECT_ANY_THROW({
auto ci =
CoordinateIterator(space_shape, strides, window_outer_corner, window_inner_corner);
});
}
TEST(coordinate_iterator, construct_bad_zero_stride)
{
Shape space_shape{2, 3, 5, 6};
Strides strides{1, 0, 1, 1};
Coordinate window_outer_corner{2, 3, 5, 6};
Coordinate window_inner_corner{0, 0, 0, 0};
EXPECT_ANY_THROW({
auto ci =
CoordinateIterator(space_shape, strides, window_outer_corner, window_inner_corner);
});
}
TEST(coordinate_iterator, cover_count_defaults)
{
Shape space_shape{2, 3, 5, 6};
auto ci = CoordinateIterator(space_shape);
size_t count = 0;
size_t expected_index = 0;
do
{
count++;
EXPECT_EQ(ci.get_current_index(), expected_index);
expected_index++;
} while (ci.increment());
EXPECT_EQ(count, 2 * 3 * 5 * 6);
}
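// Editor's note (hedged): with default strides the iterator walks the space in
// row-major order, so for Shape{2, 3, 5, 6} the flat index of coordinate
// (c0, c1, c2, c3) is ((c0 * 3 + c1) * 5 + c2) * 6 + c3. That is why
// get_current_index() tracks a counter incremented by one, and why the total
// count is 2 * 3 * 5 * 6 = 180.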
TEST(coordinate_iterator, cover_count_stride_2)
{
Shape space_shape{2, 3, 5, 6};
Strides strides{1, 1, 1, 2};
auto ci = CoordinateIterator(space_shape, strides);
size_t count = 0;
size_t expected_index = 0;
do
{
count++;
EXPECT_EQ(ci.get_current_index(), expected_index);
expected_index += 2;
} while (ci.increment());
EXPECT_EQ(count, 2 * 3 * 5 * 6 / 2);
}
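// Editor's note (hedged): a stride of 2 on the innermost axis (length 6, even)
// skips every other element, so the visited flat indices are 0, 2, 4, ... and
// the count halves to 180 / 2 = 90. The indices form an arithmetic progression
// only because 2 divides 6 evenly; the uneven case below drops that property.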
#define CEIL_DIV(x, y) (1 + (((x)-1) / (y)))
TEST(coordinate_iterator, cover_count_stride_uneven)
{
Shape space_shape{2, 3, 5, 6};
Strides strides{1, 2, 2, 3};
auto ci = CoordinateIterator(space_shape, strides);
size_t count = 0;
do
{
count++;
} while (ci.increment());
EXPECT_EQ(count, CEIL_DIV(2, 1) * CEIL_DIV(3, 2) * CEIL_DIV(5, 2) * CEIL_DIV(6, 3));
}
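// Editor's note: a stride s over an axis of extent n visits ceil(n / s) points,
// which CEIL_DIV computes for positive integers. Here that gives
// CEIL_DIV(2, 1) * CEIL_DIV(3, 2) * CEIL_DIV(5, 2) * CEIL_DIV(6, 3)
// = 2 * 2 * 3 * 2 = 24 coordinates.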
......@@ -428,13 +428,13 @@ TEST(copy, slice)
Shape shape_in{2, 3, 4};
Coordinate lower{0, 0, 0};
Coordinate upper{2, 3, 4};
Coordinate step{1, 1, 1};
Strides strides{1, 1, 1};
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), shape_in);
std::vector<std::shared_ptr<Node>> new_args{
make_shared<op::Parameter>(element::Float32::element_type(), shape_in)};
auto node = make_shared<op::Slice>(arg0, lower, upper, step);
auto node = make_shared<op::Slice>(arg0, lower, upper, strides);
auto new_node = node->copy_with_new_args(new_args);
auto node_cast = dynamic_pointer_cast<op::Slice>(new_node);
ASSERT_NE(node_cast, nullptr);
......@@ -443,7 +443,7 @@ TEST(copy, slice)
ASSERT_TRUE(new_args == new_node->get_arguments());
ASSERT_TRUE(lower == node_cast->get_lower_bounds());
ASSERT_TRUE(upper == node_cast->get_upper_bounds());
ASSERT_TRUE(step == node_cast->get_step());
ASSERT_TRUE(strides == node_cast->get_strides());
}
TEST(copy, subtract)
......
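With the step-to-strides rename in place, constructing a strided slice reads as below; a hedged sketch mirroring the updated TEST(copy, slice) hunk above (values chosen for illustration only):
auto arg = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto sl = make_shared<op::Slice>(arg, Coordinate{0, 0, 0}, Coordinate{2, 3, 4}, Strides{1, 1, 1});
// get_strides() replaces the old get_step() accessor checked in the assertions.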
......@@ -1239,7 +1239,7 @@ TEST(type_prop, slice_deduce_matrix_zero_zero)
TensorViewType(element::Float32::element_type(), Shape{0, 0}));
}
TEST(type_prop, slice_deduce_vector_invalid_step)
TEST(type_prop, slice_deduce_vector_invalid_strides)
{
auto param = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{6}));
......@@ -1247,14 +1247,13 @@ TEST(type_prop, slice_deduce_vector_invalid_step)
{
auto sl = make_shared<op::Slice>(param, Coordinate{0}, Coordinate{7}, Shape{1, 2});
// Should have thrown, so fail if it didn't
FAIL() << "Invalid slice step not detected";
FAIL() << "Invalid slice strides not detected";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(
error.what(),
std::string(
"Number of step axes provided for slice does not match number of input axes"));
EXPECT_EQ(error.what(),
std::string(
"Number of strides provided for slice does not match number of input axes"));
}
catch (...)
{
......@@ -1610,7 +1609,7 @@ TEST(type_prop, replace_slice_deduce_matrix_zero_zero)
TensorViewType(element::Float32::element_type(), Shape{6, 8}));
}
TEST(type_prop, replace_slice_deduce_vector_invalid_step)
TEST(type_prop, replace_slice_deduce_vector_invalid_strides)
{
auto param0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{6}));
......@@ -1621,14 +1620,13 @@ TEST(type_prop, replace_slice_deduce_vector_invalid_step)
auto sl = make_shared<op::ReplaceSlice>(
param0, param1, Coordinate{0}, Coordinate{7}, Shape{1, 2});
// Should have thrown, so fail if it didn't
FAIL() << "Invalid slice step not detected";
FAIL() << "Invalid slice strides not detected";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(
error.what(),
std::string(
"Number of step axes provided for slice does not match number of input axes"));
EXPECT_EQ(error.what(),
std::string(
"Number of strides provided for slice does not match number of input axes"));
}
catch (...)
{
......
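The renamed error message in both hunks above implies a rank check on the strides argument during type propagation; a hedged sketch of that check (arg_shape is a hypothetical name for the input's shape, not the actual implementation):
if (strides.size() != arg_shape.size())
{
    throw ngraph_error("Number of strides provided for slice does not match number of input axes");
}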