Commit 37b95a02 authored by Adam Procter, committed by Scott Cyphers

(Dynamic) Transpose op (#2594)

* Add construction API for Transpose

* Add type propagation unit tests for Transpose

* Add Transpose to op_tbl, add cases for serializer, stub out execution in INTERPRETER

* Add docs for Transpose

* Remove commented-out code

* Add stub cases for op_tbl-dependent stuff

* Fix missing FAIL()s in the transpose exception checks; fix validate_and_infer_types check
parent 60751152
@@ -75,6 +75,7 @@ Not currently a comprehensive list.
* :doc:`subtract`
* :doc:`tan`
* :doc:`tanh`
* :doc:`transpose`
@@ -145,6 +146,7 @@ Not currently a comprehensive list.
subtract.rst
tan.rst
tanh.rst
transpose.rst
.. _more_about:
.. transpose.rst:

#########
Transpose
#########

.. code-block:: cpp

   Transpose // Operation that transposes axes of a tensor

Description
===========

.. warning:: This op is not yet implemented in any backend.

.. warning:: This op is experimental and subject to change without notice.

Operation that transposes the axes of an input tensor. This operation covers
matrix transposition as well as arbitrary axis permutations of higher-rank
tensors.
Inputs
------

+-----------------+------------------+----------------------------------------------+
| Name            | Element Type     | Shape                                        |
+=================+==================+==============================================+
| ``arg``         | Any              | Any                                          |
+-----------------+------------------+----------------------------------------------+
| ``input_order`` | ``element::i64`` | ``[n]``, where ``n`` is the rank of ``arg``. |
+-----------------+------------------+----------------------------------------------+
Outputs
-------

+-----------------+------------------+-----------------------------------------------------------------------------------+
| Name            | Element Type     | Shape                                                                             |
+=================+==================+===================================================================================+
| ``output``      | Same as ``arg``  | ``P(ShapeOf(arg))``, where ``P`` is the permutation supplied for ``input_order``. |
+-----------------+------------------+-----------------------------------------------------------------------------------+
The input ``input_order`` must be a vector of shape ``[n]``, where ``n`` is the
rank of ``arg``, and must contain every integer in the range ``[0, n-1]``. This
vector represents a permutation of ``arg``'s dimensions. For example (see also
the construction sketch after the table):
+---------------+-----------------------+------------------+-----------------------------------------------------+
| ``arg`` Shape | ``input_order`` Value | ``output`` Shape | Comment                                             |
+===============+=======================+==================+=====================================================+
| ``[3,4]``     | ``[1,0]``             | ``[4,3]``        | Transposes the ``arg`` matrix.                      |
+---------------+-----------------------+------------------+-----------------------------------------------------+
| ``[3,3]``     | ``[1,0]``             | ``[3,3]``        | Transposes the ``arg`` matrix.                      |
+---------------+-----------------------+------------------+-----------------------------------------------------+
| ``[3,4,8]``   | ``[2,0,1]``           | ``[8,3,4]``      | Moves the "last" dimension to the "first" position. |
+---------------+-----------------------+------------------+-----------------------------------------------------+
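As a usage sketch for the last row of the table (hedged: the variable names
are illustrative, and the permutation is supplied here via ``op::Constant``,
though any node producing an ``i64`` vector of the right shape works):

.. code-block:: cpp

   // Move the last dimension of a [3,4,8] tensor to the front,
   // producing an output of shape [8,3,4].
   auto arg = std::make_shared<op::Parameter>(element::f32, Shape{3, 4, 8});
   auto input_order = op::Constant::create(element::i64, Shape{3},
                                           std::vector<int64_t>{2, 0, 1});
   auto transpose = std::make_shared<op::Transpose>(arg, input_order);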
Mathematical Definition
=======================

.. math::

   \mathtt{output}_{i_0, i_1, \dots, i_{n-1}} =
      \mathtt{arg}_{i_{\mathtt{input\_order}[0]}, i_{\mathtt{input\_order}[1]}, \dots, i_{\mathtt{input\_order}[n-1]}}
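For instance, with ``input_order`` equal to ``[1,0]`` (the first two rows of
the table above), the definition reduces to the familiar matrix transpose:

.. math::

   \mathtt{output}_{i_0, i_1} = \mathtt{arg}_{i_1, i_0}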
Backprop
========

Not yet implemented.
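A sketch only of what backprop would compute once implemented (per the TODO
note in ``transpose.cpp``, this requires some way to invert the permutation
in-graph, which nGraph does not yet provide): the adjoint of ``arg`` would be
the incoming delta transposed by the inverse permutation,

.. math::

   \overline{\mathtt{arg}} = \mathtt{Transpose}(\overline{\mathtt{output}}, \mathtt{input\_order}^{-1})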
C++ Interface
=============

.. doxygenclass:: ngraph::op::Transpose
   :project: ngraph
   :members:
@@ -144,6 +144,8 @@ set (SRC
op/experimental/quantized_dot.hpp
op/experimental/quantized_dot_bias.cpp
op/experimental/quantized_dot_bias.hpp
op/experimental/transpose.cpp
op/experimental/transpose.hpp
op/floor.cpp
op/floor.hpp
op/get_output_element.cpp
@@ -81,9 +81,11 @@
#include "ngraph/op/dequantize.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/dot.hpp"
#include "ngraph/op/embedding_lookup.hpp"
#include "ngraph/op/equal.hpp"
#include "ngraph/op/exp.hpp"
#include "ngraph/op/experimental/shape_of.hpp"
#include "ngraph/op/experimental/transpose.hpp"
#include "ngraph/op/floor.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/greater.hpp"
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/experimental/transpose.hpp"

using namespace std;
using namespace ngraph;

op::Transpose::Transpose(const shared_ptr<Node>& arg, const shared_ptr<Node>& input_order)
    : Op("Transpose", check_single_output_args({arg, input_order}))
{
    constructor_validate_and_infer_types();
}

void op::Transpose::validate_and_infer_types()
{
    // The permutation must be (convertible to) a vector of i64.
    NODE_VALIDATION_CHECK(this,
                          get_input_element_type(1).compatible(element::i64),
                          "Input order must have element type i64.");

    auto& input_order_shape = get_input_partial_shape(1);
    NODE_VALIDATION_CHECK(
        this, input_order_shape.rank().compatible(1), "Input order must be a vector.");

    // The vector's length must match the rank of the tensor being transposed.
    auto& arg_shape = get_input_partial_shape(0);
    NODE_VALIDATION_CHECK(this,
                          input_order_shape.compatible(PartialShape{arg_shape.rank()}),
                          "Input order must have shape [n], where n is the rank of arg.");

    // The permutation itself is not known until runtime, so only the output
    // rank can be inferred statically; every output dimension stays dynamic.
    set_output_type(0, get_input_element_type(0), PartialShape::dynamic(arg_shape.rank()));
}

shared_ptr<Node> op::Transpose::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<Transpose>(new_args.at(0), new_args.at(1));
}

// TODO(amprocte): This will require some way of inverting the permutation in-graph. (TensorFlow,
// for example, has an InvertPermutation op, but that doesn't feel very nGraph-y somehow.)
void op::Transpose::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
{
    throw ngraph_error("generate_adjoints not implemented for Transpose");
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once

#include "ngraph/axis_vector.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/op.hpp"

namespace ngraph
{
    namespace op
    {
        /// \brief Tensor transpose operation.
        class Transpose : public Op
        {
        public:
            /// \brief Constructs a transpose operation.
            ///
            /// \param arg Node producing the tensor to be transposed.
            /// \param input_order Node producing the permutation to apply to the axes of the
            ///                    input shape. Must be a vector of element type element::i64,
            ///                    with shape [n], where n is the rank of arg. The tensor's
            ///                    value must contain every integer in the range [0, n-1].
            Transpose(const std::shared_ptr<Node>& arg,
                      const std::shared_ptr<Node>& input_order);

            void validate_and_infer_types() override;

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

        protected:
            virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                           const NodeVector& deltas) override;
        };
    }
}
@@ -142,4 +142,5 @@ NGRAPH_OP(Sum, ngraph::op)
NGRAPH_OP(Tan, ngraph::op)
NGRAPH_OP(Tanh, ngraph::op)
NGRAPH_OP(TopK, ngraph::op)
NGRAPH_OP(Transpose, ngraph::op)
NGRAPH_OP(EmbeddingLookup, ngraph::op)
@@ -224,7 +224,8 @@ bool runtime::gpu::GPU_Backend::is_supported(const Node& op) const
"SelectAndScatter",
"StopGradient",
"EmbeddingLookup",
"GenerateMask"};
"GenerateMask",
"Transpose"};
set<string> float_only = {"MaxPoolBackprop", "AvgPoolBackprop", "MaxPool", "Dot"};
@@ -66,6 +66,7 @@
#include "ngraph/op/experimental/quantized_dot_bias.hpp"
#include "ngraph/op/experimental/quantized_max_pool.hpp"
#include "ngraph/op/experimental/shape_of.hpp"
#include "ngraph/op/experimental/transpose.hpp"
#include "ngraph/op/floor.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/greater.hpp"
@@ -1373,6 +1374,11 @@ std::string runtime::gpu::GPU_Emitter::emit_TopK(EMIT_ARGS)
return compiled_function->add_to_runtime(index, function_name, args, out);
}
std::string runtime::gpu::GPU_Emitter::emit_Transpose(EMIT_ARGS)
{
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
string runtime::gpu::GPU_Emitter::node_names(const vector<GPUTensorWrapper>& args,
initializer_list<int> arg_indexes)
{
@@ -2012,6 +2012,7 @@ shared_ptr<runtime::Executable>
case OP_TYPEID::ShapeOf:
case OP_TYPEID::StopGradient:
case OP_TYPEID::TopK:
case OP_TYPEID::Transpose:
case OP_TYPEID::EmbeddingLookup:
case OP_TYPEID::Passthrough:
{
@@ -1247,6 +1247,7 @@ private:
}
break;
}
case OP_TYPEID::Transpose:
default: throw unsupported_op("Unsupported op '" + node.description() + "'");
#pragma GCC diagnostic pop
}
@@ -56,6 +56,7 @@
#include "ngraph/op/experimental/quantized_dot_bias.hpp"
#include "ngraph/op/experimental/quantized_max_pool.hpp"
#include "ngraph/op/experimental/shape_of.hpp"
#include "ngraph/op/experimental/transpose.hpp"
#include "ngraph/op/floor.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/greater.hpp"
@@ -1187,6 +1188,11 @@ static shared_ptr<ngraph::Function>
node = make_shared<op::TopK>(args[0], top_k_axis, target_type, k, compute_max);
break;
}
case OP_TYPEID::Transpose:
{
node = make_shared<op::Transpose>(args[0], args[1]);
break;
}
case OP_TYPEID::StopGradient:
{
node = make_shared<op::StopGradient>(args[0]);
@@ -1759,6 +1765,8 @@ static json write(const Node& n, bool binary_constant_data)
node["compute_max"] = tmp->get_compute_max();
break;
}
case OP_TYPEID::Transpose: { break;
}
case OP_TYPEID::UnknownOp: { break;
}
}
@@ -12148,3 +12148,242 @@ TEST(type_prop, DISABLED_benchmark_type_prop_convolution)
std::cout << "Constructed " << std::fixed << num_iterations << " Convolution ops in "
<< std::fixed << total_nanosec << " ns" << std::endl;
}
TEST(type_prop, transpose_arg_static_input_order_static_ok)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
auto input_order = make_shared<op::Parameter>(element::i64, Shape{4});
auto r = make_shared<op::Transpose>(arg, input_order);
EXPECT_EQ(r->get_output_element_type(0), element::f32);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
}
TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_static_ok)
{
auto arg = make_shared<op::Parameter>(
element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
auto input_order = make_shared<op::Parameter>(element::i64, Shape{4});
auto r = make_shared<op::Transpose>(arg, input_order);
EXPECT_EQ(r->get_output_element_type(0), element::f32);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
}
TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_ok)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
auto input_order = make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});
auto r = make_shared<op::Transpose>(arg, input_order);
EXPECT_EQ(r->get_output_element_type(0), element::f32);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
}
TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_static_dynamic_ok)
{
auto arg = make_shared<op::Parameter>(
element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
auto input_order = make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});
auto r = make_shared<op::Transpose>(arg, input_order);
EXPECT_EQ(r->get_output_element_type(0), element::f32);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
}
TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_static_dynamic_ok)
{
auto arg = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto input_order = make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});
auto r = make_shared<op::Transpose>(arg, input_order);
EXPECT_EQ(r->get_output_element_type(0), element::f32);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}
TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_dynamic_ok)
{
auto arg = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto input_order = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto r = make_shared<op::Transpose>(arg, input_order);
EXPECT_EQ(r->get_output_element_type(0), element::f32);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}
TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_dynamic_ok)
{
auto arg = make_shared<op::Parameter>(
element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
auto input_order = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto r = make_shared<op::Transpose>(arg, input_order);
EXPECT_EQ(r->get_output_element_type(0), element::f32);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
}
TEST(type_prop, transpose_arg_static_input_order_static_input_order_not_vector)
{
auto arg = make_shared<op::Parameter>(element::f32, PartialShape{2, 4, 6, 8});
auto input_order = make_shared<op::Parameter>(element::i64, PartialShape{2, 2});
try
{
auto r = make_shared<op::Transpose>(arg, input_order);
FAIL() << "Did not detect input order not vector";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must be a vector."));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_input_order_not_vector)
{
auto arg = make_shared<op::Parameter>(element::f32, PartialShape{2, 4, 6, 8});
auto input_order =
make_shared<op::Parameter>(element::i64, PartialShape{2, Dimension::dynamic()});
try
{
auto r = make_shared<op::Transpose>(arg, input_order);
FAIL() << "Did not detect input order not vector";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must be a vector."));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, transpose_arg_static_input_order_static_input_order_wrong_size)
{
auto arg = make_shared<op::Parameter>(element::f32, PartialShape{2, 4, 6, 8});
auto input_order = make_shared<op::Parameter>(element::i64, PartialShape{5});
try
{
auto r = make_shared<op::Transpose>(arg, input_order);
FAIL() << "Did not detect input order wrong size";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Input order must have shape [n], where n is the rank of arg."));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_static_input_order_not_vector)
{
auto arg = make_shared<op::Parameter>(
element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
auto input_order = make_shared<op::Parameter>(element::i64, PartialShape{2, 2});
try
{
auto r = make_shared<op::Transpose>(arg, input_order);
FAIL() << "Did not detect input order not vector";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must be a vector."));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop,
transpose_arg_rank_static_dynamic_input_order_rank_static_dynamic_input_order_not_vector)
{
auto arg = make_shared<op::Parameter>(
element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
auto input_order =
make_shared<op::Parameter>(element::i64, PartialShape{2, Dimension::dynamic()});
try
{
auto r = make_shared<op::Transpose>(arg, input_order);
FAIL() << "Did not detect input order not vector";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must be a vector."));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_static_dynamic_input_order_not_vector)
{
auto arg = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto input_order =
make_shared<op::Parameter>(element::i64, PartialShape{2, Dimension::dynamic()});
try
{
auto r = make_shared<op::Transpose>(arg, input_order);
FAIL() << "Did not detect input order not vector";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must be a vector."));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, transpose_input_order_et_dynamic_ok)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
auto input_order = make_shared<op::Parameter>(element::dynamic, Shape{4});
auto r = make_shared<op::Transpose>(arg, input_order);
EXPECT_EQ(r->get_output_element_type(0), element::f32);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
}
TEST(type_prop, transpose_input_order_et_wrong)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
auto input_order = make_shared<op::Parameter>(element::boolean, Shape{4});
try
{
auto r = make_shared<op::Transpose>(arg, input_order);
FAIL() << "Did not detect input element type not i64";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must have element type i64."));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}