Unverified Commit 41cb4a2d authored by Robert Kimball's avatar Robert Kimball Committed by GitHub

Element Type simplification (#313)

* remove ParameterizedConstant
* use simpler element Type definition
* Move TraitedType to NGVM directory
parent 39383029
......@@ -29,7 +29,7 @@ op::BinaryElementwiseArithmetic::BinaryElementwiseArithmetic(const std::string&
throw ngraph_error("Arguments must have the same tensor view element type");
}
if (arg0_element_type == element::Bool::element_type())
if (arg0_element_type == element::boolean)
{
throw ngraph_error(
"Operands for arithmetic operators must have numeric element type");
......
......@@ -30,7 +30,7 @@ op::BinaryElementwiseComparison::BinaryElementwiseComparison(const std::string&
"Arguments must have the same tensor view element type");
}
return element::Bool::element_type();
return element::boolean;
},
arg0,
arg1)
......
......@@ -29,10 +29,8 @@ void ngraph::op::Maximum::generate_adjoints(autodiff::Adjoints& adjoints,
auto x = get_input_op(0);
auto y = get_input_op(1);
adjoints.add_delta(x,
delta * make_shared<op::Convert>(make_shared<op::Greater>(x, y),
element::Float32::element_type()));
adjoints.add_delta(y,
delta * make_shared<op::Convert>(make_shared<op::Greater>(y, x),
element::Float32::element_type()));
adjoints.add_delta(
x, delta * make_shared<op::Convert>(make_shared<op::Greater>(x, y), element::f32));
adjoints.add_delta(
y, delta * make_shared<op::Convert>(make_shared<op::Greater>(y, x), element::f32));
}
......@@ -30,9 +30,7 @@ void ngraph::op::Minimum::generate_adjoints(autodiff::Adjoints& adjoints,
auto y = get_input_op(1);
adjoints.add_delta(x,
delta * make_shared<op::Convert>(make_shared<op::Less>(x, y),
element::Float32::element_type()));
delta * make_shared<op::Convert>(make_shared<op::Less>(x, y), element::f32));
adjoints.add_delta(y,
delta * make_shared<op::Convert>(make_shared<op::Less>(y, x),
element::Float32::element_type()));
delta * make_shared<op::Convert>(make_shared<op::Less>(y, x), element::f32));
}
......@@ -22,7 +22,7 @@ op::Not::Not(const std::shared_ptr<Node>& arg)
: UnaryElementwise(
"Not",
[](const ngraph::element::Type& arg_element_type) -> const ngraph::element::Type& {
if (arg_element_type != element::Bool::element_type())
if (arg_element_type != element::boolean)
{
throw ngraph_error(
"Operands for logical operators must have boolean element "
......
......@@ -33,7 +33,7 @@ op::Select::Select(const std::shared_ptr<Node>& arg0,
auto& input_1 = get_inputs().at(1);
auto& input_2 = get_inputs().at(2);
if (input_0.get_element_type() != element::Bool::element_type())
if (input_0.get_element_type() != element::boolean)
{
throw ngraph_error("Argument 0 for arithmetic operators must have boolean element type");
}
......@@ -56,9 +56,8 @@ void ngraph::op::Select::generate_adjoints(autodiff::Adjoints& adjoints,
auto x = get_inputs().at(1).get_output().get_node();
auto y = get_inputs().at(2).get_output().get_node();
auto p_as_float = std::make_shared<op::Convert>(p, element::Float32::element_type());
auto not_p_as_float = std::make_shared<op::Convert>(std::make_shared<op::Not>(p),
element::Float32::element_type());
auto p_as_float = std::make_shared<op::Convert>(p, element::f32);
auto not_p_as_float = std::make_shared<op::Convert>(std::make_shared<op::Not>(p), element::f32);
adjoints.add_delta(x, delta * p_as_float);
adjoints.add_delta(y, delta * not_p_as_float);
......
......@@ -25,7 +25,7 @@ op::Sum::Sum(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
{
auto& input = get_inputs().at(0);
auto& input_element_type = input.get_element_type();
if (input_element_type == element::Bool::element_type())
if (input_element_type == element::boolean)
{
throw ngraph_error("Argument for sum must have numeric element type");
}
......
......@@ -21,7 +21,7 @@ op::UnaryElementwiseArithmetic::UnaryElementwiseArithmetic(const std::string& no
: UnaryElementwise(
node_type,
[](const ngraph::element::Type& arg_element_type) -> const ngraph::element::Type& {
if (arg_element_type == element::Bool::element_type())
if (arg_element_type == element::boolean)
{
throw ngraph_error(
"Operands for arithmetic operators must have numeric element "
......
......@@ -617,7 +617,7 @@ void runtime::cpu::CPU_Emitter::EmitReshape(const ngraph::Node* n,
{
// Emit an MKL transpose call if possible
// clang-format off
if (result_element_type == ngraph::element::Float32::element_type())
if (result_element_type == ngraph::element::f32)
{
m_out << "{ // " << n->get_name() << " 2\n";
m_out.indent++;
......
......@@ -20,6 +20,7 @@
#include "ngraph/function.hpp"
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/ngvm/parameterized_tensor_view.hpp"
#include "ngraph/runtime/ngvm/types.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......
......@@ -15,6 +15,7 @@
#include "ngraph/runtime/ngvm/ngvm_backend.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/ngvm/parameterized_tensor_view.hpp"
#include "ngraph/runtime/ngvm/types.hpp"
using namespace ngraph::runtime::ngvm;
......@@ -29,47 +30,47 @@ std::shared_ptr<ngraph::runtime::TensorView>
const Shape& shape)
{
std::shared_ptr<TensorView> rc;
if (element_type == element::Bool::element_type())
if (element_type == element::boolean)
{
rc = std::make_shared<ParameterizedTensorView<element::Bool>>(shape);
}
else if (element_type == element::Float32::element_type())
else if (element_type == element::f32)
{
rc = std::make_shared<ParameterizedTensorView<element::Float32>>(shape);
}
else if (element_type == element::Float64::element_type())
else if (element_type == element::f64)
{
rc = std::make_shared<ParameterizedTensorView<element::Float64>>(shape);
}
else if (element_type == element::Int8::element_type())
else if (element_type == element::i8)
{
rc = std::make_shared<ParameterizedTensorView<element::Int8>>(shape);
}
else if (element_type == element::Int16::element_type())
else if (element_type == element::i16)
{
rc = std::make_shared<ParameterizedTensorView<element::Int16>>(shape);
}
else if (element_type == element::Int32::element_type())
else if (element_type == element::i32)
{
rc = std::make_shared<ParameterizedTensorView<element::Int32>>(shape);
}
else if (element_type == element::Int64::element_type())
else if (element_type == element::i64)
{
rc = std::make_shared<ParameterizedTensorView<element::Int64>>(shape);
}
else if (element_type == element::UInt8::element_type())
else if (element_type == element::u8)
{
rc = std::make_shared<ParameterizedTensorView<element::UInt8>>(shape);
}
else if (element_type == element::UInt16::element_type())
else if (element_type == element::u16)
{
rc = std::make_shared<ParameterizedTensorView<element::UInt16>>(shape);
}
else if (element_type == element::UInt32::element_type())
else if (element_type == element::u32)
{
rc = std::make_shared<ParameterizedTensorView<element::UInt32>>(shape);
}
else if (element_type == element::UInt64::element_type())
else if (element_type == element::u64)
{
rc = std::make_shared<ParameterizedTensorView<element::UInt64>>(shape);
}
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/types/element_type.hpp"
namespace ngraph
{
namespace element
{
// Provides a compile-time name for a C++ type.
// Used in TraitedType for the string that supplies the C++ type name during code generation,
// so it needs to be a valid C++ name.
template <typename T>
const char* traited_type_name()
{
// Unspecialized fallback: reaching this at runtime means no
// NGRAPH_DEFINE_TRAITED_TYPE_NAME(T) specialization was declared for T.
throw ngraph_error("Unknown type");
}
// Define a type string for a type T. Will make traited_type_name<T>() return "T"
// NOTE: must appear before the corresponding TraitedType<T> alias is used, since the
// stringified name feeds directly into generated C++ source.
#define NGRAPH_DEFINE_TRAITED_TYPE_NAME(T) \
template <> \
constexpr const char* traited_type_name<T>() \
{ \
return #T; \
}
// Literals (and probably other things we don't know about yet) need to have their C++ types
// and element types coordinated. Every element type corresponds to a TraitedType which provides
// access to both the instance and the C++ type used to hold the value during compilation.
template <typename T>
class TraitedType : public element::Type
{
// Non-copyable: element_type() below hands out a single shared instance per T,
// and identity comparisons rely on there being exactly one.
TraitedType(const TraitedType&) = delete;
TraitedType& operator=(const TraitedType&) = delete;
protected:
// Derive the Type descriptor entirely from T: bit width from sizeof, float-ness and
// signedness from type traits, and the display name from traited_type_name<T>().
TraitedType()
: Type(sizeof(T) * 8,
std::is_floating_point<T>::value,
std::is_signed<T>::value,
traited_type_name<T>())
{
}
public:
// This is the C++ type used to hold a value of this element type during compilation
using type = T;
// This returns a reference to an instance of this element type.
// Function-local static: constructed once on first use, lives for the program's lifetime.
static const TraitedType<T>& element_type()
{
static TraitedType<T> t;
return t;
}
};
// One traited alias per supported element type. Booleans are represented with char
// (one byte of storage) rather than bool.
NGRAPH_DEFINE_TRAITED_TYPE_NAME(char)
using Bool = TraitedType<char>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(float)
using Float32 = TraitedType<float>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(double)
using Float64 = TraitedType<double>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int8_t)
using Int8 = TraitedType<int8_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int16_t)
using Int16 = TraitedType<int16_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int32_t)
using Int32 = TraitedType<int32_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int64_t)
using Int64 = TraitedType<int64_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint8_t)
using UInt8 = TraitedType<uint8_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint16_t)
using UInt16 = TraitedType<uint16_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint32_t)
using UInt32 = TraitedType<uint32_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint64_t)
using UInt64 = TraitedType<uint64_t>;
}
}
......@@ -105,84 +105,5 @@ namespace ngraph
const Type& from<uint64_t>();
std::ostream& operator<<(std::ostream& out, const ngraph::element::Type& obj);
// Provides a compile-time name for a C++ type.
// Used in TraitedType for the string that supplies the C++ type name during code generation,
// so it needs to be a valid C++ name.
template <typename T>
const char* traited_type_name()
{
throw ngraph_error("Unknown type");
}
// Define a type string for a type T. Will make traited_type_name<T>() return "T"
#define NGRAPH_DEFINE_TRAITED_TYPE_NAME(T) \
template <> \
constexpr const char* traited_type_name<T>() \
{ \
return #T; \
}
// Literals (and probably other things we don't know about yet) need to have their C++ types
// and element types coordinated. Every element type corresponds to a TraitedType which provides
// access to both the instance and the C++ type used to hold the value during compilation.
template <typename T>
class TraitedType : public Type
{
TraitedType(const TraitedType&) = delete;
TraitedType& operator=(const TraitedType&) = delete;
protected:
TraitedType()
: Type(sizeof(T) * 8,
std::is_floating_point<T>::value,
std::is_signed<T>::value,
traited_type_name<T>())
{
}
public:
// This is the C++ type used to hold a value of this element type during compilation
using type = T;
// This returns a reference to an instance of this element type.
static const TraitedType<T>& element_type()
{
static TraitedType<T> t;
return t;
}
};
NGRAPH_DEFINE_TRAITED_TYPE_NAME(char)
using Bool = TraitedType<char>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(float)
using Float32 = TraitedType<float>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(double)
using Float64 = TraitedType<double>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int8_t)
using Int8 = TraitedType<int8_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int16_t)
using Int16 = TraitedType<int16_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int32_t)
using Int32 = TraitedType<int32_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(int64_t)
using Int64 = TraitedType<int64_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint8_t)
using UInt8 = TraitedType<uint8_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint16_t)
using UInt16 = TraitedType<uint16_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint32_t)
using UInt32 = TraitedType<uint32_t>;
NGRAPH_DEFINE_TRAITED_TYPE_NAME(uint64_t)
using UInt64 = TraitedType<uint64_t>;
}
}
This diff is collapsed.
......@@ -176,7 +176,7 @@ TEST(benchmark, concat_32x1x200_axis1_6)
for (size_t i = 0; i < n_arrays; i++)
{
auto param = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), shape_of_each_array));
make_shared<TensorViewType>(element::f32, shape_of_each_array));
params[i] = param;
params_as_nodes[i] = param;
}
......@@ -193,14 +193,12 @@ TEST(benchmark, concat_32x1x200_axis1_6)
for (size_t i = 0; i < n_arrays; i++)
{
auto tv = backend->make_primary_tensor_view(element::Float32::element_type(),
shape_of_each_array);
auto tv = backend->make_primary_tensor_view(element::f32, shape_of_each_array);
copy_data(tv, data_arrays[i]);
input_vals.push_back(tv);
}
auto result_tv =
backend->make_primary_tensor_view(element::Float32::element_type(), result_shape);
auto result_tv = backend->make_primary_tensor_view(element::f32, result_shape);
result_tvs.push_back(result_tv);
std::function<void()> cb = [input_vals, result_tv, cf]() {
......
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -23,17 +23,17 @@ using namespace ngraph;
TEST(build_graph, build_simple)
{
// Function with 4 parameters
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{7, 3});
auto arg1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{3});
auto arg2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{32, 7});
auto arg3 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{32, 7});
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{7, 3});
auto arg1 = make_shared<op::Parameter>(element::f32, Shape{3});
auto arg2 = make_shared<op::Parameter>(element::f32, Shape{32, 7});
auto arg3 = make_shared<op::Parameter>(element::f32, Shape{32, 7});
auto broadcast_1 = make_shared<op::Broadcast>(arg3, Shape{10, 32, 7}, AxisSet{0});
auto b1 = make_shared<op::Broadcast>(arg3, Shape{10, 32, 7}, AxisSet{0});
auto dot = make_shared<op::Dot>(arg2, arg0);
ASSERT_EQ(dot->get_input_ops()[0], arg2);
ASSERT_EQ(dot->get_input_ops()[1], arg0);
auto result_type = make_shared<TensorViewType>(element::Float32::element_type(), Shape{32, 3});
auto result_type = make_shared<TensorViewType>(element::f32, Shape{32, 3});
auto cluster_0 =
make_shared<Function>(dot, result_type, op::Parameters{arg0, arg1, arg2, arg3});
......@@ -44,7 +44,7 @@ TEST(build_graph, build_simple)
TEST(build_graph, as_type)
{
// Check upcasting a ValueType::ptr that is a TensorViewType to a TensorViewType and Tuple.
auto tv_vt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 3, 5});
auto tv_vt = make_shared<TensorViewType>(element::f32, Shape{2, 3, 5});
auto tv_tv = dynamic_pointer_cast<TensorViewType>(tv_vt);
ASSERT_EQ(tv_vt, tv_tv);
auto tv_tp = dynamic_pointer_cast<TupleType>(tv_vt);
......@@ -61,14 +61,14 @@ TEST(build_graph, as_type)
// Check node comparisons
TEST(build_graph, node_comparison)
{
    // Build a small graph (dot followed by add) plus an unrelated pattern graph.
    // Defect fixed: the block contained interleaved stale pre-refactor duplicates
    // (arg0/arg1/arg2/parg each declared twice, once with the removed
    // element::Float32::element_type() spelling) — redefinitions that cannot compile.
    // Only the post-refactor element::f32 declarations are kept.
    auto arg0 = make_shared<op::Parameter>(element::f32, Shape{32, 3});
    auto arg1 = make_shared<op::Parameter>(element::f32, Shape{3});
    auto arg2 = make_shared<op::Parameter>(element::f32, Shape{32});

    auto dot = make_shared<op::Dot>(arg0, arg1);
    auto add = make_shared<op::Add>(dot, arg2);

    // A scalar parameter used to build a comparison pattern.
    auto parg = make_shared<op::Parameter>(element::f32, Shape{});
    auto pattern_dot = make_shared<op::Dot>(parg, parg);
}
......@@ -78,7 +78,7 @@ TEST(build_graph, literal)
//auto float0 = FloatConstant::make(3.0);
vector<float> float_t{3.0};
auto float0 = make_shared<op::Constant>(element::f32, Shape{}, float_t);
auto float_scalar_type = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto float_scalar_type = make_shared<TensorViewType>(element::f32, Shape{});
ASSERT_EQ(float0->get_vector<float>(), std::vector<float>{3.0});
ASSERT_EQ(*float0->get_value_type(), *float_scalar_type);
auto d = make_shared<op::Dot>(float0, float0);
......@@ -87,7 +87,7 @@ TEST(build_graph, literal)
vector<int32_t> int32{3};
auto int32_0 = make_shared<op::Constant>(element::i32, Shape{}, int32);
auto int32_scalar_type = make_shared<TensorViewType>(element::Int32::element_type(), Shape{});
auto int32_scalar_type = make_shared<TensorViewType>(element::i32, Shape{});
ASSERT_EQ(int32_0->get_vector<int32_t>(), std::vector<int>{3});
ASSERT_EQ(*int32_0->get_value_type(), *int32_scalar_type);
ASSERT_NE(*int32_0->get_value_type(), *float_scalar_type);
......@@ -100,7 +100,7 @@ TEST(build_graph, tensor)
Shape shape{2, 3};
vector<float> float_t(shape_size(shape), 0);
auto float0 = make_shared<op::Constant>(element::f32, shape, float_t);
auto float_tensor_type = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto float_tensor_type = make_shared<TensorViewType>(element::f32, shape);
ASSERT_EQ(*float0->get_value_type(), *float_tensor_type);
auto d = make_shared<op::Add>(float0, float0);
ASSERT_EQ(d->get_input_ops().at(0), float0);
......@@ -109,7 +109,7 @@ TEST(build_graph, tensor)
Shape ishape{3, 5};
vector<int32_t> idata(shape_size(ishape), 0);
auto int32_0 = make_shared<op::Constant>(element::i32, ishape, idata);
auto int32_tensor_type = make_shared<TensorViewType>(element::Int32::element_type(), ishape);
auto int32_tensor_type = make_shared<TensorViewType>(element::i32, ishape);
ASSERT_EQ(*int32_0->get_value_type(), *int32_tensor_type);
ASSERT_NE(*int32_0->get_value_type(), *float_tensor_type);
}
......@@ -123,17 +123,17 @@ TEST(build_graph, arg_inverse)
TEST(build_graph, function_undeclared_parameters)
{
// Function with 4 parameters
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{7, 3});
auto arg1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{3});
auto arg2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{32, 7});
auto arg3 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{32, 7});
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{7, 3});
auto arg1 = make_shared<op::Parameter>(element::f32, Shape{3});
auto arg2 = make_shared<op::Parameter>(element::f32, Shape{32, 7});
auto arg3 = make_shared<op::Parameter>(element::f32, Shape{32, 7});
auto broadcast_1 = make_shared<op::Broadcast>(arg3, Shape{10, 32, 7}, AxisSet{0});
auto b1 = make_shared<op::Broadcast>(arg3, Shape{10, 32, 7}, AxisSet{0});
auto dot = make_shared<op::Dot>(arg2, arg0);
ASSERT_EQ(dot->get_input_ops()[0], arg2);
ASSERT_EQ(dot->get_input_ops()[1], arg0);
auto result_type = make_shared<TensorViewType>(element::Float32::element_type(), Shape{32, 3});
auto result_type = make_shared<TensorViewType>(element::f32, Shape{32, 3});
try
{
......@@ -155,18 +155,17 @@ TEST(build_graph, function_undeclared_parameters)
TEST(build_graph, function_incorrect_return_type)
{
// Function with 4 parameters
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{7, 3});
auto arg1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{3});
auto arg2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{32, 7});
auto arg3 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{32, 7});
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{7, 3});
auto arg1 = make_shared<op::Parameter>(element::f32, Shape{3});
auto arg2 = make_shared<op::Parameter>(element::f32, Shape{32, 7});
auto arg3 = make_shared<op::Parameter>(element::f32, Shape{32, 7});
auto broadcast_1 = make_shared<op::Broadcast>(arg3, Shape{10, 32, 7}, AxisSet{0});
auto b1 = make_shared<op::Broadcast>(arg3, Shape{10, 32, 7}, AxisSet{0});
auto dot = make_shared<op::Dot>(arg2, arg0);
ASSERT_EQ(dot->get_input_ops()[0], arg2);
ASSERT_EQ(dot->get_input_ops()[1], arg0);
auto incorrect_result_type =
make_shared<TensorViewType>(element::Int32::element_type(), Shape{32, 3});
auto incorrect_result_type = make_shared<TensorViewType>(element::i32, Shape{32, 3});
try
{
......@@ -191,10 +190,10 @@ TEST(build_graph, function_incorrect_return_type)
TEST(build_graph, function_no_declared_return_type)
{
// Function with 4 parameters
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{7, 3});
auto arg1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{3});
auto arg2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{32, 7});
auto arg3 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{32, 7});
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{7, 3});
auto arg1 = make_shared<op::Parameter>(element::f32, Shape{3});
auto arg2 = make_shared<op::Parameter>(element::f32, Shape{32, 7});
auto arg3 = make_shared<op::Parameter>(element::f32, Shape{32, 7});
auto broadcast_1 = make_shared<op::Broadcast>(arg3, Shape{10, 32, 7}, AxisSet{0});
auto b1 = make_shared<op::Broadcast>(arg3, Shape{10, 32, 7}, AxisSet{0});
auto dot = make_shared<op::Dot>(arg2, arg0);
......@@ -204,5 +203,5 @@ TEST(build_graph, function_no_declared_return_type)
auto f = make_shared<Function>(dot, op::Parameters{arg0, arg1, arg2, arg3});
auto f_rt = f->get_result_type();
ASSERT_EQ(*f_rt, TensorViewType(element::Float32::element_type(), Shape{32, 3}));
ASSERT_EQ(*f_rt, TensorViewType(element::f32, Shape{32, 3}));
}
......@@ -31,18 +31,18 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result(
std::function<std::shared_ptr<Node>(const std::shared_ptr<Node>&, const AxisSet&)> func)
{
auto shape_a = Shape{3, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto A = make_shared<op::Parameter>(element::f32, shape_a);
auto shape_rt = Shape{2};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_rt);
auto rt = make_shared<TensorViewType>(element::f32, shape_rt);
auto f = make_shared<Function>(func(A, {0}), rt, op::Parameters{A});
auto manager = runtime::Manager::get("NGVM");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
// Create some tensors for input/output
auto a = backend->make_primary_tensor_view(element::Float32::element_type(), shape_a);
auto a = backend->make_primary_tensor_view(element::f32, shape_a);
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape_rt);
auto result = backend->make_primary_tensor_view(element::f32, shape_rt);
cf->call({a}, {result});
return result;
......@@ -52,18 +52,18 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_true(
std::function<std::shared_ptr<Node>(const std::shared_ptr<Node>&, const AxisSet&, bool)> func)
{
auto shape_a = Shape{3, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto A = make_shared<op::Parameter>(element::f32, shape_a);
auto shape_rt = Shape{2};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_rt);
auto rt = make_shared<TensorViewType>(element::f32, shape_rt);
auto f = make_shared<Function>(func(A, {0}, true), rt, op::Parameters{A});
auto manager = runtime::Manager::get("NGVM");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
// Create some tensors for input/output
auto a = backend->make_primary_tensor_view(element::Float32::element_type(), shape_a);
auto a = backend->make_primary_tensor_view(element::f32, shape_a);
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape_rt);
auto result = backend->make_primary_tensor_view(element::f32, shape_rt);
cf->call({a}, {result});
return result;
......@@ -73,18 +73,18 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_false(
std::function<std::shared_ptr<Node>(const std::shared_ptr<Node>&, const AxisSet&, bool)> func)
{
auto shape_a = Shape{3, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto A = make_shared<op::Parameter>(element::f32, shape_a);
auto shape_rt = Shape{2};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_rt);
auto rt = make_shared<TensorViewType>(element::f32, shape_rt);
auto f = make_shared<Function>(func(A, {0}, false), rt, op::Parameters{A});
auto manager = runtime::Manager::get("NGVM");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
// Create some tensors for input/output
auto a = backend->make_primary_tensor_view(element::Float32::element_type(), shape_a);
auto a = backend->make_primary_tensor_view(element::f32, shape_a);
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape_rt);
auto result = backend->make_primary_tensor_view(element::f32, shape_rt);
cf->call({a}, {result});
return result;
......@@ -125,19 +125,19 @@ TEST(builder, numpy_transpose)
{
// 2D Transpose
Shape shape{2, 4};
auto param = std::make_shared<op::Parameter>(ngraph::element::Float32::element_type(), shape);
auto param = std::make_shared<op::Parameter>(ngraph::element::f32, shape);
auto transposed = std::dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param));
EXPECT_EQ(Shape({4, 2}), transposed->get_output_shape());
// Multidimensional Transpose
shape = Shape{2, 4, 8};
param = std::make_shared<op::Parameter>(ngraph::element::Float32::element_type(), shape);
param = std::make_shared<op::Parameter>(ngraph::element::f32, shape);
transposed = std::dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param));
EXPECT_EQ(Shape({8, 4, 2}), transposed->get_output_shape());
// Dimshuffle
shape = Shape{2, 4, 8};
param = std::make_shared<op::Parameter>(ngraph::element::Float32::element_type(), shape);
param = std::make_shared<op::Parameter>(ngraph::element::f32, shape);
transposed = std::dynamic_pointer_cast<op::Reshape>(
builder::numpy_transpose(param, AxisVector{2, 0, 1}));
EXPECT_EQ(Shape({8, 2, 4}), transposed->get_output_shape());
......
......@@ -7,7 +7,7 @@ using namespace ngraph;
// Test helper: builds a graph Parameter of element type f32 with the given shape.
std::shared_ptr<ngraph::op::Parameter> getParamFromShape(const ngraph::Shape& shape)
{
    // Defect fixed: a stale pre-refactor return statement (using the removed
    // element::Float32::element_type() spelling) was left above this one, making
    // the real return unreachable. Only the post-refactor form is kept.
    return std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, shape);
}
inline ngraph::Shape getShapeFromParam(const shared_ptr<ngraph::Node>& node)
......@@ -211,8 +211,7 @@ TEST(autobroadcast, make_node_3_args)
ngraph::Shape s21{2, 1};
ngraph::Shape s23{2, 3};
auto predicates =
std::make_shared<ngraph::op::Parameter>(ngraph::element::Bool::element_type(), s23);
auto predicates = std::make_shared<ngraph::op::Parameter>(ngraph::element::boolean, s23);
auto lhs = getParamFromShape(s21);
auto rhs = getParamFromShape(s23);
......
This diff is collapsed.
This diff is collapsed.
......@@ -23,7 +23,7 @@ using namespace ngraph;
TEST(input_output, param_tensor)
{
// Params have no arguments, so we can check that the value becomes a tensor output
auto tv_tp = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4});
auto tv_tp = make_shared<TensorViewType>(element::f32, Shape{2, 4});
auto param = make_shared<op::Parameter>(tv_tp);
ASSERT_EQ(param->get_outputs().size(), 1);
......@@ -40,8 +40,8 @@ TEST(input_output, param_tensor)
TEST(input_output, param_tuple)
{
// Same as param_tensor, but for a tuple
auto tv_tp_0 = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4});
auto tv_tp_1 = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4, 6});
auto tv_tp_0 = make_shared<TensorViewType>(element::f32, Shape{2, 4});
auto tv_tp_1 = make_shared<TensorViewType>(element::f32, Shape{2, 4, 6});
auto tp_tp = make_shared<TupleType>(ValueTypes{tv_tp_0, tv_tp_1});
auto param = make_shared<op::Parameter>(tp_tp);
......@@ -59,7 +59,7 @@ TEST(input_output, param_tuple)
TEST(input_output, simple_output)
{
auto tv_tp_0 = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4});
auto tv_tp_0 = make_shared<TensorViewType>(element::f32, Shape{2, 4});
auto param_0 = make_shared<op::Parameter>(tv_tp_0);
auto param_1 = make_shared<op::Parameter>(tv_tp_0);
auto add = make_shared<op::Add>(param_0, param_1);
......
......@@ -23,14 +23,14 @@ using namespace ngraph;
TEST(op, is_op)
{
    // A freshly constructed Parameter must be non-null and report is_parameter().
    // Defect fixed: arg0 was declared twice (the stale element::Float32::element_type()
    // line from the old diff side plus the new element::f32 line) — a redefinition
    // that cannot compile. Only the post-refactor declaration is kept.
    auto arg0 = make_shared<op::Parameter>(element::f32, Shape{1});
    ASSERT_NE(nullptr, arg0);
    EXPECT_TRUE(arg0->is_parameter());
}
TEST(op, is_parameter)
{
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1});
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{1});
ASSERT_NE(nullptr, arg0);
auto t0 = make_shared<op::Add>(arg0, arg0);
ASSERT_NE(nullptr, t0);
......
......@@ -47,17 +47,17 @@ TEST(pass_manager, module_add_function)
{
// First create "f(A,B,C) = (A+B)*C".
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_f = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto C = make_shared<op::Parameter>(element::f32, shape);
auto rt_f = make_shared<TensorViewType>(element::f32, shape);
auto f = make_shared<Function>((A + B) * C, rt_f, op::Parameters{A, B, C});
// Now make "g(X,Y,Z) = f(X,Y,Z) + f(X,Y,Z)"
auto X = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_g = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto X = make_shared<op::Parameter>(element::f32, shape);
auto Y = make_shared<op::Parameter>(element::f32, shape);
auto Z = make_shared<op::Parameter>(element::f32, shape);
auto rt_g = make_shared<TensorViewType>(element::f32, shape);
auto g = make_shared<Function>(make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}) +
make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}),
rt_g,
......
......@@ -201,7 +201,7 @@ static void run_passes(pass::Manager& pass_manager,
std::vector<shared_ptr<op::Parameter>> parms)
{
auto shape = Shape{1};
auto rt = make_shared<TensorViewType>(element::Int32::element_type(), shape);
auto rt = make_shared<TensorViewType>(element::i32, shape);
auto func = make_shared<Function>(graph, rt, op::Parameters{parms});
pass_manager.run_passes(func);
}
......@@ -215,8 +215,8 @@ TEST(pattern, graph_rewrite)
pass_manager.register_pass<TestGraphRewrite>();
{
auto a = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto b = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto a = make_shared<op::Parameter>(element::i32, shape);
auto b = make_shared<op::Parameter>(element::i32, shape);
auto iconst0 = construct_constant_node(0);
auto sum = (a + iconst0);
auto graph = b + sum;
......@@ -233,8 +233,8 @@ TEST(pattern, graph_rewrite)
}
{
auto a = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto b = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto a = make_shared<op::Parameter>(element::i32, shape);
auto b = make_shared<op::Parameter>(element::i32, shape);
auto iconst1 = construct_constant_node(1);
auto mul = (a * iconst1);
auto graph = b + mul;
......@@ -251,8 +251,8 @@ TEST(pattern, graph_rewrite)
}
{
auto a = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto b = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto a = make_shared<op::Parameter>(element::i32, shape);
auto b = make_shared<op::Parameter>(element::i32, shape);
auto iconst1 = construct_constant_node(1);
auto graph = ((((a * iconst1) * iconst1) * iconst1) * iconst1) + b;
run_passes(pass_manager, graph, {a, b});
......@@ -264,8 +264,8 @@ TEST(pattern, graph_rewrite)
}
{
auto a = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto b = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto a = make_shared<op::Parameter>(element::i32, shape);
auto b = make_shared<op::Parameter>(element::i32, shape);
auto iconst0 = construct_constant_node(0);
auto iconst1 = construct_constant_node(1);
auto graph = b + (iconst0 + ((a + iconst0) * iconst1));
......@@ -278,8 +278,8 @@ TEST(pattern, graph_rewrite)
}
{
auto a = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto b = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto a = make_shared<op::Parameter>(element::i32, shape);
auto b = make_shared<op::Parameter>(element::i32, shape);
auto iconst1 = construct_constant_node(1);
auto graph = b + (iconst1 * (iconst1 * (iconst1 * (iconst1 * a))));
run_passes(pass_manager, graph, {a, b});
......@@ -295,7 +295,7 @@ TEST(pattern, matcher)
{
auto shape = Shape{1};
auto a = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto a = make_shared<op::Parameter>(element::i32, shape);
TestMatcher n(nullptr);
ASSERT_TRUE(n.match(a, a));
......@@ -315,8 +315,8 @@ TEST(pattern, matcher)
pattern::op::Label::make_from_node(a, [](std::shared_ptr<Node> no) { return false; });
ASSERT_FALSE(n.match(pattern_false, a));
auto b = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto d = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto b = make_shared<op::Parameter>(element::i32, shape);
auto d = make_shared<op::Parameter>(element::i32, shape);
ASSERT_FALSE(n.match(d, b));
ASSERT_FALSE(n.match(abs + b, b + b));
......@@ -328,7 +328,7 @@ TEST(pattern, matcher)
ASSERT_TRUE(n.match(b + pattern, abs + b));
ASSERT_EQ(n.get_pattern_map()[pattern], abs);
auto c = make_shared<op::Parameter>(element::Int32::element_type(), shape);
auto c = make_shared<op::Parameter>(element::i32, shape);
ASSERT_TRUE(n.match(c * (b + pattern), c * (abs + b)));
ASSERT_EQ(n.get_pattern_map()[pattern], abs);
......@@ -341,7 +341,7 @@ TEST(pattern, matcher)
auto iconst1_1 = construct_constant_node(1);
ASSERT_TRUE(n.match(pattern * iconst1_0, a * iconst1_1)); //different iconst
ASSERT_EQ(n.get_pattern_map()[pattern], a);
auto fconst1_0 = op::Constant::create(element::Float32::element_type(), Shape{1}, {1});
auto fconst1_0 = op::Constant::create(element::f32, Shape{1}, {1});
auto patternf = pattern::op::Label::make_from_node(fconst1_0);
ASSERT_TRUE(n.match(patternf * fconst1_0, a * iconst1_1)); //different iconst
......
......@@ -35,7 +35,7 @@ static void copy_data(shared_ptr<runtime::TensorView> tv, const vector<T>& data)
TEST(serialize, tuple)
{
auto shape = Shape{2, 2};
auto tensor_view_type = make_shared<TensorViewType>(element::Int64::element_type(), shape);
auto tensor_view_type = make_shared<TensorViewType>(element::i64, shape);
auto A = make_shared<op::Parameter>(tensor_view_type);
auto B = make_shared<op::Parameter>(tensor_view_type);
......@@ -61,17 +61,17 @@ TEST(serialize, main)
{
// First create "f(A,B,C) = (A+B)*C".
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_f = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto C = make_shared<op::Parameter>(element::f32, shape);
auto rt_f = make_shared<TensorViewType>(element::f32, shape);
auto f = make_shared<Function>((A + B) * C, rt_f, op::Parameters{A, B, C}, "f");
// Now make "g(X,Y,Z) = f(X,Y,Z) + f(X,Y,Z)"
auto X = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_g = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto X = make_shared<op::Parameter>(element::f32, shape);
auto Y = make_shared<op::Parameter>(element::f32, shape);
auto Z = make_shared<op::Parameter>(element::f32, shape);
auto rt_g = make_shared<TensorViewType>(element::f32, shape);
auto g = make_shared<Function>(make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}) +
make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}),
rt_g,
......@@ -79,10 +79,10 @@ TEST(serialize, main)
"g");
// Now make "h(X,Y,Z) = g(X,Y,Z) + g(X,Y,Z)"
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_h = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto X1 = make_shared<op::Parameter>(element::f32, shape);
auto Y1 = make_shared<op::Parameter>(element::f32, shape);
auto Z1 = make_shared<op::Parameter>(element::f32, shape);
auto rt_h = make_shared<TensorViewType>(element::f32, shape);
auto h = make_shared<Function>(make_shared<op::FunctionCall>(g, Nodes{X1, Y1, Z1}) +
make_shared<op::FunctionCall>(g, Nodes{X1, Y1, Z1}),
rt_h,
......@@ -105,13 +105,13 @@ TEST(serialize, main)
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
auto x = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
auto x = backend->make_primary_tensor_view(element::f32, shape);
copy_data(x, vector<float>{1, 2, 3, 4});
auto y = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
auto y = backend->make_primary_tensor_view(element::f32, shape);
copy_data(y, vector<float>{5, 6, 7, 8});
auto z = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
auto z = backend->make_primary_tensor_view(element::f32, shape);
copy_data(z, vector<float>{9, 10, 11, 12});
auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
auto result = backend->make_primary_tensor_view(element::f32, shape);
cf->call({x, y, z}, {result});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector<float>());
......
......@@ -38,9 +38,9 @@ TEST(tensor, size)
pass_manager.register_pass<pass::Liveness>();
{
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3});
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{2, 3});
auto add = make_shared<op::Add>(arg0, arg0);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 3});
auto rt = make_shared<TensorViewType>(element::f32, Shape{2, 3});
auto f0 = make_shared<Function>(add, rt, op::Parameters{arg0});
pass_manager.run_passes(f0);
......@@ -52,9 +52,9 @@ TEST(tensor, size)
}
{
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{});
auto add = make_shared<op::Add>(arg0, arg0);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto rt = make_shared<TensorViewType>(element::f32, Shape{});
auto f0 = make_shared<Function>(add, rt, op::Parameters{arg0});
pass_manager.run_passes(f0);
......@@ -66,9 +66,9 @@ TEST(tensor, size)
}
{
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1});
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{1});
auto add = make_shared<op::Add>(arg0, arg0);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{1});
auto rt = make_shared<TensorViewType>(element::f32, Shape{1});
auto f0 = make_shared<Function>(add, rt, op::Parameters{arg0});
pass_manager.run_passes(f0);
......@@ -80,15 +80,13 @@ TEST(tensor, size)
}
}
template <typename ET>
void test_read_write(const std::vector<typename ET::type>& x)
template <typename T>
void test_read_write(const std::vector<T>& x)
{
using T = typename ET::type;
auto manager = ngraph::runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
auto a = backend->make_primary_tensor_view(ET::element_type(), Shape{2, x.size()});
auto a = backend->make_primary_tensor_view(element::from<T>(), Shape{2, x.size()});
std::vector<T> result(2 * x.size());
......@@ -98,7 +96,7 @@ void test_read_write(const std::vector<typename ET::type>& x)
std::copy(x.begin(), x.end(), result.begin() + x.size());
std::vector<T> af_vector(2 * x.size());
a->read(af_vector.data(), 0, af_vector.size() * sizeof(typename ET::type));
a->read(af_vector.data(), 0, af_vector.size() * sizeof(T));
ASSERT_EQ(af_vector, result);
std::vector<T> result1(x.size());
......@@ -110,8 +108,8 @@ void test_read_write(const std::vector<typename ET::type>& x)
TEST(tensor, read_write)
{
test_read_write<element::Float32>({1.0, 3.0, 5.0});
test_read_write<element::Int64>({-1, 2, 4});
test_read_write<float>({1.0, 3.0, 5.0});
test_read_write<int64_t>({-1, 2, 4});
}
TEST(tensor, output_flag)
......@@ -120,9 +118,9 @@ TEST(tensor, output_flag)
pass_manager.register_pass<pass::TopologicalSort>();
pass_manager.register_pass<pass::Liveness>();
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1});
auto arg0 = make_shared<op::Parameter>(element::f32, Shape{1});
auto add = make_shared<op::Add>(arg0, arg0);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{1});
auto rt = make_shared<TensorViewType>(element::f32, Shape{1});
auto f0 = make_shared<Function>(add, rt, op::Parameters{arg0});
pass_manager.run_passes(f0);
......
......@@ -36,7 +36,7 @@ TEST(topological_sort, basic)
vector<shared_ptr<op::Parameter>> args;
for (int i = 0; i < 10; i++)
{
auto arg = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto arg = make_shared<op::Parameter>(element::f32, Shape{});
ASSERT_NE(nullptr, arg);
args.push_back(arg);
}
......@@ -56,7 +56,7 @@ TEST(topological_sort, basic)
auto r0 = make_shared<op::Add>(t3, t4);
ASSERT_NE(nullptr, r0);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto rt = make_shared<TensorViewType>(element::f32, Shape{});
ASSERT_NE(nullptr, rt);
auto f0 = make_shared<Function>(r0, rt, args);
......@@ -84,7 +84,7 @@ TEST(topological_sort, basic)
// vector<shared_ptr<op::Parameter>> args;
// for (int i = 0; i < 10; i++)
// {
// auto arg = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1});
// auto arg = make_shared<op::Parameter>(element::f32, Shape{1});
// ASSERT_NE(nullptr, arg);
// args.push_back(arg);
// }
......@@ -107,16 +107,16 @@ TEST(benchmark, topological_sort)
// x[i+1] = tanh(dot(W,x[i])+b)
shared_ptr<Node> result;
vector<shared_ptr<op::Parameter>> args;
result = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
result = make_shared<op::Parameter>(element::f32, Shape{});
for (int i = 0; i < 1000000; i++)
{
auto in_1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto in_2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto in_1 = make_shared<op::Parameter>(element::f32, Shape{});
auto in_2 = make_shared<op::Parameter>(element::f32, Shape{});
args.push_back(in_1);
args.push_back(in_2);
result = make_cell(result, in_1, in_2);
}
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto rt = make_shared<TensorViewType>(element::f32, Shape{});
auto f0 = make_shared<Function>(result, rt, args);
timer.start();
......@@ -142,17 +142,17 @@ TEST(topological_sort, collect_functions)
{
// First create "f(A,B,C) = (A+B)*C".
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_f = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto C = make_shared<op::Parameter>(element::f32, shape);
auto rt_f = make_shared<TensorViewType>(element::f32, shape);
auto f = make_shared<Function>((A + B) * C, rt_f, op::Parameters{A, B, C}, "f");
// Now make "g(X,Y,Z) = f(X,Y,Z) + f(X,Y,Z)"
auto X = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_g = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto X = make_shared<op::Parameter>(element::f32, shape);
auto Y = make_shared<op::Parameter>(element::f32, shape);
auto Z = make_shared<op::Parameter>(element::f32, shape);
auto rt_g = make_shared<TensorViewType>(element::f32, shape);
auto g = make_shared<Function>(make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}) +
make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}),
rt_g,
......@@ -160,10 +160,10 @@ TEST(topological_sort, collect_functions)
"g");
// Now make "h(X,Y,Z) = g(X,Y,Z) + g(X,Y,Z)"
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_h = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto X1 = make_shared<op::Parameter>(element::f32, shape);
auto Y1 = make_shared<op::Parameter>(element::f32, shape);
auto Z1 = make_shared<op::Parameter>(element::f32, shape);
auto rt_h = make_shared<TensorViewType>(element::f32, shape);
auto h = make_shared<Function>(make_shared<op::FunctionCall>(g, Nodes{X1, Y1, Z1}) +
make_shared<op::FunctionCall>(g, Nodes{X1, Y1, Z1}),
rt_h,
......@@ -192,10 +192,10 @@ TEST(topological_sort, unused_function_arg)
// Create a function with an unused argument
// B is unused in the function but must be in the graph
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_f = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto C = make_shared<op::Parameter>(element::f32, shape);
auto rt_f = make_shared<TensorViewType>(element::f32, shape);
auto result = A + C + C;
auto f = make_shared<Function>(result, rt_f, op::Parameters{A, B, C}, "f");
......
This diff is collapsed.
......@@ -187,15 +187,15 @@ TEST(util, all_close)
auto backend = manager->allocate_backend();
// Create some tensors for input/output
auto a = backend->make_primary_tensor_view(element::Float32::element_type(), Shape{2, 3});
auto b = backend->make_primary_tensor_view(element::Float32::element_type(), Shape{2, 3});
auto a = backend->make_primary_tensor_view(element::f32, Shape{2, 3});
auto b = backend->make_primary_tensor_view(element::f32, Shape{2, 3});
copy_data(a, test::NDArray<float, 2>({{1, 2, 3}, {3, 4, 5}}).get_vector());
copy_data(b, test::NDArray<float, 2>({{1, 2, 3}, {3, 4, 5}}).get_vector());
EXPECT_TRUE(ngraph::test::all_close<float>(a, b));
auto c = backend->make_primary_tensor_view(element::Float32::element_type(), Shape{2, 3});
auto c = backend->make_primary_tensor_view(element::f32, Shape{2, 3});
copy_data(c, test::NDArray<float, 2>({{1.1f, 2, 3}, {3, 4, 5}}).get_vector());
EXPECT_FALSE(ngraph::test::all_close<float>(c, a, 0, .05f));
......@@ -209,17 +209,17 @@ TEST(util, traverse_functions)
{
// First create "f(A,B,C) = (A+B)*C".
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_f = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto C = make_shared<op::Parameter>(element::f32, shape);
auto rt_f = make_shared<TensorViewType>(element::f32, shape);
auto f = make_shared<Function>((A + B) * C, rt_f, op::Parameters{A, B, C}, "f");
// Now make "g(X,Y,Z) = f(X,Y,Z) + f(X,Y,Z)"
auto X = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_g = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto X = make_shared<op::Parameter>(element::f32, shape);
auto Y = make_shared<op::Parameter>(element::f32, shape);
auto Z = make_shared<op::Parameter>(element::f32, shape);
auto rt_g = make_shared<TensorViewType>(element::f32, shape);
auto g = make_shared<Function>(make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}) +
make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}),
rt_g,
......@@ -227,10 +227,10 @@ TEST(util, traverse_functions)
"g");
// Now make "h(X,Y,Z) = g(X,Y,Z) + g(X,Y,Z)"
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_h = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto X1 = make_shared<op::Parameter>(element::f32, shape);
auto Y1 = make_shared<op::Parameter>(element::f32, shape);
auto Z1 = make_shared<op::Parameter>(element::f32, shape);
auto rt_h = make_shared<TensorViewType>(element::f32, shape);
auto h = make_shared<Function>(make_shared<op::FunctionCall>(g, Nodes{X1, Y1, Z1}) +
make_shared<op::FunctionCall>(g, Nodes{X1, Y1, Z1}),
rt_h,
......@@ -247,19 +247,15 @@ class CloneTest : public ::testing::Test
public:
// (A + B) * C
Shape shape = Shape{2, 2};
std::shared_ptr<op::Parameter> A =
make_shared<op::Parameter>(element::Float32::element_type(), shape);
std::shared_ptr<op::Parameter> B =
make_shared<op::Parameter>(element::Float32::element_type(), shape);
std::shared_ptr<op::Parameter> C =
make_shared<op::Parameter>(element::Float32::element_type(), shape);
std::shared_ptr<op::Parameter> A = make_shared<op::Parameter>(element::f32, shape);
std::shared_ptr<op::Parameter> B = make_shared<op::Parameter>(element::f32, shape);
std::shared_ptr<op::Parameter> C = make_shared<op::Parameter>(element::f32, shape);
std::shared_ptr<Node> AplusB = A + B;
std::shared_ptr<Node> AplusBtimesC = AplusB * C;
NodeMap node_map;
std::list<std::shared_ptr<ngraph::Node>> nodes;
std::shared_ptr<TensorViewType> type =
make_shared<TensorViewType>(element::Float32::element_type(), shape);
std::shared_ptr<TensorViewType> type = make_shared<TensorViewType>(element::f32, shape);
std::shared_ptr<Function> func =
make_shared<Function>(AplusBtimesC, type, op::Parameters{A, B, C}, "f");
......@@ -314,7 +310,7 @@ TEST_F(CloneTest, clone_nodes_full)
TEST_F(CloneTest, clone_nodes_partial)
{
// map A -> A' prior to clone
auto Aprime = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Aprime = make_shared<op::Parameter>(element::f32, shape);
node_map.Add(A, Aprime);
auto cloned_nodes = clone_nodes(nodes, node_map);
......
......@@ -55,12 +55,12 @@ bool validate_list(const list<shared_ptr<Node>>& nodes)
shared_ptr<Function> make_test_graph()
{
auto arg_0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto arg_1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto arg_2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto arg_3 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto arg_4 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto arg_5 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto arg_0 = make_shared<op::Parameter>(element::f32, Shape{});
auto arg_1 = make_shared<op::Parameter>(element::f32, Shape{});
auto arg_2 = make_shared<op::Parameter>(element::f32, Shape{});
auto arg_3 = make_shared<op::Parameter>(element::f32, Shape{});
auto arg_4 = make_shared<op::Parameter>(element::f32, Shape{});
auto arg_5 = make_shared<op::Parameter>(element::f32, Shape{});
auto t0 = make_shared<op::Add>(arg_0, arg_1);
auto t1 = make_shared<op::Dot>(t0, arg_2);
......@@ -71,7 +71,7 @@ shared_ptr<Function> make_test_graph()
auto r0 = make_shared<op::Add>(t3, t4);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto rt = make_shared<TensorViewType>(element::f32, Shape{});
auto f0 =
make_shared<Function>(r0, rt, op::Parameters{arg_0, arg_1, arg_2, arg_3, arg_4, arg_5});
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment