Commit eec7201d authored by Adam Procter, committed by GitHub

Implement function calls through VM and type checking for reduce operator (#151)

* Add reduce op class and type propagation

* Implement FunctionCall through VM

* Change Function to require an explicit return type at construction time
parent b90bea14
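
The central API change: a Function now takes its result type explicitly at construction. A minimal sketch of the new pattern, condensed from the updated tests in this diff:

auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(A + B, rt, op::Parameters{A, B});
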
......@@ -29,9 +29,11 @@ set (SRC
ops/constant.cpp
ops/convert.cpp
ops/dot.cpp
ops/function_call.cpp
ops/get_tuple_element.cpp
ops/op.cpp
ops/parameter.cpp
ops/reduce.cpp
ops/select.cpp
ops/tuple.cpp
ops/unary_elementwise_arithmetic.cpp
......
......@@ -20,10 +20,12 @@ using namespace std;
using namespace ngraph;
Function::Function(const std::shared_ptr<Node>& result,
const std::shared_ptr<ValueType>& result_type,
const std::vector<std::shared_ptr<op::Parameter>>& parameters)
: m_result(result)
, m_parameters(parameters)
, m_name("Function")
, m_result_type(result_type)
{
size_t i = 0;
for (auto parameter : parameters)
......
......@@ -33,6 +33,7 @@ namespace ngraph
{
public:
Function(const std::shared_ptr<Node>& result,
const std::shared_ptr<ValueType>& result_type,
const std::vector<std::shared_ptr<op::Parameter>>& parameters);
std::shared_ptr<Node> get_result() { return m_result; }
......@@ -40,10 +41,15 @@ namespace ngraph
{
return m_parameters;
}
const std::shared_ptr<ValueType> get_result_type() const
{
return m_result_type;
}
std::string get_name() const { return m_name; }
protected:
std::shared_ptr<Node> m_result;
std::vector<std::shared_ptr<ngraph::op::Parameter>> m_parameters;
std::string m_name;
std::shared_ptr<ValueType> m_result_type;
};
}
......@@ -60,6 +60,7 @@
#include "ngraph/ops/equal.hpp"
#include "ngraph/ops/exp.hpp"
#include "ngraph/ops/floor.hpp"
#include "ngraph/ops/function_call.hpp"
#include "ngraph/ops/get_tuple_element.hpp"
#include "ngraph/ops/greater.hpp"
#include "ngraph/ops/less.hpp"
......@@ -72,6 +73,7 @@
#include "ngraph/ops/op.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/remainder.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/subtract.hpp"
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/topological_sort.hpp"
using namespace std;
using namespace ngraph::op;
void FunctionCall::propagate_types()
{
auto& function_params = m_function->get_parameters();
if (m_arguments.size() != function_params.size())
{
throw ngraph_error("Wrong number of arguments.");
}
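// Check each argument's type against the corresponding function parameter's type.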
for (size_t i = 0; i < m_arguments.size(); i++)
{
if (nullptr == m_arguments.at(i)->get_value_type())
{
throw ngraph_error("Function call argument is missing type.");
}
if (nullptr == function_params.at(i)->get_value_type())
{
throw ngraph_error("Function parameter is missing type.");
}
if (*(m_arguments.at(i)->get_value_type()) != *(function_params.at(i)->get_value_type()))
{
throw ngraph_error("Function argument type mismatch.");
}
}
auto f_result_type = m_function->get_result_type();
set_value_type_checked(f_result_type);
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/propagate_types.hpp"
namespace ngraph
{
namespace op
{
class FunctionCall : public Builtin
{
public:
///
/// @param function The function to be called
/// @param args The function arguments
///
FunctionCall(const std::shared_ptr<Function>& function,
const std::vector<std::shared_ptr<Node>>& args)
: Builtin(args)
, m_function(function)
{
}
virtual std::string description() const override { return "FunctionCall"; }
virtual void propagate_types() override;
std::shared_ptr<Function> get_function() const { return m_function; }
protected:
std::shared_ptr<Function> m_function;
};
}
}
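
A usage sketch for op::FunctionCall, mirroring the function_call_deduce test at the end of this diff; f and shape are assumed from the Function sketch above:

auto X = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto call = make_shared<op::FunctionCall>(f, Nodes{X, Y, Z});
call->propagate_types(); // the call's value type becomes f's result type
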
......@@ -25,16 +25,6 @@ namespace ngraph
// TODO: These class definitions are to be moved into separate files in the op directory
namespace op
{
/// A FunctionCall invokes a function on node arguments. In addition to the arguments,
/// we need to preserve the function.
class FunctionCall : public Node
{
virtual std::string description() const override { return "FunctionCall"; }
protected:
std::shared_ptr<Node> m_function;
};
/// This is an operation we handle directly, i.e. all type checking, etc.
/// are defined in C++ rather than in terms of ngraph operations.
class Builtin : public Node
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/topological_sort.hpp"
using namespace std;
using namespace ngraph::op;
void Reduce::propagate_types()
{
if (m_arguments.size() != 2)
{
throw ngraph_error("Wrong number of arguments.");
}
auto arg_reductee_type = m_arguments.at(0)->get_value_type();
if (nullptr == arg_reductee_type)
{
throw ngraph_error("Argument to reduce is missing type.");
}
auto arg_reductee_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg_reductee_type);
if (nullptr == arg_reductee_tensor_view_type)
{
throw ngraph_error("Argument to reduce is not a tensor view");
}
auto arg_init_type = m_arguments.at(1)->get_value_type();
if (nullptr == arg_init_type)
{
throw ngraph_error("Argument for initial value is missing type.");
}
auto arg_init_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg_init_type);
if (nullptr == arg_init_tensor_view_type)
{
throw ngraph_error("Argument for initial value is not a tensor view");
}
if (arg_init_tensor_view_type->get_shape().size() != 0)
{
throw ngraph_error("Argument for initial value is not a scalar");
}
if (arg_init_tensor_view_type->get_element_type() != arg_reductee_tensor_view_type->get_element_type())
{
throw ngraph_error("Element types for reductee and initial values do not match");
}
auto arg_reductee_shape = arg_reductee_tensor_view_type->get_shape();
for (auto axis : m_reduction_axes)
{
if (axis >= arg_reductee_shape.size())
{
throw ngraph_error("Reduction axis is out of bounds");
}
}
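// The result shape is the reductee's shape with the reduction axes removed.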
Shape result_shape;
for (size_t i = 0; i < arg_reductee_shape.size(); i++)
{
if (m_reduction_axes.count(i) == 0)
{
result_shape.push_back(arg_reductee_shape.at(i));
}
}
auto f_params = m_reduction_function->get_parameters();
if (f_params.size() != 2)
{
throw ngraph_error("Reduction function has wrong number of parameters (should be two)");
}
if (*(f_params.at(0)->get_value_type()) != *(arg_init_type))
{
throw ngraph_error("Argument 0 of reduction function has wrong type");
}
if (*(f_params.at(1)->get_value_type()) != *(arg_init_type))
{
throw ngraph_error("Argument 1 of reduction function has wrong type");
}
auto f_result_type = m_reduction_function->get_result_type();
if (*(f_result_type) != *(arg_init_type))
{
throw ngraph_error("Return type from reduction function does not match expected");
}
set_value_type_checked(make_shared<TensorViewType>(arg_reductee_tensor_view_type->get_element_type(), result_shape));
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
namespace ngraph
{
namespace op
{
class Reduce : public Builtin
{
public:
///
/// @param arg_reductee The tensor view to be reduced.
/// @param arg_init The initial value for reduction.
/// @param reduction_function The reduction function to use.
/// @param reduction_axes The axis positions (0-based) to be reduced.
///
Reduce(const std::shared_ptr<Node>& arg_reductee,
const std::shared_ptr<Node>& arg_init,
const std::shared_ptr<Function>& reduction_function,
const AxisSet& reduction_axes)
: Builtin({arg_reductee,arg_init})
, m_reduction_function(reduction_function)
, m_reduction_axes(reduction_axes)
{
}
virtual std::string description() const override { return "Reduce"; }
virtual void propagate_types() override;
std::shared_ptr<Function> get_reduction_function() const { return m_reduction_function; }
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
protected:
std::shared_ptr<Function> m_reduction_function;
AxisSet m_reduction_axes;
};
}
}
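
A usage sketch for op::Reduce, condensed from the reduce_deduce test below; arg_reductee is assumed to be a Float32 node with Shape{2, 4} and arg_init a Float32 scalar node:

auto f_param_0 = make_shared<op::Parameter>(
    make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_1 = make_shared<op::Parameter>(
    make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto sum = make_shared<Function>(f_param_0 + f_param_1, rt, op::Parameters{f_param_0, f_param_1});
auto reduce = make_shared<op::Reduce>(arg_reductee, arg_init, sum, AxisSet{0});
reduce->propagate_types(); // Shape{2, 4} reduced over axis 0 -> Shape{4}
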
......@@ -42,10 +42,10 @@ namespace ngraph
///
/// Tuples will be expanded into their tensor views to build the call frame.
void operator()(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& inputs,
const std::vector<std::shared_ptr<ngraph::runtime::Value>>& outpus);
const std::vector<std::shared_ptr<ngraph::runtime::Value>>& outputs);
/// @brief Invoke the function with tuples pre-expanded to their underlying tensor views.
void tensor_call(const TensorViewPtrs& inputs, const TensorViewPtrs& outpus);
void tensor_call(const TensorViewPtrs& inputs, const TensorViewPtrs& outputs);
void set_return() { m_return = true; }
......
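
For reference, a CallFrame is invoked directly with inputs followed by outputs; a minimal sketch following the pattern used throughout the tests below, where a, b, and result are tensor views created with runtime::make_tensor:

auto cf = external->make_call_frame();
(*cf)({a, b}, {result}); // tuples among inputs/outputs are expanded to tensor views
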
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace eigen
{
class CallInstruction : public Instruction
{
public:
CallInstruction(std::shared_ptr<ExternalFunction> ef,std::vector<size_t> in, std::vector<size_t> out)
: m_external_function(ef)
, m_in(in)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
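// Build a fresh call frame for the callee, then hand it the caller's
// tensor views for this call's inputs and outputs.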
std::shared_ptr<CallFrame> cf = m_external_function->make_call_frame();
std::vector<std::shared_ptr<ngraph::runtime::Value>> inputs;
std::vector<std::shared_ptr<ngraph::runtime::Value>> outputs;
for (auto in : m_in)
{
inputs.push_back(call_frame.get_tensor_view(in));
}
for (auto out : m_out)
{
outputs.push_back(call_frame.get_tensor_view(out));
}
(*cf)(inputs,outputs);
}
protected:
std::shared_ptr<ExternalFunction> m_external_function;
std::vector<size_t> m_in;
std::vector<size_t> m_out;
};
}
}
}
......@@ -29,6 +29,7 @@
#include "ngraph/ops/divide.hpp"
#include "ngraph/ops/dot.hpp"
#include "ngraph/ops/equal.hpp"
#include "ngraph/ops/function_call.hpp"
#include "ngraph/ops/get_tuple_element.hpp"
#include "ngraph/ops/less.hpp"
#include "ngraph/ops/log.hpp"
......@@ -36,6 +37,7 @@
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/negative.hpp"
#include "ngraph/ops/not_equal.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/subtract.hpp"
#include "ngraph/ops/tuple.hpp"
......@@ -45,6 +47,7 @@
#include "ngraph/pass/topological_sort.hpp"
#include "ngraph/runtime/eigen/abs.hpp"
#include "ngraph/runtime/eigen/add.hpp"
#include "ngraph/runtime/eigen/call.hpp"
#include "ngraph/runtime/eigen/concat_matrix.hpp"
#include "ngraph/runtime/eigen/concat_vector.hpp"
#include "ngraph/runtime/eigen/constant.hpp"
......@@ -79,12 +82,16 @@ ExternalFunction::ExternalFunction(const std::shared_ptr<ngraph::Function>& func
{
}
#define REGISTER_INSTRUCTION(op_class, instr_class, ...) \
op_map[type_index(typeid(op_class))] = [](const Node* n, \
ExternalFunction* ef, \
const std::vector<size_t>& in, \
const std::vector<size_t>& out) { \
ef->get_instructions()->push_back(make_shared<instr_class>(__VA_ARGS__)); \
#define REGISTER_TO_OP_MAP(op_class) \
op_map[type_index(typeid(op_class))] = [](const Node* n, \
ExternalFunction* ef, \
FunctionMap& function_map, \
const std::vector<size_t>& in, \
const std::vector<size_t>& out)
#define REGISTER_INSTRUCTION(op_class, instr_class, ...) \
REGISTER_TO_OP_MAP(op_class) { \
ef->get_instructions()->push_back(make_shared<instr_class>(__VA_ARGS__)); \
}
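// For illustration only (not part of this diff): REGISTER_INSTRUCTION(op_class, instr_class, args...)
// now expands, roughly, to
//   op_map[type_index(typeid(op_class))] = [](const Node* n, ExternalFunction* ef,
//                                             FunctionMap& function_map,
//                                             const std::vector<size_t>& in,
//                                             const std::vector<size_t>& out) {
//       ef->get_instructions()->push_back(make_shared<instr_class>(args...));
//   };
// Every handler now also receives the FunctionMap used to compile nested functions.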
#define REGISTER_UNOP(op_class, instr_class) \
......@@ -127,10 +134,8 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
dynamic_cast<const op::TensorConstant<element::Float32>*>(n)->get_value()->get_vector(),
out[0]);
op_map[type_index(typeid(op::Concat))] = [](const Node* n,
ExternalFunction* ef,
const std::vector<size_t>& in,
const std::vector<size_t>& out) {
REGISTER_TO_OP_MAP(op::Concat)
{
auto result_tensor_type =
dynamic_pointer_cast<const TensorViewType>(n->get_value_type());
assert(nullptr != result_tensor_type);
......@@ -157,10 +162,8 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
}
};
op_map[type_index(typeid(op::Dot))] = [](const Node* n,
ExternalFunction* ef,
const std::vector<size_t>& in,
const std::vector<size_t>& out) {
REGISTER_TO_OP_MAP(op::Dot)
{
auto& arg_nodes = n->get_arguments();
assert(arg_nodes.size() == 2);
......@@ -222,27 +225,21 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
};
// Parameter is a "runtime no-op" because the output tensor has already been filled.
op_map[type_index(typeid(op::Parameter))] = [](const Node* n,
ExternalFunction* ef,
const std::vector<size_t>& in,
const std::vector<size_t>& out) {};
REGISTER_TO_OP_MAP(op::Parameter) {};
// GetTupleElement will be spliced out, with the users of out redirected to in's source, but, for now, we need to copy.
op_map[type_index(typeid(op::GetTupleElement))] = [](const Node* n,
ExternalFunction* ef,
const std::vector<size_t>& in,
const std::vector<size_t>& out) {
REGISTER_TO_OP_MAP(op::GetTupleElement)
{
auto get_tuple_element = static_cast<const op::GetTupleElement*>(n);
ef->get_instructions()->push_back(
make_shared<runtime::eigen::CopyInstruction<element::Float32>>(
in.at(get_tuple_element->get_n()), out.at(0)));
};
// Tuple will be spliced out, with the users of out connected to the corresponding in's source, but, for now, we need to copy.
op_map[type_index(typeid(op::Tuple))] = [](const Node* n,
ExternalFunction* ef,
const std::vector<size_t>& in,
const std::vector<size_t>& out) {
REGISTER_TO_OP_MAP(op::Tuple)
{
for (size_t i = 0; i < in.size(); ++i)
{
ef->get_instructions()->push_back(
......@@ -251,12 +248,39 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
}
};
REGISTER_TO_OP_MAP(op::FunctionCall)
{
auto function_call = static_cast<const op::FunctionCall*>(n);
auto function = function_call->get_function();
std::shared_ptr<ExternalFunction> external;
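// Compile each nested Function at most once; later FunctionCalls reuse the
// ExternalFunction cached in function_map.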
try
{
external = function_map.at(function);
}
catch (const std::out_of_range&)
{
external = make_shared<ngraph::runtime::ExternalFunction>(
function_call->get_function());
function_map.insert({function,external});
}
ef->get_instructions()->push_back(
make_shared<runtime::eigen::CallInstruction>(external,in,out));
};
REGISTER_TO_OP_MAP(op::Reduce)
{
throw ngraph_error("op::Reduce not implemented yet");
};
initialized = true;
}
return op_map;
}
void ExternalFunction::compile()
void ExternalFunction::compile(FunctionMap& function_map)
{
if (m_is_compiled)
{
......@@ -331,7 +355,7 @@ void ExternalFunction::compile()
auto tv = output.get_tensor_view();
out.push_back(tensor_index.at(tv));
}
handler_it->second(node, this, in, out);
handler_it->second(node, this, function_map, in, out);
}
m_instructions->push_back(make_shared<runtime::eigen::ReturnInstruction>());
m_is_compiled = true;
......@@ -342,10 +366,16 @@ void ExternalFunction::compile()
}
shared_ptr<ngraph::runtime::CallFrame> ExternalFunction::make_call_frame()
{
FunctionMap function_map;
return make_call_frame(function_map);
}
shared_ptr<ngraph::runtime::CallFrame> ExternalFunction::make_call_frame(FunctionMap& function_map)
{
if (!m_is_compiled)
{
compile();
compile(function_map);
}
std::vector<std::shared_ptr<ngraph::runtime::TensorView>> temps;
for (auto tv : m_temp_views)
......
......@@ -27,16 +27,20 @@ namespace ngraph
{
class ExternalFunction
{
using OpFunction = std::function<void(const ngraph::Node*,
ExternalFunction*,
const std::vector<size_t>& inputs,
const std::vector<size_t>& outputs)>;
using OpMap = std::unordered_map<std::type_index, OpFunction>;
using FunctionMap = std::unordered_map<std::shared_ptr<Function>,std::shared_ptr<ExternalFunction>>;
using OpFunction = std::function<void(const ngraph::Node*,
ExternalFunction*,
FunctionMap&,
const std::vector<size_t>& inputs,
const std::vector<size_t>& outputs)>;
using OpMap = std::unordered_map<std::type_index, OpFunction>;
public:
ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
bool release_function = true);
std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame();
std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame(FunctionMap& function_map);
std::shared_ptr<std::vector<std::shared_ptr<ngraph::runtime::Instruction>>>
get_instructions()
{
......@@ -48,6 +52,7 @@ namespace ngraph
protected:
void compile();
void compile(FunctionMap& function_map);
std::shared_ptr<ngraph::Function> m_function;
bool m_release_function;
......
......@@ -33,7 +33,8 @@ TEST(build_graph, build_simple)
ASSERT_EQ(dot->get_arguments()[0], arg2);
ASSERT_EQ(dot->get_arguments()[1], arg0);
auto cluster_0 = make_shared<Function>(dot, op::Parameters{arg0, arg1, arg2, arg3});
auto result_type = make_shared<TensorViewType>(element::Float32::element_type(), Shape{10,32,7});
auto cluster_0 = make_shared<Function>(dot, result_type, op::Parameters{arg0, arg1, arg2, arg3});
ASSERT_EQ(cluster_0->get_result(), dot);
}
......
......@@ -25,7 +25,8 @@ TEST(execute, test_abc)
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>((A + B) * C, op::Parameters{A, B, C});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>((A + B) * C, rt, op::Parameters{A, B, C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -62,7 +63,9 @@ TEST(execute, test_abc_tuple)
auto A = make_shared<op::GetTupleElement>(ABC, 0);
auto B = make_shared<op::GetTupleElement>(ABC, 1);
auto C = make_shared<op::GetTupleElement>(ABC, 2);
auto f = make_shared<Function>(make_shared<op::Tuple>(Nodes{(A + B) * C}), op::Parameters{ABC});
auto f = make_shared<Function>(make_shared<op::Tuple>(Nodes{(A + B) * C}),
tensor_view_type,
op::Parameters{ABC});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -100,8 +103,12 @@ TEST(execute, test_tuple_result)
auto A_add_B = make_shared<op::Add>(A, B);
auto A_add_B_mul_C = make_shared<op::Multiply>(A_add_B, C);
auto rt = make_shared<TupleType>(
std::vector<shared_ptr<const ValueType>>(
{make_shared<TensorViewType>(element::Float32::element_type(), shape),
make_shared<TensorViewType>(element::Float32::element_type(), shape)}));
auto f = make_shared<Function>(make_shared<op::Tuple>(Nodes{A_add_B, A_add_B_mul_C}),
op::Parameters{A, B, C});
rt, op::Parameters{A, B, C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -124,9 +131,10 @@ TEST(execute, test_tuple_result)
TEST(execute, test_abs)
{
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Abs>(A), op::Parameters{A});
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto result_type = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Abs>(A), result_type, op::Parameters{A});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -140,8 +148,6 @@ TEST(execute, test_abs)
ASSERT_EQ((vector<float>{1, 2, 0, 4.8f}), result->get_vector());
}
TEST(execute, test_concat_matrix_colwise)
{
auto shape_a = Shape{2, 2};
......@@ -151,7 +157,8 @@ TEST(execute, test_concat_matrix_colwise)
auto shape_c = Shape{2, 3};
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape_c);
auto shape_r = Shape{2, 8};
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},1), op::Parameters{A,B,C});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2,8});
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},1), rt, op::Parameters{A,B,C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -182,7 +189,8 @@ TEST(execute, test_concat_matrix_rowwise)
auto shape_c = Shape{3, 2};
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape_c);
auto shape_r = Shape{8, 2};
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},0), op::Parameters{A,B,C});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{8,2});
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},0), rt, op::Parameters{A,B,C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -221,7 +229,8 @@ TEST(execute, test_concat_vector)
auto shape_c = Shape{2};
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape_c);
auto shape_r = Shape{12};
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},0), op::Parameters{A,B,C});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{12});
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},0), rt, op::Parameters{A,B,C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -244,7 +253,8 @@ TEST(execute, test_divide)
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Divide>(A, B), op::Parameters{A, B});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Divide>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -265,7 +275,8 @@ TEST(execute, test_equal)
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Equal>(A, B), op::Parameters{A, B});
auto rt = make_shared<TensorViewType>(element::Bool::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Equal>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -287,7 +298,8 @@ TEST(execute, test_dot1d)
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto shape_r = Shape{1};
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -309,7 +321,8 @@ TEST(execute, test_dot2d)
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto shape_r = Shape{2,2};
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -328,194 +341,203 @@ TEST(execute, test_dot2d)
43,50}), result->get_vector());
}
TEST(execute, test_lessthan)
TEST(execute, test_dot_scalar_tensor_arg0)
{
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Less>(A, B), op::Parameters{A, B});
auto shape_a = Shape{};
auto shape_b = Shape{2,2,2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_b);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape);
*b = vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5};
auto result = ngraph::runtime::make_tensor<element::Bool>(shape);
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{6};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{1, 2, 3, 4, 5, 6, 7, 8};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_b);
(*cf)({a, b}, {result});
ASSERT_EQ((vector<char>{0, 0, 1, 0, 1, 0, 0, 1}), result->get_vector());
(*cf)({a,b}, {result});
ASSERT_EQ((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}), result->get_vector());
}
TEST(execute, test_log)
TEST(execute, test_dot_scalar_tensor_arg1)
{
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Log>(A), op::Parameters{A});
auto shape_a = Shape{2,2,2};
auto shape_b = Shape{};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_a);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{expf(1), expf(2), expf(3), expf(4), expf(5), expf(6), expf(7), expf(8)};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{1, 2, 3, 4, 5, 6, 7, 8};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{6};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_a);
(*cf)({a}, {result});
ASSERT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}), result->get_vector());
(*cf)({a,b}, {result});
ASSERT_EQ((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}), result->get_vector());
}
TEST(execute, test_maximum)
TEST(execute, test_dot_scalar_scalar)
{
auto shape = Shape{2, 2, 2};
auto shape = Shape{};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Maximum>(A, B), op::Parameters{A, B});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1};
*a = vector<float>{8};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape);
*b = vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5};
*b = vector<float>{6};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
(*cf)({a, b}, {result});
ASSERT_EQ((vector<float>{1, 8, 4, 17, 0, 0.5, 2, 1.5}), result->get_vector());
(*cf)({a,b}, {result});
ASSERT_EQ((vector<float>{48}), result->get_vector());
}
TEST(execute, test_negative)
TEST(execute, test_dot_matrix_vector)
{
auto shape = Shape{2, 3};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Negative>(A), op::Parameters{A});
auto shape_a = Shape{4,4};
auto shape_b = Shape{4};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_b);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto shape_r = Shape{4};
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{1, -2, 0, -4.8f, 8.6f, -8.6f};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{ 1, 2, 3, 4,
5, 6, 7, 8,
9,10,11,12,
13,14,15,16};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{17,18,19,20};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a}, {result});
ASSERT_EQ((vector<float>{-1, 2, 0, 4.8f, -8.6f, 8.6f}), result->get_vector());
(*cf)({a,b}, {result});
ASSERT_EQ((vector<float>{190,486,782,1078}), result->get_vector());
}
TEST(execute, test_notequal)
TEST(execute, test_lessthan)
{
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::NotEqual>(A, B), op::Parameters{A, B});
auto rt = make_shared<TensorViewType>(element::Bool::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Less>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{1, 8, -8, 17, -0.5, 0, 1, 1};
*a = vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape);
*b = vector<float>{1, 8, 4, 8, 0, 0, 1, 1.5};
*b = vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5};
auto result = ngraph::runtime::make_tensor<element::Bool>(shape);
(*cf)({a, b}, {result});
ASSERT_EQ((vector<char>{0, 0, 1, 1, 1, 0, 0, 1}), result->get_vector());
ASSERT_EQ((vector<char>{0, 0, 1, 0, 1, 0, 0, 1}), result->get_vector());
}
TEST(execute, test_scalar_tensor_arg0)
TEST(execute, test_log)
{
auto shape_a = Shape{};
auto shape_b = Shape{2,2,2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Log>(A), rt, op::Parameters{A});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{6};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{1, 2, 3, 4, 5, 6, 7, 8};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_b);
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{expf(1), expf(2), expf(3), expf(4), expf(5), expf(6), expf(7), expf(8)};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
(*cf)({a,b}, {result});
ASSERT_EQ((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}), result->get_vector());
(*cf)({a}, {result});
ASSERT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}), result->get_vector());
}
TEST(execute, test_scalar_tensor_arg1)
TEST(execute, test_maximum)
{
auto shape_a = Shape{2,2,2};
auto shape_b = Shape{};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Maximum>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{1, 2, 3, 4, 5, 6, 7, 8};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{6};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_a);
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape);
*b = vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
(*cf)({a,b}, {result});
ASSERT_EQ((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}), result->get_vector());
(*cf)({a, b}, {result});
ASSERT_EQ((vector<float>{1, 8, 4, 17, 0, 0.5, 2, 1.5}), result->get_vector());
}
TEST(execute, test_scalar_scalar)
TEST(execute, test_negative)
{
auto shape = Shape{};
auto shape = Shape{2, 3};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Negative>(A), rt, op::Parameters{A});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{8};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape);
*b = vector<float>{6};
*a = vector<float>{1, -2, 0, -4.8f, 8.6f, -8.6f};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
(*cf)({a,b}, {result});
ASSERT_EQ((vector<float>{48}), result->get_vector());
(*cf)({a}, {result});
ASSERT_EQ((vector<float>{-1, 2, 0, 4.8f, -8.6f, 8.6f}), result->get_vector());
}
TEST(execute, test_matrix_vector)
TEST(execute, test_notequal)
{
auto shape_a = Shape{4,4};
auto shape_b = Shape{4};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});
auto shape_r = Shape{4};
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt = make_shared<TensorViewType>(element::Bool::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::NotEqual>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{ 1, 2, 3, 4,
5, 6, 7, 8,
9,10,11,12,
13,14,15,16};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{17,18,19,20};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{1, 8, -8, 17, -0.5, 0, 1, 1};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape);
*b = vector<float>{1, 8, 4, 8, 0, 0, 1, 1.5};
auto result = ngraph::runtime::make_tensor<element::Bool>(shape);
(*cf)({a,b}, {result});
ASSERT_EQ((vector<float>{190,486,782,1078}), result->get_vector());
(*cf)({a, b}, {result});
ASSERT_EQ((vector<char>{0, 0, 1, 1, 1, 0, 0, 1}), result->get_vector());
}
TEST(execute, test_select)
......@@ -524,7 +546,8 @@ TEST(execute, test_select)
auto A = make_shared<op::Parameter>(element::Bool::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Select>(A, B, C), op::Parameters{A, B, C});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Select>(A, B, C), rt, op::Parameters{A, B, C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -547,7 +570,8 @@ TEST(execute, test_subtract)
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Subtract>(A, B), op::Parameters{A, B});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Subtract>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -567,7 +591,8 @@ TEST(execute, test_scalar_constant)
{
auto shape = Shape{};
auto A = make_shared<op::ScalarConstant<element::Float32>>(-3.0f);
auto f = make_shared<Function>(A, op::Parameters{});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(A, rt, op::Parameters{});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -584,7 +609,8 @@ TEST(execute, test_tensor_constant)
auto shape = Shape{2,2,2};
auto A = make_shared<op::TensorConstant<element::Float32>>(shape);
A->get_value()->get_vector() = {1,2,3,4,5,6,7,8};
auto f = make_shared<Function>(A, op::Parameters{});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(A, rt, op::Parameters{});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -601,7 +627,8 @@ TEST(execute, test_tensor_constant_with_op)
auto shape = Shape{2,2,2};
auto A = make_shared<op::TensorConstant<element::Float32>>(shape);
A->get_value()->get_vector() = {-1,2,3,-4,5,-6,-7,8};
auto f = make_shared<Function>(make_shared<op::Abs>(A), op::Parameters{});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Abs>(A), rt, op::Parameters{});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -612,3 +639,46 @@ TEST(execute, test_tensor_constant_with_op)
(*cf)({}, {result});
ASSERT_EQ((vector<float>{1,2,3,4,5,6,7,8}), result->get_vector());
}
TEST(execute, test_function_call)
{
// First create "f(A,B,C) = (A+B)*C".
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_f = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>((A + B) * C, rt_f, op::Parameters{A, B, C});
// Now make "g(X,Y,Z) = f(X,Y,Z) + f(X,Y,Z)"
auto X = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_g = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto g = make_shared<Function>(
make_shared<op::FunctionCall>(f,Nodes{X,Y,Z})
+ make_shared<op::FunctionCall>(f,Nodes{X,Y,Z}),
rt_g,
op::Parameters{X, Y, Z});
// Now call g on some test vectors.
auto external = make_shared<ngraph::runtime::ExternalFunction>(g);
auto cf = external->make_call_frame();
auto x = ngraph::runtime::make_tensor<element::Float32>(shape);
*x = vector<float>{1, 2, 3, 4};
auto y = ngraph::runtime::make_tensor<element::Float32>(shape);
*y = vector<float>{5, 6, 7, 8};
auto z = ngraph::runtime::make_tensor<element::Float32>(shape);
*z = vector<float>{9, 10, 11, 12};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
(*cf)({x, y, z}, {result});
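// g(x,y,z) = 2*(x+y)*z elementwise: 2*(1+5)*9 = 108, 2*(2+6)*10 = 160,
// 2*(3+7)*11 = 220, 2*(4+8)*12 = 288; swapping x and y leaves this unchanged.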
ASSERT_EQ((vector<float>{108, 160, 220, 288}), result->get_vector());
(*cf)({y, x, z}, {result});
ASSERT_EQ((vector<float>{108, 160, 220, 288}), result->get_vector());
(*cf)({x, z, y}, {result});
ASSERT_EQ((vector<float>{100, 144, 196, 256}), result->get_vector());
}
......@@ -44,7 +44,8 @@ TEST(tensor, size)
{
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3});
auto add = make_shared<op::Add>(arg0, arg0);
auto f0 = make_shared<Function>(add, op::Parameters{arg0});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 3});
auto f0 = make_shared<Function>(add, rt, op::Parameters{arg0});
pass_manager.run_passes(f0);
......@@ -57,7 +58,8 @@ TEST(tensor, size)
{
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto add = make_shared<op::Add>(arg0, arg0);
auto f0 = make_shared<Function>(add, op::Parameters{arg0});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f0 = make_shared<Function>(add, rt, op::Parameters{arg0});
pass_manager.run_passes(f0);
......@@ -70,7 +72,8 @@ TEST(tensor, size)
{
auto arg0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1});
auto add = make_shared<op::Add>(arg0, arg0);
auto f0 = make_shared<Function>(add, op::Parameters{arg0});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{1});
auto f0 = make_shared<Function>(add, rt, op::Parameters{arg0});
pass_manager.run_passes(f0);
......
......@@ -71,7 +71,9 @@ shared_ptr<Function> make_test_graph()
auto r0 = make_shared<op::Add>(t3, t4);
auto f0 = make_shared<Function>(r0, op::Parameters{arg_0, arg_1, arg_2, arg_3, arg_4, arg_5});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f0 = make_shared<Function>(r0, rt, op::Parameters{arg_0, arg_1, arg_2, arg_3, arg_4, arg_5});
return f0;
}
......
......@@ -35,7 +35,7 @@ TEST(topological_sort, basic)
vector<shared_ptr<op::Parameter>> args;
for (int i = 0; i < 10; i++)
{
auto arg = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1});
auto arg = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
ASSERT_NE(nullptr, arg);
args.push_back(arg);
}
......@@ -55,7 +55,10 @@ TEST(topological_sort, basic)
auto r0 = make_shared<op::Add>(t3, t4);
ASSERT_NE(nullptr, r0);
auto f0 = make_shared<Function>(r0, args);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
ASSERT_NE(nullptr, rt);
auto f0 = make_shared<Function>(r0, rt, args);
ASSERT_NE(nullptr, f0);
ASSERT_EQ(2, r0->get_arguments().size());
......@@ -102,16 +105,17 @@ TEST(benchmark, topological_sort)
// x[i+1] = tanh(dot(W,x[i])+b)
shared_ptr<Node> result;
vector<shared_ptr<op::Parameter>> args;
result = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1});
result = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
for (int i=0; i<1000000; i++)
{
auto in_1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1});
auto in_2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1});
auto in_1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto in_2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
args.push_back(in_1);
args.push_back(in_2);
result = make_cell(result, in_1, in_2);
}
auto f0 = make_shared<Function>(result, args);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f0 = make_shared<Function>(result, rt, args);
timer.start();
pass::Manager pass_manager;
......
......@@ -692,3 +692,318 @@ TEST(type_prop, select_elem_mismatch_bc)
}
}
TEST(type_prop, reduce_deduce)
{
auto param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(f_param_0 + f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
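// Reducing Shape{2, 4} over axis 0 leaves Shape{4}.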
r0->propagate_types();
ASSERT_EQ(*(r0->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{4}));
auto r1 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{1});
r1->propagate_types();
ASSERT_EQ(*(r1->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{2}));
auto r01 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0,1});
r01->propagate_types();
ASSERT_EQ(*(r01->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{}));
auto r_none = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{});
r_none->propagate_types();
ASSERT_EQ(*(r_none->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{2,4}));
}
TEST(type_prop, reduce_deduce_correct)
{
auto param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(f_param_0 + f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
r0->set_value_type(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{4}));
r0->propagate_types();
ASSERT_EQ(*(r0->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{4}));
}
TEST(type_prop, reduce_nonscalar)
{
auto param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2}));
auto f_param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(f_param_0 + f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
try
{
r0->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect incorrect element types for arithmetic operator";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Argument for initial value is not a scalar"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_elem_type_mismatch)
{
auto param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Bool::element_type(), Shape{}));
auto f_param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(f_param_0 + f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
try
{
r0->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect incorrect element types for arithmetic operator";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Element types for reductee and initial values do not match"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_function_return_type_mismatch)
{
auto param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Bool::element_type(), Shape{});
auto f = make_shared<Function>(
make_shared<op::Equal>(f_param_0,f_param_1),
rt,
op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
try
{
r0->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect incorrect element types for arithmetic operator";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Return type from reduction function does not match expected"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_function_arg0_type_mismatch)
{
auto param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Bool::element_type(), Shape{}));
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(
f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
try
{
r0->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect incorrect element types for arithmetic operator";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Argument 0 of reduction function has wrong type"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_function_arg1_type_mismatch)
{
auto param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Bool::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(
f_param_0,
rt,
op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
try
{
r0->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect incorrect element types for arithmetic operator";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Argument 1 of reduction function has wrong type"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_function_arg_count_mismatch)
{
auto param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_2 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(
f_param_0 + f_param_1 + f_param_2,
rt,
op::Parameters{f_param_0, f_param_1, f_param_2});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
try
{
r0->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect incorrect element types for arithmetic operator";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Reduction function has wrong number of parameters (should be two)"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, reduce_axis_oob)
{
auto param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_0 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(f_param_0 + f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto r = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0,2,1});
try
{
r->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect incorrect element types for arithmetic operator";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Reduction axis is out of bounds"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, function_call_deduce)
{
// First create "f(A,B,C) = (A+B)*C".
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_f = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>((A + B) * C, rt_f, op::Parameters{A, B, C});
// Now make "f(X,Y,Z) + f(X,Y,Z)"
auto X = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto r = make_shared<op::FunctionCall>(f,Nodes{X,Y,Z});
auto r_p_r = r + r;
r->propagate_types();
r_p_r->propagate_types();
auto r_p_r_vt = r_p_r->get_value_type();
ASSERT_EQ(*r_p_r_vt, TensorViewType(element::Float32::element_type(), shape));
}