Commit c7b51d2d authored by Robert Kimball

apply new .clang-format

parent 158de495
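Note: the .clang-format file applied by this commit is not itself shown in the hunks below. The following is a hypothetical reconstruction, inferred only from the formatting changes visible in the diff (Allman braces, a roughly 100-column limit, alphabetized includes, one-line inline accessors kept, short case labels collapsed onto one line, comma-first constructor initializers, binary operators left at end of line when wrapping, and one-argument-per-line call wrapping). The option names are real clang-format keys; every value is an assumption, not a copy of the committed file:

# Hypothetical .clang-format consistent with the reformatting below
BasedOnStyle: LLVM                             # assumed baseline
Language: Cpp
ColumnLimit: 100                               # wrapped calls break near column 100
IndentWidth: 4                                 # assumed; leading whitespace is not preserved in this listing
UseTab: Never
BreakBeforeBraces: Allman                      # "if (...)" and "{" land on separate lines
AllowShortFunctionsOnASingleLine: Inline       # keeps "size_t size() const { return m_size; }"
AllowShortCaseLabelsOnASingleLine: true        # "case ...: rc = first_fit(size); break;"
BreakConstructorInitializersBeforeComma: true  # ": m_buffer(buffer)" then ", m_offset(offset)"
BreakBeforeBinaryOperators: None               # "!=" and "||" stay at end of line
BinPackParameters: false                       # one declared parameter per line
BinPackArguments: false                        # one call argument per line (see the mkldnn hunk)
SortIncludes: true                             # include blocks are alphabetized throughout
SpaceBeforeParens: ControlStatements           # "for(" becomes "for ("
PointerAlignment: Left                         # "dims &t" becomes "dims& t"

With such a file at the repository root, the whole tree can be reformatted in place with, for example:

find src test \( -name '*.cpp' -o -name '*.hpp' \) | xargs clang-format -i

(the exact invocation used for this commit is not recorded on this page).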
......@@ -27,7 +27,6 @@ namespace ngraph
{
public:
size_t size() const { return m_size; }
protected:
size_t m_size;
};
......
......@@ -29,7 +29,6 @@ namespace ngraph
{
public:
BufferPos() {}
BufferPos(std::shared_ptr<Buffer> buffer, size_t offset, size_t size)
: m_buffer(buffer)
, m_offset(offset)
......
......@@ -40,7 +40,6 @@ namespace ngraph
virtual size_t get_index_offset(const std::vector<size_t>& indices) override;
const Strides& get_strides() const { return m_strides; }
protected:
Strides m_strides;
size_t m_offset;
......
......@@ -17,8 +17,8 @@
#include <tuple>
#include <vector>
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/descriptor/buffer_pos.hpp"
#include "ngraph/descriptor/tensor_view.hpp"
namespace ngraph
{
......@@ -41,7 +41,6 @@ namespace ngraph
public:
virtual ~TensorViewLayout() {}
/// Extent of this view in buffer.
///
/// When we support non-linear buffers, this will need to be something other than size_t.
......@@ -52,12 +51,14 @@ namespace ngraph
/// With non-linear buffers, this will need to be something other than size_t.
virtual size_t get_index_offset(const std::vector<size_t>& indices) = 0;
const Shape& get_shape() const { return m_tensor_view.get_tensor_view_type()->get_shape(); }
const Shape& get_shape() const
{
return m_tensor_view.get_tensor_view_type()->get_shape();
}
/// Where this view is located in the buffer.
const BufferPos& get_buffer_pos() const { return m_buffer_pos; }
BufferPos& get_buffer_pos() { return m_buffer_pos; }
protected:
const ngraph::descriptor::TensorView& m_tensor_view;
BufferPos m_buffer_pos;
......
......@@ -57,7 +57,6 @@ namespace ngraph
}
const std::string& get_name() const { return m_name; }
std::shared_ptr<const TensorViewType> get_tensor_view_type() const
{
return m_tensor_view_type;
......
......@@ -16,8 +16,8 @@
#include <initializer_list>
#include <memory>
#include <vector>
#include <string>
#include <vector>
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/node.hpp"
......@@ -41,10 +41,7 @@ namespace ngraph
{
return m_parameters;
}
const std::shared_ptr<ValueType> get_result_type() const
{
return m_result_type;
}
const std::shared_ptr<ValueType> get_result_type() const { return m_result_type; }
std::string get_name() const { return m_name; }
protected:
std::shared_ptr<Node> m_result;
......
......@@ -32,7 +32,9 @@ Node::Node(const std::vector<shared_ptr<Node>>& arguments, shared_ptr<ValueType>
}
}
Node::~Node() {}
Node::~Node()
{
}
void Node::set_value_type_checked(const shared_ptr<const ValueType>& value_type)
{
......
......@@ -65,9 +65,7 @@ namespace ngraph
const Nodes& get_arguments() const { return m_arguments; }
void clear_arguments() { m_arguments.clear(); }
const std::multiset<Node*>& users() const { return m_users; }
virtual std::string get_node_id() const;
/// Return true if this has the same implementing class as node. This
......@@ -80,7 +78,6 @@ namespace ngraph
std::shared_ptr<const ValueType> get_value_type() { return m_value_type; }
const std::shared_ptr<const ValueType> get_value_type() const { return m_value_type; }
void set_value_type(const element::Type& element_type, const Shape& shape)
{
m_value_type = std::make_shared<TensorViewType>(element_type, shape);
......@@ -108,7 +105,6 @@ namespace ngraph
const std::vector<descriptor::Input>& get_inputs() const { return m_inputs; }
std::vector<descriptor::Output>& get_outputs() { return m_outputs; }
const std::vector<descriptor::Output>& get_outputs() const { return m_outputs; }
std::unordered_set<descriptor::Tensor*> liveness_live_list;
std::unordered_set<descriptor::Tensor*> liveness_new_list;
std::unordered_set<descriptor::Tensor*> liveness_free_list;
......
......@@ -19,8 +19,7 @@ using namespace ngraph;
using namespace ngraph::op;
const element::Type& BinaryElementwiseArithmetic::propagate_element_types(
const element::Type& arg0_element_type,
const element::Type& arg1_element_type) const
const element::Type& arg0_element_type, const element::Type& arg1_element_type) const
{
if (arg0_element_type != arg1_element_type)
{
......
......@@ -14,8 +14,8 @@
#include <memory>
#include "ngraph/ngraph.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
using namespace std;
using namespace ngraph;
......@@ -41,11 +41,9 @@ void BinaryElementwiseBuiltin::propagate_types()
throw ngraph_error("Arguments must have the same tensor view shape");
}
const element::Type& result_element_type =
propagate_element_types(arg0_tensor_type->get_element_type(),
arg1_tensor_type->get_element_type());
const element::Type& result_element_type = propagate_element_types(
arg0_tensor_type->get_element_type(), arg1_tensor_type->get_element_type());
set_value_type_checked(make_shared<TensorViewType>(result_element_type,
arg0_tensor_type->get_shape()));
set_value_type_checked(
make_shared<TensorViewType>(result_element_type, arg0_tensor_type->get_shape()));
}
......@@ -19,8 +19,7 @@ using namespace ngraph;
using namespace ngraph::op;
const element::Type& BinaryElementwiseComparison::propagate_element_types(
const element::Type& arg0_element_type,
const element::Type& arg1_element_type) const
const element::Type& arg0_element_type, const element::Type& arg1_element_type) const
{
if (arg0_element_type != arg1_element_type)
{
......
......@@ -19,7 +19,8 @@ using namespace ngraph::op;
void Broadcast::propagate_types()
{
if (m_arguments.size() != 1){
if (m_arguments.size() != 1)
{
throw ngraph_error("Wrong number of arguments.");
}
......@@ -42,5 +43,6 @@ void Broadcast::propagate_types()
{
throw ngraph_error("Broadcast arg, shape, and axes are incompatible");
}
set_value_type_checked(make_shared<TensorViewType>(arg_tensor_view_type->get_element_type(), m_shape));
set_value_type_checked(
make_shared<TensorViewType>(arg_tensor_view_type->get_element_type(), m_shape));
}
......@@ -40,7 +40,6 @@ namespace ngraph
virtual void propagate_types() override;
const AxisSet& get_broadcast_axes() const { return m_broadcast_axes; }
protected:
Shape m_shape;
AxisSet m_broadcast_axes;
......
......@@ -47,7 +47,7 @@ void Concat::propagate_types()
size_t concatenation_axis_length = arg0_shape.at(m_concatenation_axis);
auto& arg0_element_type = arg0_tensor_view_type->get_element_type();
for(auto i = 1; i < m_arguments.size(); i++)
for (auto i = 1; i < m_arguments.size(); i++)
{
auto argi_type = m_arguments.at(i)->get_value_type();
if (nullptr == argi_type)
......@@ -72,11 +72,12 @@ void Concat::propagate_types()
throw ngraph_error("Argument element types do not match");
}
for(auto j = 0; j < argi_shape.size(); j++)
for (auto j = 0; j < argi_shape.size(); j++)
{
if (j != m_concatenation_axis && arg0_shape.at(j) != argi_shape.at(j))
{
throw ngraph_error("Arguments to concat do not have same dimension on a non-concatenation axis");
throw ngraph_error(
"Arguments to concat do not have same dimension on a non-concatenation axis");
}
else if (j == m_concatenation_axis)
{
......
......@@ -30,7 +30,7 @@ namespace ngraph
///
/// Example: n0 has shape {2,4,2}, and n1 has shape {2,5,2}. Then the output of
/// Concat(Nodes{n0,n1},1) will have shape {2,9,2}.
Concat(const Nodes& args,size_t concatenation_axis)
Concat(const Nodes& args, size_t concatenation_axis)
: Builtin(args)
, m_concatenation_axis(concatenation_axis)
{
......@@ -40,7 +40,6 @@ namespace ngraph
virtual void propagate_types() override;
size_t get_concatenation_axis() const { return m_concatenation_axis; }
protected:
const size_t m_concatenation_axis;
};
......
......@@ -16,7 +16,10 @@
using namespace ngraph::op;
void ScalarConstantBase::propagate_types() {}
void TensorConstantBase::propagate_types() {}
void ScalarConstantBase::propagate_types()
{
}
void TensorConstantBase::propagate_types()
{
}
......@@ -16,8 +16,8 @@
#include <sstream>
#include "ngraph/types/element_type.hpp"
#include "ngraph/runtime/utils.hpp"
#include "ngraph/types/element_type.hpp"
namespace ngraph
{
......@@ -60,11 +60,7 @@ namespace ngraph
return ss.str();
}
type get_value() const
{
return m_value;
}
type get_value() const { return m_value; }
protected:
typename T::type m_value;
};
......@@ -113,7 +109,10 @@ namespace ngraph
return ss.str();
}
typename std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> get_value() const { return m_value; }
typename std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> get_value() const
{
return m_value;
}
protected:
std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> m_value;
......
......@@ -56,22 +56,23 @@ void Dot::propagate_types()
vector<size_t> result_shape;
result_shape.reserve(arg0_shape.size() + arg1_shape.size() - (is_scalar_mult ? 0 : 2));
for(auto i = 0; i < arg0_shape.size(); i++)
for (auto i = 0; i < arg0_shape.size(); i++)
{
if(is_scalar_mult || i != arg0_reduction)
if (is_scalar_mult || i != arg0_reduction)
{
result_shape.push_back(arg0_shape[i]);
}
}
for(auto i = 0; i < arg1_shape.size(); i++)
for (auto i = 0; i < arg1_shape.size(); i++)
{
if(is_scalar_mult || i != arg1_reduction)
if (is_scalar_mult || i != arg1_reduction)
{
result_shape.push_back(arg1_shape[i]);
}
}
auto result_type = make_shared<TensorViewType>(arg0_tensor_type->get_element_type(), result_shape);
auto result_type =
make_shared<TensorViewType>(arg0_tensor_type->get_element_type(), result_shape);
set_value_type_checked(result_type);
}
......@@ -39,7 +39,6 @@ namespace ngraph
virtual void propagate_types() override;
std::shared_ptr<Function> get_function() const { return m_function; }
protected:
std::shared_ptr<Function> m_function;
};
......
......@@ -33,7 +33,8 @@ void GetTupleElement::propagate_types()
throw ngraph_error("Argument must be a tuple view");
}
if (m_n >= arg0_tuple_type->get_element_types().size()){
if (m_n >= arg0_tuple_type->get_element_types().size())
{
throw ngraph_error("Indexing tuple beyond its size");
}
......
......@@ -33,9 +33,7 @@ namespace ngraph
virtual void propagate_types() override;
virtual std::string description() const override { return "GetTupleElement"; }
size_t get_n() const { return m_n; }
protected:
size_t m_n;
};
......
......@@ -31,7 +31,6 @@ namespace ngraph
{
public:
virtual std::string description() const override { return "Builtin"; }
protected:
Builtin(const std::vector<std::shared_ptr<Node>>& args)
: Node(args)
......@@ -73,8 +72,8 @@ namespace ngraph
: Builtin(Nodes{arg})
{
}
virtual const element::Type& propagate_element_types(
const element::Type& arg_element_type) const = 0;
virtual const element::Type&
propagate_element_types(const element::Type& arg_element_type) const = 0;
public:
virtual void propagate_types() override;
......@@ -87,8 +86,8 @@ namespace ngraph
: UnaryElementwiseBuiltin({arg})
{
}
virtual const element::Type& propagate_element_types(
const element::Type& arg_element_type) const final override;
virtual const element::Type&
propagate_element_types(const element::Type& arg_element_type) const final override;
};
/// Op(X, Y)[I] = op(X[I], Y[I])
......@@ -100,8 +99,8 @@ namespace ngraph
: Builtin(Nodes{arg0, arg1})
{
}
virtual const element::Type& propagate_element_types(
const element::Type& arg0_element_type,
virtual const element::Type&
propagate_element_types(const element::Type& arg0_element_type,
const element::Type& arg1_element_type) const = 0;
public:
......@@ -111,34 +110,39 @@ namespace ngraph
class BinaryElementwiseComparison : public BinaryElementwiseBuiltin
{
public:
BinaryElementwiseComparison(
const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
BinaryElementwiseComparison(const std::shared_ptr<Node>& arg0,
const std::shared_ptr<Node>& arg1)
: BinaryElementwiseBuiltin(arg0, arg1)
{
}
virtual std::string description() const override { return "BinaryElementwiseComparison"; }
virtual std::string description() const override
{
return "BinaryElementwiseComparison";
}
//virtual void propagate_types() override;
virtual const element::Type& propagate_element_types(
const element::Type& arg0_element_type,
virtual const element::Type&
propagate_element_types(const element::Type& arg0_element_type,
const element::Type& arg1_element_type) const override;
};
class BinaryElementwiseArithmetic : public BinaryElementwiseBuiltin
{
public:
BinaryElementwiseArithmetic(
const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
BinaryElementwiseArithmetic(const std::shared_ptr<Node>& arg0,
const std::shared_ptr<Node>& arg1)
: BinaryElementwiseBuiltin(arg0, arg1)
{
}
virtual std::string description() const override { return "BinaryElementwiseArithmetic"; }
virtual std::string description() const override
{
return "BinaryElementwiseArithmetic";
}
//virtual void propagate_types() override;
virtual const element::Type& propagate_element_types(
const element::Type& arg0_element_type,
const element::Type& arg1_element_type)
const final override;
const element::Type& arg1_element_type) const final override;
};
}
}
......@@ -41,4 +41,6 @@ void Parameter::assign_function(Function* function, size_t index)
m_index = index;
}
void Parameter::propagate_types() {}
void Parameter::propagate_types()
{
}
......@@ -37,7 +37,7 @@ namespace ngraph
void assign_function(Function* function, size_t index);
public:
Parameter(const std::shared_ptr<ValueType>& value_type=nullptr);
Parameter(const std::shared_ptr<ValueType>& value_type = nullptr);
Parameter(const ngraph::element::Type& element_type, const Shape& shape);
std::string description() const override { return "Parameter"; }
......
......@@ -30,7 +30,8 @@ void Reduce::propagate_types()
{
throw ngraph_error("Argument to reduce is missing type.");
}
auto arg_reductee_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg_reductee_type);
auto arg_reductee_tensor_view_type =
dynamic_pointer_cast<const TensorViewType>(arg_reductee_type);
if (nullptr == arg_reductee_tensor_view_type)
{
throw ngraph_error("Argument to reduce is not a tensor view");
......@@ -51,7 +52,8 @@ void Reduce::propagate_types()
throw ngraph_error("Argument for initial value is not a scalar");
}
if (arg_init_tensor_view_type->get_element_type() != arg_reductee_tensor_view_type->get_element_type())
if (arg_init_tensor_view_type->get_element_type() !=
arg_reductee_tensor_view_type->get_element_type())
{
throw ngraph_error("Element types for reductee and initial values do not match");
}
......@@ -99,5 +101,6 @@ void Reduce::propagate_types()
throw ngraph_error("Return type from reduction function does not match expected");
}
set_value_type_checked(make_shared<TensorViewType>(arg_reductee_tensor_view_type->get_element_type(), result_shape));
set_value_type_checked(make_shared<TensorViewType>(
arg_reductee_tensor_view_type->get_element_type(), result_shape));
}
......@@ -31,7 +31,7 @@ namespace ngraph
const std::shared_ptr<Node>& arg_init,
const std::shared_ptr<Function>& reduction_function,
const AxisSet& reduction_axes)
: Builtin({arg_reductee,arg_init})
: Builtin({arg_reductee, arg_init})
, m_reduction_function(reduction_function)
, m_reduction_axes(reduction_axes)
{
......@@ -40,9 +40,11 @@ namespace ngraph
virtual std::string description() const override { return "Reduce"; }
virtual void propagate_types() override;
std::shared_ptr<Function> get_reduction_function() const { return m_reduction_function; }
std::shared_ptr<Function> get_reduction_function() const
{
return m_reduction_function;
}
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
protected:
std::shared_ptr<Function> m_reduction_function;
AxisSet m_reduction_axes;
......
......@@ -14,8 +14,8 @@
#include <memory>
#include "ngraph/ngraph.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
using namespace std;
using namespace ngraph;
......@@ -42,8 +42,8 @@ void Select::propagate_types()
{
throw ngraph_error("Argument 0 for arithmetic operators must have boolean element type");
}
if (arg0_tensor_type->get_shape() != arg1_tensor_type->get_shape()
|| arg0_tensor_type->get_shape() != arg2_tensor_type->get_shape())
if (arg0_tensor_type->get_shape() != arg1_tensor_type->get_shape() ||
arg0_tensor_type->get_shape() != arg2_tensor_type->get_shape())
{
throw ngraph_error("Arguments must have the same tensor view shape");
}
......@@ -54,4 +54,3 @@ void Select::propagate_types()
set_value_type_checked(arg1_tensor_type);
}
......@@ -20,8 +20,8 @@ using namespace std;
using namespace ngraph;
using namespace ngraph::op;
const element::Type& UnaryElementwiseArithmetic::propagate_element_types(
const element::Type& arg_element_type) const
const element::Type&
UnaryElementwiseArithmetic::propagate_element_types(const element::Type& arg_element_type) const
{
if (arg_element_type == element::Bool::element_type())
{
......
......@@ -37,6 +37,6 @@ void UnaryElementwiseBuiltin::propagate_types()
const element::Type& result_element_type =
propagate_element_types(arg_tensor_type->get_element_type());
set_value_type_checked(make_shared<TensorViewType>(result_element_type,
arg_tensor_type->get_shape()));
set_value_type_checked(
make_shared<TensorViewType>(result_element_type, arg_tensor_type->get_shape()));
}
......@@ -19,8 +19,8 @@
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/propagate_types.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/propagate_types.hpp"
using namespace std;
using namespace ngraph;
......
......@@ -14,8 +14,8 @@
#include <fstream>
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/util.hpp"
using namespace std;
......@@ -51,7 +51,6 @@ bool pass::DumpSorted::run_on_call_list(list<Node*>& nodes)
out << join(outputs);
out << "\n";
for (const Tensor* tensor : node->liveness_live_list)
{
out << " L " << tensor->get_name() << "\n";
......
......@@ -16,12 +16,12 @@
#include <sstream>
#include <unordered_set>
#include "ngraph/log.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/assign_tensors.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/util.hpp"
#include "ngraph/log.hpp"
using namespace std;
using namespace ngraph;
......@@ -31,7 +31,7 @@ bool pass::Liveness::run_on_call_list(list<Node*>& ops)
{
unordered_set<Tensor*> currently_live;
for(auto it=ops.rbegin(); it!=ops.rend(); it++)
for (auto it = ops.rbegin(); it != ops.rend(); it++)
{
Node* node = *it;
node->liveness_live_list.clear();
......@@ -143,11 +143,8 @@ void pass::Liveness::check_dependencies(
bool pass::Liveness::is_temporary(const Tensor& tensor)
{
return
tensor.is_persistent() == false
&& tensor.is_input() == false
&& tensor.is_output() == false
;
return tensor.is_persistent() == false && tensor.is_input() == false &&
tensor.is_output() == false;
// && tensor.is_constant() == false
// && tensor.is_compile_only() == false;
}
......@@ -170,4 +167,3 @@ void pass::Liveness::validate_liveness(const list<Node*>& ops)
dead_tensors.insert(node->liveness_free_list.begin(), node->liveness_free_list.end());
}
}
......@@ -14,8 +14,8 @@
#pragma once
#include "ngraph/pass/call_pass.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/pass/call_pass.hpp"
namespace ngraph
{
......
......@@ -15,10 +15,10 @@
#include <iostream>
#include <memory>
#include "ngraph/function.hpp"
#include "ngraph/log.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/node.hpp"
#include "ngraph/function.hpp"
#include "ngraph/pass/manager.hpp"
using namespace std;
using namespace ngraph;
......
......@@ -14,9 +14,9 @@
#pragma once
#include <vector>
#include <memory>
#include <list>
#include <memory>
#include <vector>
#include "ngraph/pass/call_pass.hpp"
#include "ngraph/pass/tree_pass.hpp"
......@@ -59,7 +59,7 @@ public:
void initialize_default_passes();
template<typename T, class... Args>
template <typename T, class... Args>
void register_pass(Args... args)
{
static_assert(std::is_base_of<pass::Base, T>::value, "pass not derived from pass base");
......
......@@ -15,12 +15,12 @@
#include <exception>
#include <sstream>
#include "ngraph/log.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/memory_layout.hpp"
#include "ngraph/log.hpp"
#include "ngraph/util.hpp"
using namespace std;
......@@ -69,7 +69,6 @@ pass::MemoryManager::node::node(size_t size, block_state state)
: m_size{size}
, m_state{state}
{
}
pass::MemoryManager::MemoryManager(size_t alignment)
......@@ -84,14 +83,10 @@ pass::MemoryManager::MemoryManager(size_t alignment)
size_t pass::MemoryManager::allocate(size_t size)
{
size_t rc;
switch(m_scheme)
switch (m_scheme)
{
case allocation_scheme::FIRST_FIT:
rc = first_fit(size);
break;
case allocation_scheme::BEST_FIT:
rc = best_fit(size);
break;
case allocation_scheme::FIRST_FIT: rc = first_fit(size); break;
case allocation_scheme::BEST_FIT: rc = best_fit(size); break;
}
return rc;
}
......@@ -103,7 +98,7 @@ size_t pass::MemoryManager::best_fit(size_t size)
size_t min_delta = numeric_limits<size_t>::max();
auto best_fit = m_node_list.end();
size_t best_offset = offset;
for (auto it=m_node_list.begin(); it != m_node_list.end(); ++it)
for (auto it = m_node_list.begin(); it != m_node_list.end(); ++it)
{
if (it->m_state == block_state::FREE && it->m_size >= size)
{
......@@ -143,7 +138,7 @@ size_t pass::MemoryManager::first_fit(size_t size)
size = align(size, m_alignment);
size_t offset = 0;
bool found = false;
for (auto it=m_node_list.begin(); it != m_node_list.end(); ++it)
for (auto it = m_node_list.begin(); it != m_node_list.end(); ++it)
{
if (it->m_state == block_state::FREE && it->m_size >= size)
{
......@@ -176,7 +171,7 @@ void pass::MemoryManager::free(size_t offset)
{
size_t search_offset = 0;
bool found = false;
for (auto it=m_node_list.begin(); it != m_node_list.end(); ++it)
for (auto it = m_node_list.begin(); it != m_node_list.end(); ++it)
{
if (offset == search_offset)
{
......
......@@ -62,12 +62,11 @@ public:
node(size_t size, block_state state);
bool is_free() const { return m_state == block_state::FREE; }
size_t m_size;
block_state m_state;
};
MemoryManager(size_t alignment=1);
MemoryManager(size_t alignment = 1);
// memory_manager& alignment(size_t a);
size_t allocate(size_t size);
......@@ -81,11 +80,8 @@ public:
std::list<node>::iterator end() { return m_node_list.end(); }
std::list<node>::const_iterator begin() const { return m_node_list.cbegin(); }
std::list<node>::const_iterator end() const { return m_node_list.cend(); }
const std::list<node>& get_node_list() const { return m_node_list; }
size_t max_allocated() const { return m_max_allocated; }
private:
size_t first_fit(size_t size);
size_t best_fit(size_t size);
......
......@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <algorithm>
#include <fstream>
#include <unordered_set>
#include <unordered_map>
#include <algorithm>
#include <unordered_set>
#include "memory_visualize.hpp"
#include "ngraph/descriptor/tensor.hpp"
......@@ -154,8 +154,7 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<Node*>&
}
i++;
}
sort(tensor_set.begin(), tensor_set.end(), [](const Tensor* t1, const Tensor* t2)
{
sort(tensor_set.begin(), tensor_set.end(), [](const Tensor* t1, const Tensor* t2) {
return t1->size() < t2->size();
});
for (const Tensor* tensor : tensor_set)
......@@ -206,12 +205,16 @@ void pass::MemoryVisualize::draw_histogram(ostream& file, const list<Node*>& nod
y += line_spacing;
size_t x1 = offset;
size_t x2 = ((usage / memory_footprint) * scale) + offset;
file << "<text x=\"" << 0 << "\" y=\"" << y + text_offset << "\" fill=\"" << "black" << "\">" << node->get_node_id() << "</text>\n";
file << "<line x1=\"" << x1 << "\" y1=\"" << y << "\" x2=\"" << x2 << "\" y2=\"" << y << "\"";
file << "<text x=\"" << 0 << "\" y=\"" << y + text_offset << "\" fill=\""
<< "black"
<< "\">" << node->get_node_id() << "</text>\n";
file << "<line x1=\"" << x1 << "\" y1=\"" << y << "\" x2=\"" << x2 << "\" y2=\"" << y
<< "\"";
file << " style=\"stroke:forestgreen;stroke-width:" << stroke_width << "\" />\n";
x1 = x2;
x2 = ((footprint / memory_footprint) * scale) + offset;
file << "<line x1=\"" << x1 << "\" y1=\"" << y << "\" x2=\"" << x2 << "\" y2=\"" << y << "\"";
file << "<line x1=\"" << x1 << "\" y1=\"" << y << "\" x2=\"" << x2 << "\" y2=\"" << y
<< "\"";
file << " style=\"stroke:firebrick;stroke-width:" << stroke_width << "\" />\n";
}
file << "</svg>\n";
......
......@@ -14,9 +14,9 @@
#pragma once
#include <iostream>
#include <limits>
#include <list>
#include <iostream>
#include "ngraph/pass/call_pass.hpp"
......
......@@ -27,6 +27,7 @@ namespace ngraph
class ngraph::pass::Base
{
friend class Manager;
public:
protected:
ManagerState& get_state();
......
......@@ -14,8 +14,8 @@
#include <fstream>
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/node.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/util.hpp"
using namespace ngraph;
......@@ -24,8 +24,7 @@ using namespace std;
bool pass::VisualizeTree::run_on_tree(std::shared_ptr<Node> base_node)
{
// map<size_t, list<node_ptr>> dependent_nodes;
traverse_nodes(base_node, [&](Node* node)
{
traverse_nodes(base_node, [&](Node* node) {
for (auto arg : node->get_arguments())
{
m_ss << add_attributes(arg.get());
......
......@@ -14,9 +14,9 @@
#pragma once
#include <set>
#include <sstream>
#include <string>
#include <set>
#include "ngraph/pass/tree_pass.hpp"
......
......@@ -48,23 +48,19 @@ namespace ngraph
void tensor_call(const TensorViewPtrs& inputs, const TensorViewPtrs& outputs);
void set_return() { m_return = true; }
std::shared_ptr<TensorView> get_tensor_view(size_t i) { return m_tensor_views[i]; }
template <typename ET>
ParameterizedTensorView<ET>* get_parameterized_tensor_view(size_t i)
{
return m_tensor_views[i]->get_parameterized_tensor_view<ET>();
}
template<typename ET>
template <typename ET>
typename ET::type* get_tensor_view_data(size_t i)
{
return &get_parameterized_tensor_view<ET>(i)->get_vector()[0];
}
protected:
size_t m_n_inputs;
size_t m_n_outputs;
......
......@@ -38,7 +38,8 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET>(call_frame, m_out) = Eigen::abs(EigenArray1d<ET>(call_frame, m_arg));
EigenArray1d<ET>(call_frame, m_out) =
Eigen::abs(EigenArray1d<ET>(call_frame, m_arg));
}
protected:
......
......@@ -29,8 +29,7 @@ namespace ngraph
class BroadcastScalarInstruction : public Instruction
{
public:
BroadcastScalarInstruction(const TensorViewInfo& arg,
const TensorViewInfo& out)
BroadcastScalarInstruction(const TensorViewInfo& arg, const TensorViewInfo& out)
: m_arg(arg)
, m_out(out)
{
......@@ -42,7 +41,7 @@ namespace ngraph
// pull it out as a vector. This works because of the way
// fmt::V computes sizes---it lumps together any higher
// dimensions---while fmt::M ignores them.
EigenArray1d<ET>(call_frame, m_out) = EigenArray1d<ET>(call_frame, m_arg)(0,0);
EigenArray1d<ET>(call_frame, m_out) = EigenArray1d<ET>(call_frame, m_arg)(0, 0);
}
protected:
......
......@@ -15,8 +15,8 @@
#pragma once
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
......@@ -29,7 +29,9 @@ namespace ngraph
class CallInstruction : public Instruction
{
public:
CallInstruction(std::shared_ptr<ExternalFunction> ef,std::vector<TensorViewInfo> in, std::vector<TensorViewInfo> out)
CallInstruction(std::shared_ptr<ExternalFunction> ef,
std::vector<TensorViewInfo> in,
std::vector<TensorViewInfo> out)
: m_external_function(ef)
, m_in(in)
, m_out(out)
......@@ -51,7 +53,7 @@ namespace ngraph
{
outputs.push_back(call_frame.get_tensor_view(out.get_index()));
}
(*cf)(inputs,outputs);
(*cf)(inputs, outputs);
}
protected:
......
......@@ -46,8 +46,10 @@ namespace ngraph
{
EigenVector<ET> out(call_frame, m_out);
size_t concat_pos = 0;
for (size_t i = 0; i < m_args.size(); i++){
out.segment(concat_pos, m_sizes[i]) << EigenVector<ET>(call_frame, m_args.at(i));
for (size_t i = 0; i < m_args.size(); i++)
{
out.segment(concat_pos, m_sizes[i])
<< EigenVector<ET>(call_frame, m_args.at(i));
concat_pos += m_sizes[i];
}
}
......
......@@ -30,7 +30,8 @@ namespace ngraph
class ConstantInstruction : public Instruction
{
public:
ConstantInstruction(const std::vector<typename ET::type> value, const TensorViewInfo& out)
ConstantInstruction(const std::vector<typename ET::type> value,
const TensorViewInfo& out)
: m_value(value)
, m_out(out)
{
......@@ -38,7 +39,8 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
call_frame.get_parameterized_tensor_view<ET>(m_out.get_index())->get_vector() = m_value;
call_frame.get_parameterized_tensor_view<ET>(m_out.get_index())->get_vector() =
m_value;
}
protected:
......
......@@ -40,8 +40,9 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET>(call_frame, m_out) <<
EigenVector<ET>(call_frame, m_arg0).dot(EigenVector<ET>(call_frame, m_arg1));
EigenArray1d<ET>(call_frame, m_out)
<< EigenVector<ET>(call_frame, m_arg0)
.dot(EigenVector<ET>(call_frame, m_arg1));
}
protected:
......
......@@ -37,7 +37,8 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET, fmt::V>(call_frame, m_out) = Eigen::log(EigenArray1d<ET, fmt::V>(call_frame, m_arg));
EigenArray1d<ET, fmt::V>(call_frame, m_out) =
Eigen::log(EigenArray1d<ET, fmt::V>(call_frame, m_arg));
}
protected:
......
......@@ -27,7 +27,6 @@ namespace ngraph
{
public:
ReturnInstruction() {}
virtual void execute(CallFrame& call_frame) const override
{
call_frame.set_return();
......
......@@ -45,8 +45,8 @@ namespace ngraph
// fmt::V computes sizes---it lumps together any higher
// dimensions---while fmt::M ignores them.
EigenVector<ET>(call_frame, m_out) =
call_frame.get_tensor_view_data<ET>(m_arg0.get_index())[0]
* EigenVector<ET>(call_frame, m_arg1);
call_frame.get_tensor_view_data<ET>(m_arg0.get_index())[0] *
EigenVector<ET>(call_frame, m_arg1);
}
protected:
......
......@@ -40,7 +40,8 @@ namespace ngraph
using EigenArrayBase = Eigen::Map<DynamicArray<ET>, 0, DynamicStrides>;
template <typename ET>
using DynamicMatrix = Eigen::Matrix<typename ET::type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
using DynamicMatrix =
Eigen::Matrix<typename ET::type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
template <typename ET>
using EigenMatrixBase = Eigen::Map<DynamicMatrix<ET>, 0, DynamicStrides>;
......
......@@ -97,7 +97,8 @@ ExternalFunction::ExternalFunction(const std::shared_ptr<ngraph::Function>& func
const std::vector<TensorViewInfo>& out)
#define REGISTER_INSTRUCTION(op_class, instr_class, ...) \
REGISTER_TO_OP_MAP(op_class) { \
REGISTER_TO_OP_MAP(op_class) \
{ \
ef->get_instructions()->push_back(make_shared<instr_class>(__VA_ARGS__)); \
}
......@@ -146,8 +147,8 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
{
auto broadcast = static_cast<const op::Broadcast*>(n);
auto arg_tensor_type =
dynamic_pointer_cast<const TensorViewType>(n->get_arguments().at(0)->get_value_type());
auto arg_tensor_type = dynamic_pointer_cast<const TensorViewType>(
n->get_arguments().at(0)->get_value_type());
assert(nullptr != arg_tensor_type);
auto result_tensor_type =
......@@ -175,18 +176,22 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
if (broadcast->get_broadcast_axes() == AxisSet{1})
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::BroadcastVectorColwiseInstruction<element::Float32>>(
make_shared<
runtime::eigen::BroadcastVectorColwiseInstruction<element::Float32>>(
in[0], out[0]));
}
else if (broadcast->get_broadcast_axes() == AxisSet{0})
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::BroadcastVectorRowwiseInstruction<element::Float32>>(
make_shared<
runtime::eigen::BroadcastVectorRowwiseInstruction<element::Float32>>(
in[0], out[0]));
}
else
{
throw ngraph_error("Internal error: axis set for vector-matrix broadcast is neither {0} or {1}");
throw ngraph_error(
"Internal error: axis set for vector-matrix broadcast is neither {0} or "
"{1}");
}
}
else
......@@ -206,8 +211,8 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
if (result_shape.size() == 1)
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::ConcatVectorInstruction<element::Float32>>(
in, out[0]));
make_shared<runtime::eigen::ConcatVectorInstruction<element::Float32>>(in,
out[0]));
}
else if (result_shape.size() == 2)
{
......@@ -286,7 +291,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
};
// Parameter is a "runtime no-op" because the output tensor has already been filled.
REGISTER_TO_OP_MAP(op::Parameter) {};
REGISTER_TO_OP_MAP(op::Parameter){};
// GetTupleElement will be spliced out, with the users of out redirected to in's source, but, for now, we need to copy.
REGISTER_TO_OP_MAP(op::GetTupleElement)
......@@ -322,20 +327,16 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
}
catch (const std::out_of_range)
{
external = make_shared<ngraph::runtime::ExternalFunction>(
function_call->get_function());
function_map.insert({function,external});
external =
make_shared<ngraph::runtime::ExternalFunction>(function_call->get_function());
function_map.insert({function, external});
}
ef->get_instructions()->push_back(
make_shared<runtime::eigen::CallInstruction>(external,in,out));
};
REGISTER_TO_OP_MAP(op::Reduce)
{
throw ngraph_error("op::Reduce not implemented yet");
make_shared<runtime::eigen::CallInstruction>(external, in, out));
};
REGISTER_TO_OP_MAP(op::Reduce) { throw ngraph_error("op::Reduce not implemented yet"); };
initialized = true;
}
return op_map;
......
......@@ -28,7 +28,8 @@ namespace ngraph
{
class ExternalFunction
{
using FunctionMap = std::unordered_map<std::shared_ptr<Function>,std::shared_ptr<ExternalFunction>>;
using FunctionMap =
std::unordered_map<std::shared_ptr<Function>, std::shared_ptr<ExternalFunction>>;
using OpFunction = std::function<void(const ngraph::Node*,
ExternalFunction*,
......@@ -50,7 +51,6 @@ namespace ngraph
// Release original function's resources
void release_function() { m_function = nullptr; }
protected:
void compile();
void compile(FunctionMap& function_map);
......
......@@ -61,7 +61,6 @@ namespace ngraph
// For getting the data out
storage_type& get_vector() { return m_vector; }
protected:
storage_type m_vector;
};
......
......@@ -39,9 +39,7 @@ namespace ngraph
public:
TensorView() {}
virtual ~TensorView() {}
template <typename ET>
ParameterizedTensorView<ET>* get_parameterized_tensor_view()
{
......@@ -65,7 +63,6 @@ namespace ngraph
}
const Shape& get_shape() { return m_descriptor->get_tensor_view_type()->get_shape(); }
protected:
std::shared_ptr<ngraph::descriptor::TensorView> m_descriptor;
};
......
......@@ -34,7 +34,6 @@ namespace ngraph
}
size_t get_index() const { return m_index; }
std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout>
get_tensor_view_layout() const
{
......
......@@ -40,8 +40,7 @@ namespace ngraph
return m_descriptor;
}
virtual void
collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
const std::shared_ptr<Value>& value) const override;
protected:
......
......@@ -30,7 +30,6 @@ namespace ngraph
{
public:
virtual ~Value() {}
/// @brief The compile-time descriptor for this value.
virtual std::shared_ptr<ngraph::descriptor::Value> get_descriptor() const = 0;
......
......@@ -16,8 +16,8 @@
#include <cmath>
#include <iostream>
#include "ngraph/types/element_type.hpp"
#include "ngraph/log.hpp"
#include "ngraph/types/element_type.hpp"
using namespace ngraph;
......
......@@ -14,8 +14,8 @@
#include <memory>
#include "ngraph/ngraph.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/util.hpp"
using namespace std;
......@@ -39,7 +39,8 @@ bool TensorViewType::operator==(const ValueType& that) const
return true;
}
void TensorViewType::collect_tensor_views(std::vector<std::shared_ptr<const TensorViewType>>& views) const
void TensorViewType::collect_tensor_views(
std::vector<std::shared_ptr<const TensorViewType>>& views) const
{
views.push_back(shared_from_this());
}
......@@ -54,9 +55,10 @@ bool TupleType::operator==(const ValueType& that) const
return that_tvt->get_element_types() == get_element_types();
}
void TupleType::collect_tensor_views(std::vector<std::shared_ptr<const TensorViewType>>& views) const
void TupleType::collect_tensor_views(
std::vector<std::shared_ptr<const TensorViewType>>& views) const
{
for(auto elt : m_element_types)
for (auto elt : m_element_types)
{
elt->collect_tensor_views(views);
}
......
......@@ -17,8 +17,8 @@
#include <memory>
#include <vector>
#include "ngraph/types/element_type.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/types/element_type.hpp"
namespace ngraph
{
......@@ -35,12 +35,10 @@ namespace ngraph
protected:
ValueType() {}
public:
virtual ~ValueType() {}
virtual bool operator==(const ValueType& that) const = 0;
bool operator!=(const ValueType& that) const { return !(*this == that); }
/// Add tensor views in depth-first order.
virtual void collect_tensor_views(
std::vector<std::shared_ptr<const TensorViewType>>& views) const = 0;
......@@ -62,7 +60,6 @@ namespace ngraph
const element::Type& get_element_type() const { return m_element_type; }
const Shape& get_shape() const { return m_shape; }
virtual bool operator==(const ValueType& that) const override;
virtual void collect_tensor_views(
std::vector<std::shared_ptr<const TensorViewType>>& views) const override;
......@@ -80,7 +77,6 @@ namespace ngraph
public:
/// Construct empty tuple and add value types later.
TupleType() {}
/// @param element_types A vector of types for the tuple elements
TupleType(const std::vector<std::shared_ptr<const ValueType>>& element_types)
: m_element_types(element_types)
......@@ -91,7 +87,10 @@ namespace ngraph
{
return m_element_types;
}
std::vector<std::shared_ptr<const ValueType>> set_element_types() { return m_element_types; }
std::vector<std::shared_ptr<const ValueType>> set_element_types()
{
return m_element_types;
}
virtual bool operator==(const ValueType& that) const override;
virtual void collect_tensor_views(
......
......@@ -12,15 +12,15 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <iomanip>
#include <map>
#include <deque>
#include <forward_list>
#include <iomanip>
#include <map>
#include <unordered_set>
#include "ngraph/util.hpp"
#include "ngraph/node.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/util.hpp"
using namespace std;
......@@ -135,8 +135,7 @@ size_t ngraph::hash_combine(const std::vector<size_t>& list)
return seed;
}
void ngraph::traverse_nodes(const std::shared_ptr<ngraph::Node>& p,
std::function<void(Node*)> f)
void ngraph::traverse_nodes(const std::shared_ptr<ngraph::Node>& p, std::function<void(Node*)> f)
{
std::unordered_set<Node*> instances_seen;
deque<Node*> stack;
......@@ -151,7 +150,10 @@ void ngraph::traverse_nodes(const std::shared_ptr<ngraph::Node>& p,
f(n);
}
stack.pop_front();
for (auto arg : n->get_arguments()) { stack.push_front(arg.get()); }
for (auto arg : n->get_arguments())
{
stack.push_front(arg.get());
}
}
}
......@@ -159,10 +161,7 @@ void ngraph::free_nodes(shared_ptr<Node> p)
{
std::deque<Node*> sorted_list;
traverse_nodes(p, [&](Node* n)
{
sorted_list.push_front(n);
});
traverse_nodes(p, [&](Node* n) { sorted_list.push_front(n); });
for (Node* n : sorted_list)
{
......
......@@ -18,10 +18,10 @@
#include <chrono>
#include <iostream>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include <memory>
namespace ngraph
{
......
......@@ -17,8 +17,8 @@
#include <list>
#include "ngraph/node.hpp"
#include "ngraph/visualize.hpp"
#include "ngraph/util.hpp"
#include "ngraph/visualize.hpp"
using namespace ngraph;
using namespace std;
......
......@@ -33,8 +33,10 @@ TEST(build_graph, build_simple)
ASSERT_EQ(dot->get_arguments()[0], arg2);
ASSERT_EQ(dot->get_arguments()[1], arg0);
auto result_type = make_shared<TensorViewType>(element::Float32::element_type(), Shape{10,32,7});
auto cluster_0 = make_shared<Function>(dot, result_type, op::Parameters{arg0, arg1, arg2, arg3});
auto result_type =
make_shared<TensorViewType>(element::Float32::element_type(), Shape{10, 32, 7});
auto cluster_0 =
make_shared<Function>(dot, result_type, op::Parameters{arg0, arg1, arg2, arg3});
ASSERT_EQ(cluster_0->get_result(), dot);
}
......@@ -182,4 +184,6 @@ TEST(build_graph, set_value_type_checked)
}
// Check argument inverses
TEST(build_graph, arg_inverse) {}
TEST(build_graph, arg_inverse)
{
}
......@@ -63,9 +63,8 @@ TEST(execute, test_abc_tuple)
auto A = make_shared<op::GetTupleElement>(ABC, 0);
auto B = make_shared<op::GetTupleElement>(ABC, 1);
auto C = make_shared<op::GetTupleElement>(ABC, 2);
auto f = make_shared<Function>(make_shared<op::Tuple>(Nodes{(A + B) * C}),
tensor_view_type,
op::Parameters{ABC});
auto f = make_shared<Function>(
make_shared<op::Tuple>(Nodes{(A + B) * C}), tensor_view_type, op::Parameters{ABC});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -103,12 +102,11 @@ TEST(execute, test_tuple_result)
auto A_add_B = make_shared<op::Add>(A, B);
auto A_add_B_mul_C = make_shared<op::Multiply>(A_add_B, C);
auto rt = make_shared<TupleType>(
std::vector<shared_ptr<const ValueType>>(
auto rt = make_shared<TupleType>(std::vector<shared_ptr<const ValueType>>(
{make_shared<TensorViewType>(element::Float32::element_type(), shape),
make_shared<TensorViewType>(element::Float32::element_type(), shape)}));
auto f = make_shared<Function>(make_shared<op::Tuple>(Nodes{A_add_B, A_add_B_mul_C}),
rt, op::Parameters{A, B, C});
auto f = make_shared<Function>(
make_shared<op::Tuple>(Nodes{A_add_B, A_add_B_mul_C}), rt, op::Parameters{A, B, C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -157,8 +155,9 @@ TEST(execute, test_concat_matrix_colwise)
auto shape_c = Shape{2, 3};
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape_c);
auto shape_r = Shape{2, 8};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2,8});
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},1), rt, op::Parameters{A,B,C});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 8});
auto f = make_shared<Function>(
make_shared<op::Concat>(Nodes{A, B, C}, 1), rt, op::Parameters{A, B, C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -186,8 +185,9 @@ TEST(execute, test_concat_matrix_rowwise)
auto shape_c = Shape{3, 2};
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape_c);
auto shape_r = Shape{8, 2};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{8,2});
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},0), rt, op::Parameters{A,B,C});
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{8, 2});
auto f = make_shared<Function>(
make_shared<op::Concat>(Nodes{A, B, C}, 0), rt, op::Parameters{A, B, C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -216,7 +216,8 @@ TEST(execute, test_concat_vector)
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape_c);
auto shape_r = Shape{12};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{12});
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},0), rt, op::Parameters{A,B,C});
auto f = make_shared<Function>(
make_shared<op::Concat>(Nodes{A, B, C}, 0), rt, op::Parameters{A, B, C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -285,7 +286,7 @@ TEST(execute, test_dot1d)
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto shape_r = Shape{1};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto f = make_shared<Function>(make_shared<op::Dot>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -306,9 +307,9 @@ TEST(execute, test_dot2d)
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto shape_r = Shape{2,2};
auto shape_r = Shape{2, 2};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto f = make_shared<Function>(make_shared<op::Dot>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -327,11 +328,11 @@ TEST(execute, test_dot2d)
TEST(execute, test_dot_scalar_tensor_arg0)
{
auto shape_a = Shape{};
auto shape_b = Shape{2,2,2};
auto shape_b = Shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_b);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto f = make_shared<Function>(make_shared<op::Dot>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -343,18 +344,18 @@ TEST(execute, test_dot_scalar_tensor_arg0)
*b = vector<float>{1, 2, 3, 4, 5, 6, 7, 8};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_b);
(*cf)({a,b}, {result});
(*cf)({a, b}, {result});
ASSERT_EQ((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}), result->get_vector());
}
TEST(execute, test_dot_scalar_tensor_arg1)
{
auto shape_a = Shape{2,2,2};
auto shape_a = Shape{2, 2, 2};
auto shape_b = Shape{};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_a);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto f = make_shared<Function>(make_shared<op::Dot>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -366,7 +367,7 @@ TEST(execute, test_dot_scalar_tensor_arg1)
*b = vector<float>{6};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_a);
(*cf)({a,b}, {result});
(*cf)({a, b}, {result});
ASSERT_EQ((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}), result->get_vector());
}
......@@ -376,7 +377,7 @@ TEST(execute, test_dot_scalar_scalar)
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto f = make_shared<Function>(make_shared<op::Dot>(A, B), rt, op::Parameters{A, B});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -388,18 +389,18 @@ TEST(execute, test_dot_scalar_scalar)
*b = vector<float>{6};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
(*cf)({a,b}, {result});
(*cf)({a, b}, {result});
ASSERT_EQ((vector<float>{48}), result->get_vector());
}
TEST(execute, test_dot_matrix_vector)
{
auto shape_a = Shape{4,4};
auto shape_a = Shape{4, 4};
auto shape_b = Shape{4};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_b);
auto f = make_shared<Function>(make_shared<op::Dot>(A,B), rt, op::Parameters{A,B});
auto f = make_shared<Function>(make_shared<op::Dot>(A, B), rt, op::Parameters{A, B});
auto shape_r = Shape{4};
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
......@@ -407,16 +408,13 @@ TEST(execute, test_dot_matrix_vector)
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{ 1, 2, 3, 4,
5, 6, 7, 8,
9,10,11,12,
13,14,15,16};
*a = vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{17,18,19,20};
*b = vector<float>{17, 18, 19, 20};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a,b}, {result});
ASSERT_EQ((vector<float>{190,486,782,1078}), result->get_vector());
(*cf)({a, b}, {result});
ASSERT_EQ((vector<float>{190, 486, 782, 1078}), result->get_vector());
}
TEST(execute, test_lessthan)
......@@ -455,7 +453,8 @@ TEST(execute, test_log)
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{expf(1), expf(2), expf(3), expf(4), expf(5), expf(6), expf(7), expf(8)};
vector<float> loga;
for (auto elt : a->get_vector()){
for (auto elt : a->get_vector())
{
loga.push_back(logf(elt));
}
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
......@@ -593,9 +592,9 @@ TEST(execute, test_scalar_constant)
TEST(execute, test_tensor_constant)
{
auto shape = Shape{2,2,2};
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::TensorConstant<element::Float32>>(shape);
A->get_value()->get_vector() = {1,2,3,4,5,6,7,8};
A->get_value()->get_vector() = {1, 2, 3, 4, 5, 6, 7, 8};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(A, rt, op::Parameters{});
......@@ -611,9 +610,9 @@ TEST(execute, test_tensor_constant)
TEST(execute, test_tensor_constant_with_op)
{
auto shape = Shape{2,2,2};
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::TensorConstant<element::Float32>>(shape);
A->get_value()->get_vector() = {-1,2,3,-4,5,-6,-7,8};
A->get_value()->get_vector() = {-1, 2, 3, -4, 5, -6, -7, 8};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Abs>(A), rt, op::Parameters{});
......@@ -642,9 +641,8 @@ TEST(execute, test_function_call)
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt_g = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto g = make_shared<Function>(
make_shared<op::FunctionCall>(f,Nodes{X,Y,Z})
+ make_shared<op::FunctionCall>(f,Nodes{X,Y,Z}),
auto g = make_shared<Function>(make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}) +
make_shared<op::FunctionCall>(f, Nodes{X, Y, Z}),
rt_g,
op::Parameters{X, Y, Z});
......@@ -676,7 +674,8 @@ TEST(execute, test_broadcast_scalar_vector)
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto shape_r = Shape{4};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_r);
auto f = make_shared<Function>(make_shared<op::Broadcast>(A, shape_r, AxisSet{0}), rt, op::Parameters{A});
auto f = make_shared<Function>(
make_shared<op::Broadcast>(A, shape_r, AxisSet{0}), rt, op::Parameters{A});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -694,9 +693,10 @@ TEST(execute, test_broadcast_scalar_matrix)
{
auto shape_a = Shape{};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto shape_r = Shape{2,2};
auto shape_r = Shape{2, 2};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_r);
auto f = make_shared<Function>(make_shared<op::Broadcast>(A, shape_r, AxisSet{0,1}), rt, op::Parameters{A});
auto f = make_shared<Function>(
make_shared<op::Broadcast>(A, shape_r, AxisSet{0, 1}), rt, op::Parameters{A});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -714,9 +714,10 @@ TEST(execute, test_broadcast_scalar_tensor)
{
auto shape_a = Shape{};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto shape_r = Shape{2,2,2};
auto shape_r = Shape{2, 2, 2};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_r);
auto f = make_shared<Function>(make_shared<op::Broadcast>(A, shape_r, AxisSet{0,1,2}), rt, op::Parameters{A});
auto f = make_shared<Function>(
make_shared<op::Broadcast>(A, shape_r, AxisSet{0, 1, 2}), rt, op::Parameters{A});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -732,10 +733,11 @@ TEST(execute, test_broadcast_scalar_tensor)
TEST(execute, test_broadcast_trivial)
{
auto shape = Shape{2,2,2};
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Broadcast>(A, shape, AxisSet{}), rt, op::Parameters{A});
auto f = make_shared<Function>(
make_shared<op::Broadcast>(A, shape, AxisSet{}), rt, op::Parameters{A});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
......@@ -753,16 +755,17 @@ TEST(execute, test_broadcast_vector_colwise)
{
auto shape_a = Shape{3};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto shape_r = Shape{3,4};
auto shape_r = Shape{3, 4};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_r);
auto f = make_shared<Function>(make_shared<op::Broadcast>(A, shape_r, AxisSet{1}), rt, op::Parameters{A});
auto f = make_shared<Function>(
make_shared<op::Broadcast>(A, shape_r, AxisSet{1}), rt, op::Parameters{A});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{1,2,3};
*a = vector<float>{1, 2, 3};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a}, {result});
......@@ -773,16 +776,17 @@ TEST(execute, test_broadcast_vector_rowwise)
{
auto shape_a = Shape{4};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto shape_r = Shape{3,4};
auto shape_r = Shape{3, 4};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_r);
auto f = make_shared<Function>(make_shared<op::Broadcast>(A, shape_r, AxisSet{0}), rt, op::Parameters{A});
auto f = make_shared<Function>(
make_shared<op::Broadcast>(A, shape_r, AxisSet{0}), rt, op::Parameters{A});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{1,2,3,4};
*a = vector<float>{1, 2, 3, 4};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a}, {result});
......
......@@ -13,12 +13,12 @@
// ----------------------------------------------------------------------------
#include <iostream>
#include <vector>
#include <mkldnn.hpp>
#include <vector>
#include "gtest/gtest.h"
static int tensor_volume(const mkldnn::memory::dims &t)
static int tensor_volume(const mkldnn::memory::dims& t)
{
int x = 1;
for (const auto i : t)
......@@ -26,7 +26,6 @@ static int tensor_volume(const mkldnn::memory::dims &t)
return x;
}
TEST(mkldnn, engine)
{
using namespace mkldnn;
......@@ -39,13 +38,15 @@ TEST(mkldnn, engine)
const int mb = 2;
const int groups = 2;
memory::dims input_tz = {mb, 256, 13, 13};
memory::dims weights_tz = {groups, 384/groups, 256/groups, 3, 3};
memory::dims weights_tz = {groups, 384 / groups, 256 / groups, 3, 3};
memory::dims bias_tz = {384};
memory::dims strides = {1, 1};
memory::dims padding = {0, 0};
memory::dims output_tz = {mb, 384,
(input_tz[2] + 2*padding[0] - weights_tz[3])/strides[0] + 1,
(input_tz[3] + 2*padding[1] - weights_tz[4])/strides[1] + 1,
memory::dims output_tz = {
mb,
384,
(input_tz[2] + 2 * padding[0] - weights_tz[3]) / strides[0] + 1,
(input_tz[3] + 2 * padding[1] - weights_tz[4]) / strides[1] + 1,
};
std::vector<float> input(tensor_volume(input_tz), .0f);
......@@ -54,7 +55,8 @@ TEST(mkldnn, engine)
std::vector<float> output(tensor_volume(output_tz), .0f);
auto c3_src_desc = memory::desc({input_tz}, memory::data_type::f32, memory::format::nchw);
auto c3_weights_desc = memory::desc({weights_tz}, memory::data_type::f32, memory::format::goihw);
auto c3_weights_desc =
memory::desc({weights_tz}, memory::data_type::f32, memory::format::goihw);
auto c3_bias_desc = memory::desc({bias_tz}, memory::data_type::f32, memory::format::x);
auto c3_dst_desc = memory::desc({output_tz}, memory::data_type::f32, memory::format::nchw);
......@@ -63,11 +65,22 @@ TEST(mkldnn, engine)
auto c3_bias = memory({c3_bias_desc, cpu_engine}, bias.data());
auto c3_dst = memory({c3_dst_desc, cpu_engine}, output.data());
auto c3 = convolution_forward(convolution_forward::primitive_desc(convolution_forward::desc(prop_kind::forward,
auto c3 = convolution_forward(convolution_forward::primitive_desc(
convolution_forward::desc(prop_kind::forward,
algorithm::convolution_direct,
c3_src_desc, c3_weights_desc, c3_bias_desc, c3_dst_desc,
strides, padding, padding, padding_kind::zero),
cpu_engine), c3_src, c3_weights, c3_bias, c3_dst);
c3_src_desc,
c3_weights_desc,
c3_bias_desc,
c3_dst_desc,
strides,
padding,
padding,
padding_kind::zero),
cpu_engine),
c3_src,
c3_weights,
c3_bias,
c3_dst);
stream(stream::kind::eager).submit({c3}).wait();
}));
......
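The output_tz arithmetic in the hunk above is the usual convolution output-size formula, out = (in + 2 * pad - kernel) / stride + 1. With this test's numbers (13x13 input, 3x3 kernel, zero padding, unit stride) each spatial dimension comes out to 11, so output_tz works out to {2, 384, 11, 11}. A self-contained check:

    #include <cstdio>

    int main()
    {
        const int in = 13, pad = 0, kernel = 3, stride = 1;
        // (13 + 2 * 0 - 3) / 1 + 1 = 11
        const int out = (in + 2 * pad - kernel) / stride + 1;
        std::printf("output_tz = {2, 384, %d, %d}\n", out, out);
        return 0;
    }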
......@@ -19,16 +19,16 @@
#include "gtest/gtest.h"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/assign_tensors.hpp"
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/propagate_types.hpp"
#include "ngraph/pass/topological_sort.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/log.hpp"
#include "test_tools.hpp"
......@@ -81,8 +81,6 @@ TEST(pass, liveness)
// auto exc = ex.executor(seq_stuff);
// return exc;
// lg = LivenessGraph(exc.exop.ops)
// lg.layout_memory()
......
......@@ -20,15 +20,15 @@
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/assign_tensors.hpp"
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/memory_layout.hpp"
#include "ngraph/pass/propagate_types.hpp"
#include "ngraph/pass/topological_sort.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/pass/memory_layout.hpp"
#include "test_tools.hpp"
using namespace ngraph;
......
......@@ -12,20 +12,20 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include <memory>
#include "gtest/gtest.h"
#include "ngraph/function.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/assign_tensors.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/propagate_types.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/topological_sort.hpp"
#include "ngraph/function.hpp"
#include "test_tools.hpp"
using namespace std;
......
......@@ -14,9 +14,9 @@
#include <algorithm>
#include "test_tools.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/util.hpp"
#include "test_tools.hpp"
using namespace std;
using namespace ngraph;
......@@ -73,7 +73,8 @@ shared_ptr<Function> make_test_graph()
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f0 = make_shared<Function>(r0, rt, op::Parameters{arg_0, arg_1, arg_2, arg_3, arg_4, arg_5});
auto f0 =
make_shared<Function>(r0, rt, op::Parameters{arg_0, arg_1, arg_2, arg_3, arg_4, arg_5});
return f0;
}
......@@ -81,9 +82,6 @@ shared_ptr<Function> make_test_graph()
size_t get_node_count(std::shared_ptr<Node> n)
{
size_t node_count = 0;
traverse_nodes(n, [&](const Node* node) {
node_count++;
});
traverse_nodes(n, [&](const Node* node) { node_count++; });
return node_count;
}
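The reformatted get_node_count above collapses a trivial lambda onto one line without changing behavior. The counting idiom itself, sketched on a plain tree so it compiles without ngraph (TreeNode, traverse, and count_nodes here are illustrative stand-ins, not ngraph API):

    #include <cstddef>
    #include <functional>
    #include <memory>
    #include <vector>

    struct TreeNode
    {
        std::vector<std::shared_ptr<TreeNode>> children;
    };

    // Depth-first visit, calling f once per node -- the same shape as
    // ngraph's traverse_nodes(n, [&](const Node* node) { node_count++; }).
    void traverse(const std::shared_ptr<TreeNode>& n,
                  const std::function<void(const TreeNode*)>& f)
    {
        f(n.get());
        for (const auto& c : n->children)
            traverse(c, f);
    }

    std::size_t count_nodes(const std::shared_ptr<TreeNode>& root)
    {
        std::size_t node_count = 0;
        traverse(root, [&](const TreeNode*) { node_count++; });
        return node_count;
    }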
......@@ -19,12 +19,12 @@
#include "gtest/gtest.h"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/topological_sort.hpp"
#include "ngraph/visualize.hpp"
#include "ngraph/util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/visualize.hpp"
#include "test_tools.hpp"
using namespace std;
......@@ -106,7 +106,7 @@ TEST(benchmark, topological_sort)
shared_ptr<Node> result;
vector<shared_ptr<op::Parameter>> args;
result = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
for (int i=0; i<1000000; i++)
for (int i = 0; i < 1000000; i++)
{
auto in_1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto in_2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
......@@ -126,9 +126,7 @@ TEST(benchmark, topological_sort)
NGRAPH_INFO << "topological sort took " << timer.get_milliseconds() << "ms";
size_t node_count = 0;
traverse_nodes(result, [&](const Node* node) {
node_count++;
});
traverse_nodes(result, [&](const Node* node) { node_count++; });
NGRAPH_INFO << "node count " << node_count;
......
......@@ -100,7 +100,7 @@ TEST(type_prop, concat_deduce)
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2, 4});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 1);
auto c = make_shared<op::Concat>(Nodes{param0, param1, param2}, 1);
c->propagate_types();
auto c_vt = c->get_value_type();
ASSERT_EQ(*c_vt, TensorViewType(element::Float32::element_type(), Shape{2, 12, 4}));
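The deduced shape follows the concat rule these tests pin down: sizes add along the concatenation axis (3 + 7 + 2 = 12 here) and must match exactly on every other axis. A stand-alone sketch of that rule (concat_shape is an illustrative helper, not ngraph API):

    #include <cstddef>
    #include <vector>

    using Shape = std::vector<std::size_t>;

    // On the concatenation axis the sizes add; every other axis must agree.
    Shape concat_shape(const std::vector<Shape>& args, std::size_t axis)
    {
        Shape out = args[0];
        out[axis] = 0;
        for (const auto& s : args)
            out[axis] += s[axis];
        return out; // {2,3,4} + {2,7,4} + {2,2,4} on axis 1 -> {2,12,4}
    }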
......@@ -112,7 +112,7 @@ TEST(type_prop, concat_deduce_incorrect)
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2, 4});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 1);
auto c = make_shared<op::Concat>(Nodes{param0, param1, param2}, 1);
c->set_value_type(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 14, 4}));
try
......@@ -135,8 +135,11 @@ TEST(type_prop, concat_deduce_wrong_rank)
{
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2,});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 1);
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(),
Shape{
2, 2,
});
auto c = make_shared<op::Concat>(Nodes{param0, param1, param2}, 1);
try
{
c->propagate_types();
......@@ -158,7 +161,7 @@ TEST(type_prop, concat_deduce_wrong_shape)
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2, 5});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 1);
auto c = make_shared<op::Concat>(Nodes{param0, param1, param2}, 1);
try
{
c->propagate_types();
......@@ -167,7 +170,10 @@ TEST(type_prop, concat_deduce_wrong_shape)
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Arguments to concat do not have same dimension on a non-concatenation axis"));
EXPECT_EQ(
error.what(),
std::string(
"Arguments to concat do not have same dimension on a non-concatenation axis"));
}
catch (...)
{
......@@ -180,7 +186,7 @@ TEST(type_prop, concat_deduce_axis_oob)
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2, 5});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 3);
auto c = make_shared<op::Concat>(Nodes{param0, param1, param2}, 3);
try
{
c->propagate_types();
......@@ -203,7 +209,7 @@ TEST(type_prop, concat_deduce_axis_barely_in_bounds)
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 8});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 12});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 2);
auto c = make_shared<op::Concat>(Nodes{param0, param1, param2}, 2);
c->propagate_types();
auto c_vt = c->get_value_type();
ASSERT_EQ(*c_vt, TensorViewType(element::Float32::element_type(), Shape{2, 3, 24}));
......@@ -214,7 +220,7 @@ TEST(type_prop, concat_deduce_elem_type_mismatch)
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Int32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2, 4});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 1);
auto c = make_shared<op::Concat>(Nodes{param0, param1, param2}, 1);
try
{
c->propagate_types();
......@@ -238,22 +244,22 @@ TEST(type_prop, dot_deduce_scalar_2d)
{
// Deduce type for scalar/matrix arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4,5});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4, 5});
auto bc = make_shared<op::Dot>(param1, param2);
bc->propagate_types();
auto bc_vt = bc->get_value_type();
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{4,5}));
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{4, 5}));
}
TEST(type_prop, dot_deduce_2d_scalar)
{
// Deduce type for matrix/scalar arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4,5});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4, 5});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto bc = make_shared<op::Dot>(param1, param2);
bc->propagate_types();
auto bc_vt = bc->get_value_type();
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{4,5}));
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{4, 5}));
}
TEST(type_prop, dot_deduce_scalar_scalar)
......@@ -292,43 +298,43 @@ TEST(type_prop, dot_deduce_1d)
TEST(type_prop, dot_deduce_2d)
{
// Deduce type for matrix/matrix arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4,2});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2,3});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4, 2});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3});
auto bc = make_shared<op::Dot>(param1, param2);
bc->propagate_types();
auto bc_vt = bc->get_value_type();
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{4,3}));
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{4, 3}));
}
TEST(type_prop, dot_deduce_different_rank)
{
// Deduce type for different-rank tensor arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2,8,4,2});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1,2,3});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 8, 4, 2});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1, 2, 3});
auto bc = make_shared<op::Dot>(param1, param2);
bc->propagate_types();
auto bc_vt = bc->get_value_type();
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{2,8,4,1,3}));
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{2, 8, 4, 1, 3}));
}
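The expected shapes in these Dot tests are consistent with one rule: drop the last axis of the first argument and the second-to-last axis of the second (its only axis when it is a vector), then concatenate what remains, with scalars passing the other shape through. A sketch of that reading (dot_shape is illustrative, not ngraph API):

    #include <cstddef>
    #include <vector>

    using Shape = std::vector<std::size_t>;

    Shape dot_shape(Shape a, Shape b)
    {
        if (a.empty()) return b; // scalar * tensor
        if (b.empty()) return a; // tensor * scalar
        a.pop_back();            // reduced axis of the first argument
        const std::size_t k = (b.size() >= 2) ? b.size() - 2 : 0;
        b.erase(b.begin() + static_cast<std::ptrdiff_t>(k)); // reduced axis of the second
        a.insert(a.end(), b.begin(), b.end());
        return a; // {2,8,4,2} . {1,2,3} -> {2,8,4,1,3}; {4,2} . {2,3} -> {4,3}
    }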
TEST(type_prop, dot_deduce_different_rank_correct)
{
// Deduced type matches explicitly set type
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2,8,4,2});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1,2,3});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 8, 4, 2});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1, 2, 3});
auto bc = make_shared<op::Dot>(param1, param2);
bc->set_value_type(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2,8,4,1,3}));
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 8, 4, 1, 3}));
bc->propagate_types();
auto bc_vt = bc->get_value_type();
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{2,8,4,1,3}));
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{2, 8, 4, 1, 3}));
}
TEST(type_prop, dot_deduce_element_type_mismatch)
{
// Type deduction fails due to element type mismatch
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4,2});
auto param2 = make_shared<op::Parameter>(element::Int32::element_type(), Shape{2,5});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4, 2});
auto param2 = make_shared<op::Parameter>(element::Int32::element_type(), Shape{2, 5});
auto bc = make_shared<op::Dot>(param1, param2);
try
{
......@@ -349,8 +355,8 @@ TEST(type_prop, dot_deduce_element_type_mismatch)
TEST(type_prop, dot_deduce_reduction_axes_size_mismatch)
{
// Type deduction fails due to reduction axes size mismatch
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4,2});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{3,5});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4, 2});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{3, 5});
auto bc = make_shared<op::Dot>(param1, param2);
try
{
......@@ -417,7 +423,8 @@ void test_binary_bad_arguments_view_element_types(const shared_ptr<Node>& node)
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Arguments must have the same tensor view element type"));
EXPECT_EQ(error.what(),
std::string("Arguments must have the same tensor view element type"));
}
catch (...)
{
......@@ -487,10 +494,10 @@ TEST(type_prop, comparison_good)
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto tv0_2_4_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto eq = make_shared<op::Equal>(tv0_2_4_param_0,tv0_2_4_param_1);
auto eq = make_shared<op::Equal>(tv0_2_4_param_0, tv0_2_4_param_1);
TensorViewType expected_type{element::Bool::element_type(), Shape{2, 4}};
eq->propagate_types();
EXPECT_EQ(*eq->get_value_type(),expected_type);
EXPECT_EQ(*eq->get_value_type(), expected_type);
}
TEST(type_prop, binary_arithmetic_bad_argument_element_types)
......@@ -499,7 +506,7 @@ TEST(type_prop, binary_arithmetic_bad_argument_element_types)
make_shared<TensorViewType>(element::Bool::element_type(), Shape{2, 4}));
auto tv0_2_4_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Bool::element_type(), Shape{2, 4}));
auto bc = make_shared<op::Add>(tv0_2_4_param_0,tv0_2_4_param_1);
auto bc = make_shared<op::Add>(tv0_2_4_param_0, tv0_2_4_param_1);
try
{
bc->propagate_types();
......@@ -508,7 +515,8 @@ TEST(type_prop, binary_arithmetic_bad_argument_element_types)
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Operands for arithmetic operators must have numeric element type"));
EXPECT_EQ(error.what(),
std::string("Operands for arithmetic operators must have numeric element type"));
}
catch (...)
{
......@@ -529,7 +537,8 @@ TEST(type_prop, unary_arithmetic_bad_argument_element_types)
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Operands for arithmetic operators must have numeric element type"));
EXPECT_EQ(error.what(),
std::string("Operands for arithmetic operators must have numeric element type"));
}
catch (...)
{
......@@ -545,7 +554,7 @@ TEST(type_prop, select_deduce)
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto tv0_2_4_param_2 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto bc = make_shared<op::Select>(tv0_2_4_param_0,tv0_2_4_param_1,tv0_2_4_param_2);
auto bc = make_shared<op::Select>(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2);
bc->propagate_types();
auto bc_vt = bc->get_value_type();
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{2, 4}));
......@@ -559,9 +568,8 @@ TEST(type_prop, select_deduce_correct)
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto tv0_2_4_param_2 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto bc = make_shared<op::Select>(tv0_2_4_param_0,tv0_2_4_param_1,tv0_2_4_param_2);
bc->set_value_type(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto bc = make_shared<op::Select>(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2);
bc->set_value_type(make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
bc->propagate_types();
auto bc_vt = bc->get_value_type();
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{2, 4}));
......@@ -575,7 +583,7 @@ TEST(type_prop, select_shape_mismatch_a)
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto tv0_2_4_param_2 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto bc = make_shared<op::Select>(tv0_2_4_param_0,tv0_2_4_param_1,tv0_2_4_param_2);
auto bc = make_shared<op::Select>(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2);
try
{
bc->propagate_types();
......@@ -600,7 +608,7 @@ TEST(type_prop, select_shape_mismatch_b)
make_shared<TensorViewType>(element::Float32::element_type(), Shape{3, 5}));
auto tv0_2_4_param_2 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto bc = make_shared<op::Select>(tv0_2_4_param_0,tv0_2_4_param_1,tv0_2_4_param_2);
auto bc = make_shared<op::Select>(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2);
try
{
bc->propagate_types();
......@@ -625,7 +633,7 @@ TEST(type_prop, select_shape_mismatch_c)
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto tv0_2_4_param_2 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{3, 5}));
auto bc = make_shared<op::Select>(tv0_2_4_param_0,tv0_2_4_param_1,tv0_2_4_param_2);
auto bc = make_shared<op::Select>(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2);
try
{
bc->propagate_types();
......@@ -650,7 +658,7 @@ TEST(type_prop, select_elem_mismatch_a)
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto tv0_2_4_param_2 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto bc = make_shared<op::Select>(tv0_2_4_param_0,tv0_2_4_param_1,tv0_2_4_param_2);
auto bc = make_shared<op::Select>(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2);
try
{
bc->propagate_types();
......@@ -659,7 +667,9 @@ TEST(type_prop, select_elem_mismatch_a)
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Argument 0 for arithmetic operators must have boolean element type"));
EXPECT_EQ(
error.what(),
std::string("Argument 0 for arithmetic operators must have boolean element type"));
}
catch (...)
{
......@@ -675,7 +685,7 @@ TEST(type_prop, select_elem_mismatch_bc)
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 4}));
auto tv0_2_4_param_2 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Int32::element_type(), Shape{2, 4}));
auto bc = make_shared<op::Select>(tv0_2_4_param_0,tv0_2_4_param_1,tv0_2_4_param_2);
auto bc = make_shared<op::Select>(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2);
try
{
bc->propagate_types();
......@@ -684,7 +694,8 @@ TEST(type_prop, select_elem_mismatch_bc)
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Arguments 1 and 2 must have the same tensor view type"));
EXPECT_EQ(error.what(),
std::string("Arguments 1 and 2 must have the same tensor view type"));
}
catch (...)
{
......@@ -704,25 +715,24 @@ TEST(type_prop, reduce_deduce)
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(f_param_0 + f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto f = make_shared<Function>(f_param_0 + f_param_1, rt, op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
r0->propagate_types();
ASSERT_EQ(*(r0->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{4}));
auto r1 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{1});
auto r1 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{1});
r1->propagate_types();
ASSERT_EQ(*(r1->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{2}));
auto r01 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0,1});
auto r01 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0, 1});
r01->propagate_types();
ASSERT_EQ(*(r01->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{}));
auto r_none = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{});
auto r_none = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{});
r_none->propagate_types();
ASSERT_EQ(*(r_none->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{2,4}));
ASSERT_EQ(*(r_none->get_value_type()),
TensorViewType(element::Float32::element_type(), Shape{2, 4}));
}
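The four assertions above pin down the Reduce shape rule: the result keeps the input axes in order, minus exactly those named in the AxisSet, so {2, 4} reduced over {0} gives {4}, over {1} gives {2}, over {0, 1} gives {}, and over {} is unchanged. As a stand-alone sketch (reduce_shape is illustrative, not ngraph API):

    #include <cstddef>
    #include <set>
    #include <vector>

    using Shape = std::vector<std::size_t>;

    Shape reduce_shape(const Shape& in, const std::set<std::size_t>& axes)
    {
        Shape out;
        for (std::size_t i = 0; i < in.size(); ++i)
            if (axes.count(i) == 0)
                out.push_back(in[i]); // keep the axes not being reduced
        return out; // {2,4} over {0} -> {4}; over {0,1} -> {}; over {} -> {2,4}
    }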
TEST(type_prop, reduce_deduce_correct)
......@@ -737,13 +747,10 @@ TEST(type_prop, reduce_deduce_correct)
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(f_param_0 + f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto f = make_shared<Function>(f_param_0 + f_param_1, rt, op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
r0->set_value_type(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{4}));
auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
r0->set_value_type(make_shared<TensorViewType>(element::Float32::element_type(), Shape{4}));
r0->propagate_types();
ASSERT_EQ(*(r0->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{4}));
}
......@@ -760,11 +767,9 @@ TEST(type_prop, reduce_nonscalar)
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(f_param_0 + f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto f = make_shared<Function>(f_param_0 + f_param_1, rt, op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
try
{
r0->propagate_types();
......@@ -793,11 +798,9 @@ TEST(type_prop, reduce_elem_type_mismatch)
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(f_param_0 + f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto f = make_shared<Function>(f_param_0 + f_param_1, rt, op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
try
{
r0->propagate_types();
......@@ -806,7 +809,8 @@ TEST(type_prop, reduce_elem_type_mismatch)
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Element types for reductee and initial values do not match"));
EXPECT_EQ(error.what(),
std::string("Element types for reductee and initial values do not match"));
}
catch (...)
{
......@@ -827,11 +831,9 @@ TEST(type_prop, reduce_function_return_type_mismatch)
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Bool::element_type(), Shape{});
auto f = make_shared<Function>(
make_shared<op::Equal>(f_param_0,f_param_1),
rt,
op::Parameters{f_param_0, f_param_1});
make_shared<op::Equal>(f_param_0, f_param_1), rt, op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
try
{
r0->propagate_types();
......@@ -840,7 +842,8 @@ TEST(type_prop, reduce_function_return_type_mismatch)
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Return type from reduction function does not match expected"));
EXPECT_EQ(error.what(),
std::string("Return type from reduction function does not match expected"));
}
catch (...)
{
......@@ -860,12 +863,9 @@ TEST(type_prop, reduce_function_arg0_type_mismatch)
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(
f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto f = make_shared<Function>(f_param_1, rt, op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
try
{
r0->propagate_types();
......@@ -894,12 +894,9 @@ TEST(type_prop, reduce_function_arg1_type_mismatch)
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Bool::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(
f_param_0,
rt,
op::Parameters{f_param_0, f_param_1});
auto f = make_shared<Function>(f_param_0, rt, op::Parameters{f_param_0, f_param_1});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
try
{
r0->propagate_types();
......@@ -931,11 +928,9 @@ TEST(type_prop, reduce_function_arg_count_mismatch)
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(
f_param_0 + f_param_1 + f_param_2,
rt,
op::Parameters{f_param_0, f_param_1, f_param_2});
f_param_0 + f_param_1 + f_param_2, rt, op::Parameters{f_param_0, f_param_1, f_param_2});
auto r0 = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0});
auto r0 = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0});
try
{
r0->propagate_types();
......@@ -944,7 +939,8 @@ TEST(type_prop, reduce_function_arg_count_mismatch)
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Reduction function has wrong number of parameters (should be two)"));
EXPECT_EQ(error.what(),
std::string("Reduction function has wrong number of parameters (should be two)"));
}
catch (...)
{
......@@ -964,11 +960,9 @@ TEST(type_prop, reduce_axis_oob)
auto f_param_1 = make_shared<op::Parameter>(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{}));
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto f = make_shared<Function>(f_param_0 + f_param_1,
rt,
op::Parameters{f_param_0, f_param_1});
auto f = make_shared<Function>(f_param_0 + f_param_1, rt, op::Parameters{f_param_0, f_param_1});
auto r = make_shared<op::Reduce>(param_0,param_1,f,AxisSet{0,2,1});
auto r = make_shared<op::Reduce>(param_0, param_1, f, AxisSet{0, 2, 1});
try
{
r->propagate_types();
......@@ -999,7 +993,7 @@ TEST(type_prop, function_call_deduce)
auto X = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Y = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto Z = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto r = make_shared<op::FunctionCall>(f,Nodes{X,Y,Z});
auto r = make_shared<op::FunctionCall>(f, Nodes{X, Y, Z});
auto r_p_r = r + r;
r->propagate_types();
......
......@@ -134,7 +134,9 @@ TEST(util, contains)
EXPECT_FALSE(contains(v1, 8));
}
TEST(util, remove_from) {}
TEST(util, remove_from)
{
}
TEST(util, reduce)
{
......