Commit 025a1b92 authored by Robert Kimball's avatar Robert Kimball Committed by Scott Cyphers

New Interpreter backend (#287)

* New Interpreter backend

* PR review comments

* More PR fixes

* oops

* make autodiff tests backend aware

* wip

* wip

* more ops

* wip

* fix merge error

* merge fixes
parent f0810b5f
......@@ -22,6 +22,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-weak-vtables")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-global-constructors")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-exit-time-destructors")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-prototypes")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-noreturn")
# # should remove these
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast")
......
......@@ -85,6 +85,12 @@ set (SRC
pattern/matcher.cpp
runtime/backend.cpp
runtime/manager.cpp
runtime/interpreter/int_call_frame.cpp
runtime/interpreter/int_backend.cpp
runtime/interpreter/int_manager.cpp
runtime/interpreter/int_kernels.cpp
runtime/interpreter/int_external_function.cpp
runtime/interpreter/int_tensor_view.cpp
runtime/ngvm/call_frame.cpp
runtime/ngvm/external_function.cpp
runtime/ngvm/ngvm_backend.cpp
......
......@@ -40,7 +40,7 @@ namespace ngraph
size_t get_offset() const { return m_offset; }
virtual size_t get_index_offset(const std::vector<size_t>& indices) override;
const Strides& get_strides() const { return m_strides; }
const Strides& get_strides() const override { return m_strides; }
virtual bool operator==(const TensorViewLayout& other) const override;
protected:
......
......@@ -28,6 +28,7 @@ const ngraph::element::Type& TensorViewLayout::get_element_type() const
{
return m_tensor_view_type->get_element_type();
}
const ngraph::Shape& TensorViewLayout::get_shape() const
{
return m_tensor_view_type->get_shape();
......
......@@ -56,6 +56,7 @@ namespace ngraph
const element::Type& get_element_type() const;
const Shape& get_shape() const;
virtual const Strides& get_strides() const = 0;
/// Where this view is located in the buffer.
const BufferPos& get_buffer_pos() const { return m_buffer_pos; }
BufferPos& get_buffer_pos() { return m_buffer_pos; }
......
......@@ -1640,6 +1640,54 @@ void Emitter::EmitOneHot(const ngraph::Node* n,
}
}
void Emitter::EmitCeiling(const ngraph::Node* n,
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs)
{
TU << "{ // " << n->get_name() << "\n";
TU.indent++;
size_t element_count = outputs[0].get_tensor_view_layout()->get_size();
TU << "for (size_t i = 0; i < " << element_count << "; i++)\n";
TU << "{\n";
TU << " " << outputs[0].get_tensor().get_name() << "[i] = std::ceil("
<< inputs[0].get_tensor().get_name() << "[i]);\n";
TU << "}\n";
TU.indent--;
TU << "}\n";
}
void Emitter::EmitFloor(const ngraph::Node* n,
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs)
{
TU << "{ // " << n->get_name() << "\n";
TU.indent++;
size_t element_count = outputs[0].get_tensor_view_layout()->get_size();
TU << "for (size_t i = 0; i < " << element_count << "; i++)\n";
TU << "{\n";
TU << " " << outputs[0].get_tensor().get_name() << "[i] = std::floor("
<< inputs[0].get_tensor().get_name() << "[i]);\n";
TU << "}\n";
TU.indent--;
TU << "}\n";
}
void Emitter::EmitSqrt(const ngraph::Node* n,
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs)
{
TU << "{ // " << n->get_name() << "\n";
TU.indent++;
size_t element_count = outputs[0].get_tensor_view_layout()->get_size();
TU << "for (size_t i = 0; i < " << element_count << "; i++)\n";
TU << "{\n";
TU << " " << outputs[0].get_tensor().get_name() << "[i] = std::sqrt("
<< inputs[0].get_tensor().get_name() << "[i]);\n";
TU << "}\n";
TU.indent--;
TU << "}\n";
}
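// For reference, EmitSqrt above generates code of roughly this shape for a hypothetical
// node named "Sqrt_7" with 4 output elements (tensor names are illustrative):
//
//     { // Sqrt_7
//         for (size_t i = 0; i < 4; i++)
//         {
//             tensor_1[i] = std::sqrt(tensor_0[i]);
//         }
//     }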
//------------------------------------------------------------------------------------------------
// Utility methods
//------------------------------------------------------------------------------------------------
......
......@@ -96,6 +96,9 @@ namespace ngraph
void EMITTER_DECL(EmitPower);
void EMITTER_DECL(EmitReplaceSlice);
void EMITTER_DECL(EmitOneHot);
void EMITTER_DECL(EmitFloor);
void EMITTER_DECL(EmitCeiling);
void EMITTER_DECL(EmitSqrt);
private:
void generate_call(const std::vector<TensorViewInfo>& inputs,
......
......@@ -35,6 +35,7 @@
#include "ngraph/ops/asin.hpp"
#include "ngraph/ops/atan.hpp"
#include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/ceiling.hpp"
#include "ngraph/ops/concatenate.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp"
......@@ -44,6 +45,7 @@
#include "ngraph/ops/dot.hpp"
#include "ngraph/ops/equal.hpp"
#include "ngraph/ops/exp.hpp"
#include "ngraph/ops/floor.hpp"
#include "ngraph/ops/function_call.hpp"
#include "ngraph/ops/get_tuple_element.hpp"
#include "ngraph/ops/greater.hpp"
......@@ -66,6 +68,7 @@
#include "ngraph/ops/sin.hpp"
#include "ngraph/ops/sinh.hpp"
#include "ngraph/ops/slice.hpp"
#include "ngraph/ops/sqrt.hpp"
#include "ngraph/ops/subtract.hpp"
#include "ngraph/ops/sum.hpp"
#include "ngraph/ops/tan.hpp"
......@@ -160,6 +163,9 @@ static const OpMap dispatcher{
{TI(ngraph::op::Atan), &Emitter::EmitAtan},
{TI(ngraph::op::ReplaceSlice), &Emitter::EmitReplaceSlice},
{TI(ngraph::op::OneHot), &Emitter::EmitOneHot},
{TI(ngraph::op::Floor), &Emitter::EmitFloor},
{TI(ngraph::op::Ceiling), &Emitter::EmitCeiling},
{TI(ngraph::op::Sqrt), &Emitter::EmitSqrt},
};
ExternalFunction::ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/interpreter/int_tensor_view.hpp"
using namespace ngraph;
using namespace std;
shared_ptr<runtime::CallFrame> runtime::interpreter::INT_Backend::make_call_frame(
const shared_ptr<ExternalFunction>& external_function)
{
return external_function->make_call_frame();
}
shared_ptr<runtime::TensorView>
runtime::interpreter::INT_Backend::make_primary_tensor_view(const element::Type& element_type,
const Shape& shape)
{
auto rc = make_shared<runtime::interpreter::INT_TensorView>(element_type, shape, "external");
return static_pointer_cast<runtime::TensorView>(rc);
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/backend.hpp"
namespace ngraph
{
namespace runtime
{
namespace interpreter
{
static size_t alignment = 64;
class INT_Backend : public runtime::Backend
{
public:
std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame(
const std::shared_ptr<ngraph::runtime::ExternalFunction>& external_function)
override;
std::shared_ptr<ngraph::runtime::TensorView>
make_primary_tensor_view(const ngraph::element::Type& element_type,
const Shape& shape) override;
};
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <algorithm>
#include "ngraph/runtime/interpreter/int_call_frame.hpp"
#include "ngraph/runtime/interpreter/int_tensor_view.hpp"
using namespace std;
using namespace ngraph;
runtime::interpreter::INT_CallFrame::INT_CallFrame(shared_ptr<ExternalFunction> external_function,
shared_ptr<Function> func)
: m_external_function(external_function)
, m_function(func)
{
}
void runtime::interpreter::INT_CallFrame::call(
std::shared_ptr<Function> function,
const vector<shared_ptr<runtime::interpreter::INT_TensorView>>& input_tvs,
const vector<shared_ptr<runtime::interpreter::INT_TensorView>>& output_tvs)
{
unordered_map<string, shared_ptr<runtime::interpreter::INT_TensorView>> tensor_map;
const std::vector<std::shared_ptr<op::Parameter>>& params = function->get_parameters();
for (size_t i = 0; i < input_tvs.size(); i++)
{
string name = params[i]->get_name();
tensor_map.insert({name, input_tvs[i]});
}
for (size_t i = 0; i < output_tvs.size(); i++)
{
string name = function->get_result()->get_name();
tensor_map.insert({name, output_tvs[i]});
}
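// tensor_map binds producing-node names to tensor views: parameters are bound to the call
// inputs and the function result to the call outputs (the loop above assumes a single result
// node). Intermediate tensors are created on demand below.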
// Invoke computation
for (shared_ptr<Node> op : function->get_ordered_ops())
{
vector<shared_ptr<runtime::interpreter::INT_TensorView>> inputs;
vector<shared_ptr<runtime::interpreter::INT_TensorView>> outputs;
element::Type base_type;
if (op->get_inputs().empty())
{
base_type = op->get_element_type();
}
else
{
base_type = op->get_inputs().at(0).get_tensor().get_element_type();
}
element::Type secondary_type = op->get_element_type();
// Some ops have unusual input/output types, so handle those special cases here
if (op->description() == "Select")
{
base_type = op->get_inputs().at(1).get_tensor().get_element_type();
secondary_type = op->get_inputs().at(0).get_tensor().get_element_type();
}
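// For Select, args[0] is the boolean selector while args[1]/args[2] (and the output) carry
// the value element type, so the value type is taken from input 1 rather than input 0 above.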
for (const descriptor::Input& input : op->get_inputs())
{
string name = input.get_output().get_node()->get_name();
shared_ptr<runtime::interpreter::INT_TensorView> tv = tensor_map.at(name);
inputs.push_back(tv);
// NGRAPH_INFO << "Op Inputs " << name;
}
for (descriptor::Output& output : op->get_outputs())
{
string name = output.get_node()->get_name();
shared_ptr<runtime::interpreter::INT_TensorView> tv;
if (!contains_key(tensor_map, name))
{
// The output tensor is not in the tensor map so create a new tensor
const Shape& shape = output.get_tensor_view_type()->get_shape();
element::Type element_type = output.get_tensor_view_type()->get_element_type();
string tensor_name = output.get_tensor().get_name();
tv = make_shared<runtime::interpreter::INT_TensorView>(
element_type, shape, tensor_name);
tensor_map.insert({name, tv});
}
else
{
tv = tensor_map.at(name);
}
outputs.push_back(tv);
// NGRAPH_INFO << "Op Outputs " << name;
}
if (base_type == element::boolean)
{
generate_calls<char>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::f32)
{
generate_calls<float>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::f64)
{
generate_calls<double>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::i8)
{
generate_calls<int8_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::i16)
{
generate_calls<int16_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::i32)
{
generate_calls<int32_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::i64)
{
generate_calls<int64_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::u8)
{
generate_calls<uint8_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::u16)
{
generate_calls<uint16_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::u32)
{
generate_calls<uint32_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::u64)
{
generate_calls<uint64_t>(secondary_type, *op, inputs, outputs);
}
else
{
stringstream ss;
ss << "unsupported element type " << base_type << " op " << op->get_name();
throw runtime_error(ss.str());
}
// Delete any obsolete tensors
for (const descriptor::Tensor* t : op->liveness_free_list)
{
for (auto it = tensor_map.begin(); it != tensor_map.end(); ++it)
{
if (it->second->get_tensor().get_name() == t->get_name())
{
tensor_map.erase(it);
break;
}
}
}
}
}
void runtime::interpreter::INT_CallFrame::tensor_call(
const vector<shared_ptr<runtime::interpreter::INT_TensorView>>& input_tvs,
const vector<shared_ptr<runtime::interpreter::INT_TensorView>>& output_tvs)
{
call(m_function, input_tvs, output_tvs);
}
void runtime::interpreter::INT_CallFrame::tensor_call(
const vector<shared_ptr<runtime::TensorView>>& input_tvs,
const vector<shared_ptr<runtime::TensorView>>& output_tvs)
{
vector<shared_ptr<runtime::interpreter::INT_TensorView>> args;
vector<shared_ptr<runtime::interpreter::INT_TensorView>> out;
for (auto tv : input_tvs)
{
args.push_back(static_pointer_cast<runtime::interpreter::INT_TensorView>(tv));
}
for (auto tv : output_tvs)
{
out.push_back(static_pointer_cast<runtime::interpreter::INT_TensorView>(tv));
}
tensor_call(args, out);
}
void runtime::interpreter::INT_CallFrame::call(const vector<shared_ptr<runtime::Value>>& arguments,
const vector<shared_ptr<runtime::Value>>& results)
{
vector<shared_ptr<runtime::TensorView>> inputs;
for (shared_ptr<runtime::Value> argument : arguments)
{
argument->collect_tensor_views(inputs, argument);
}
vector<shared_ptr<runtime::TensorView>> outputs;
for (shared_ptr<runtime::Value> result : results)
{
result->collect_tensor_views(outputs, result);
}
tensor_call(inputs, outputs);
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <functional>
#include <memory>
#include <vector>
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
#include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/concatenate.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/one_hot.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/replace_slice.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/slice.hpp"
#include "ngraph/ops/sum.hpp"
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/interpreter/int_tensor_view.hpp"
#include "ngraph/runtime/interpreter/int_tensor_view.hpp"
#include "ngraph/runtime/kernel/abs.hpp"
#include "ngraph/runtime/kernel/acos.hpp"
#include "ngraph/runtime/kernel/add.hpp"
#include "ngraph/runtime/kernel/asin.hpp"
#include "ngraph/runtime/kernel/atan.hpp"
#include "ngraph/runtime/kernel/broadcast.hpp"
#include "ngraph/runtime/kernel/ceiling.hpp"
#include "ngraph/runtime/kernel/concat.hpp"
#include "ngraph/runtime/kernel/constant.hpp"
#include "ngraph/runtime/kernel/convert.hpp"
#include "ngraph/runtime/kernel/copy.hpp"
#include "ngraph/runtime/kernel/cos.hpp"
#include "ngraph/runtime/kernel/cosh.hpp"
#include "ngraph/runtime/kernel/divide.hpp"
#include "ngraph/runtime/kernel/dot.hpp"
#include "ngraph/runtime/kernel/equal.hpp"
#include "ngraph/runtime/kernel/exp.hpp"
#include "ngraph/runtime/kernel/floor.hpp"
#include "ngraph/runtime/kernel/greater.hpp"
#include "ngraph/runtime/kernel/greater_eq.hpp"
#include "ngraph/runtime/kernel/less.hpp"
#include "ngraph/runtime/kernel/less_eq.hpp"
#include "ngraph/runtime/kernel/log.hpp"
#include "ngraph/runtime/kernel/maximum.hpp"
#include "ngraph/runtime/kernel/minimum.hpp"
#include "ngraph/runtime/kernel/multiply.hpp"
#include "ngraph/runtime/kernel/negate.hpp"
#include "ngraph/runtime/kernel/not_equal.hpp"
#include "ngraph/runtime/kernel/one_hot.hpp"
#include "ngraph/runtime/kernel/power.hpp"
#include "ngraph/runtime/kernel/reduce.hpp"
#include "ngraph/runtime/kernel/replace_slice.hpp"
#include "ngraph/runtime/kernel/reshape.hpp"
#include "ngraph/runtime/kernel/scalar_tensor_product.hpp"
#include "ngraph/runtime/kernel/select.hpp"
#include "ngraph/runtime/kernel/sign.hpp"
#include "ngraph/runtime/kernel/sin.hpp"
#include "ngraph/runtime/kernel/sinh.hpp"
#include "ngraph/runtime/kernel/slice.hpp"
#include "ngraph/runtime/kernel/sqrt.hpp"
#include "ngraph/runtime/kernel/subtract.hpp"
#include "ngraph/runtime/kernel/sum.hpp"
#include "ngraph/runtime/kernel/tan.hpp"
#include "ngraph/runtime/kernel/tanh.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/util.hpp"
namespace ngraph
{
namespace runtime
{
class PrimaryTensorView;
namespace interpreter
{
class ExternalFunction;
class INT_CallFrame;
}
}
}
// Compile and execute graphs
class ngraph::runtime::interpreter::INT_CallFrame : public runtime::CallFrame
{
public:
INT_CallFrame(std::shared_ptr<ExternalFunction> external_function,
std::shared_ptr<Function> func);
/// @brief Invoke the function with values matching the signature of the function.
///
/// Tuples will be expanded into their tensor views to build the call frame.
void call(const std::vector<std::shared_ptr<runtime::Value>>& inputs,
const std::vector<std::shared_ptr<runtime::Value>>& outputs);
private:
/// @brief Invoke the function with tuples pre-expanded to their underlying
/// tensor views.
void tensor_call(const std::vector<std::shared_ptr<TensorView>>& inputs,
const std::vector<std::shared_ptr<TensorView>>& outputs);
void tensor_call(const std::vector<std::shared_ptr<INT_TensorView>>& inputs,
const std::vector<std::shared_ptr<INT_TensorView>>& outputs);
void call(std::shared_ptr<Function> function,
const std::vector<std::shared_ptr<runtime::interpreter::INT_TensorView>>& input_tvs,
const std::vector<std::shared_ptr<runtime::interpreter::INT_TensorView>>& output_tvs);
std::shared_ptr<ExternalFunction> m_external_function;
std::shared_ptr<Function> m_function;
template <typename BASE>
void generate_calls(const element::Type& type,
ngraph::Node& op,
const std::vector<std::shared_ptr<INT_TensorView>>& args,
const std::vector<std::shared_ptr<INT_TensorView>>& out)
{
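// Two-level type dispatch: the caller has already selected BASE from the primary element
// type; here the secondary element type selects the second template argument, so ops such
// as Convert can use distinct input and output element types in op_engine<BASE, S>.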
if (type == element::boolean)
{
op_engine<BASE, char>(op, args, out);
}
else if (type == element::f32)
{
op_engine<BASE, float>(op, args, out);
}
else if (type == element::f64)
{
op_engine<BASE, double>(op, args, out);
}
else if (type == element::i8)
{
op_engine<BASE, int8_t>(op, args, out);
}
else if (type == element::i16)
{
op_engine<BASE, int16_t>(op, args, out);
}
else if (type == element::i32)
{
op_engine<BASE, int32_t>(op, args, out);
}
else if (type == element::i64)
{
op_engine<BASE, int64_t>(op, args, out);
}
else if (type == element::u8)
{
op_engine<BASE, uint8_t>(op, args, out);
}
else if (type == element::u16)
{
op_engine<BASE, uint16_t>(op, args, out);
}
else if (type == element::u32)
{
op_engine<BASE, uint32_t>(op, args, out);
}
else if (type == element::u64)
{
op_engine<BASE, uint64_t>(op, args, out);
}
else
{
std::stringstream ss;
ss << "unsupported element type " << type << " op " << op.get_name();
throw std::runtime_error(ss.str());
}
}
template <typename T, typename S>
void op_engine(ngraph::Node& node,
const std::vector<std::shared_ptr<INT_TensorView>>& args,
const std::vector<std::shared_ptr<INT_TensorView>>& out)
{
std::string node_op = node.description();
if (node_op == "Abs")
{
kernel::abs<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Acos")
{
kernel::acos<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Add")
{
kernel::add<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Asin")
{
kernel::asin<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Atan")
{
kernel::atan<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Broadcast")
{
ngraph::op::Broadcast* broadcast = dynamic_cast<ngraph::op::Broadcast*>(&node);
Shape in_shape = args[0]->get_shape();
Shape out_shape = out[0]->get_shape();
AxisSet broadcast_axes = broadcast->get_broadcast_axes();
kernel::broadcast<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
in_shape,
out_shape,
broadcast_axes);
}
else if (node_op == "Ceiling")
{
kernel::ceiling<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Concat")
{
const op::Concat* concat = static_cast<const op::Concat*>(&node);
std::vector<T*> in_args;
std::vector<Shape> in_shapes;
for (std::shared_ptr<INT_TensorView> arg : args)
{
in_args.push_back(reinterpret_cast<T*>(arg->get_data_ptr()));
in_shapes.push_back(arg->get_shape());
}
kernel::concat<T>(in_args,
reinterpret_cast<T*>(out[0]->get_data_ptr()),
in_shapes,
out[0]->get_shape(),
concat->get_concatenation_axis());
}
else if (node_op == "Constant")
{
auto c = static_cast<const op::Constant*>(&node);
std::vector<T> input = ngraph::parse_string<T>(c->get_value_strings());
kernel::constant<T>(reinterpret_cast<T*>(input.data()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Convert")
{
kernel::convert<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<S*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Cos")
{
kernel::cos<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Cosh")
{
kernel::cosh<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Divide")
{
kernel::divide<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Dot")
{
if (args[0]->get_shape().size() == 0)
{
kernel::scalar_tensor_product(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (args[1]->get_shape().size() == 0)
{
kernel::scalar_tensor_product(reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else
{
size_t arg0_dot_axis;
size_t arg1_dot_axis;
if (args[0]->get_shape().size() == 1 && args[1]->get_shape().size() == 1)
{
arg0_dot_axis = 0;
arg1_dot_axis = 0;
}
// If arg0 is a matrix and arg1 is a vector, dot on axes 1 and 0 respectively.
else if (args[0]->get_shape().size() == 2 && args[1]->get_shape().size() == 1)
{
arg0_dot_axis = 1;
arg1_dot_axis = 0;
}
// If arg0 is rank n and arg1 is rank m, dot on axes n-1 and m-2, respectively.
//
// Note that this happens to handle the vector-matrix and matrix-matrix cases.
else
{
arg0_dot_axis = args[0]->get_shape().size() - 1;
arg1_dot_axis = args[1]->get_shape().size() - 2;
}
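// Example: for arg0 of shape {2, 3} and arg1 of shape {3, 4}, this selects
// arg0_dot_axis = 1 and arg1_dot_axis = 0, producing an output of shape {2, 4}.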
kernel::dot(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
args[0]->get_shape(),
args[1]->get_shape(),
out[0]->get_shape(),
arg0_dot_axis,
arg1_dot_axis);
}
}
else if (node_op == "Equal")
{
kernel::equal<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<char*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Exp")
{
kernel::exp<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Floor")
{
kernel::floor<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "FunctionCall")
{
std::shared_ptr<Function> function = node.get_function();
call(function, args, out);
}
else if (node_op == "Greater")
{
kernel::greater<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<char*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "GreaterEq")
{
kernel::greater_eq<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<char*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Less")
{
kernel::less<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<char*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "LessEq")
{
kernel::less_eq<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<char*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Log")
{
kernel::log<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Maximum")
{
kernel::maximum<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Minimum")
{
kernel::minimum<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Multiply")
{
kernel::multiply<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Negative")
{
kernel::negate<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "NotEqual")
{
kernel::not_equal<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<char*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "OneHot")
{
auto oh = static_cast<const op::OneHot*>(&node);
kernel::one_hot<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
args[0]->get_shape(),
out[0]->get_shape(),
oh->get_one_hot_axis());
}
else if (node_op == "Parameter")
{
}
else if (node_op == "ParameterizedConstant")
{
// I would like to apologize for this...
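// Dispatch on the runtime element type to recover the typed constant's raw data pointer,
// then copy its contents into the output tensor with kernel::copy below.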
element::Type type = element::from<T>();
const void* data;
if (type == element::boolean)
{
data = dynamic_cast<const op::ParameterizedConstant<element::Bool>*>(&node)
->get_value()
->get_vector()
.data();
}
else if (type == element::f32)
{
data = dynamic_cast<const op::ParameterizedConstant<element::Float32>*>(&node)
->get_value()
->get_vector()
.data();
}
else if (type == element::f64)
{
data = dynamic_cast<const op::ParameterizedConstant<element::Float64>*>(&node)
->get_value()
->get_vector()
.data();
}
else if (type == element::i8)
{
data = dynamic_cast<const op::ParameterizedConstant<element::Int8>*>(&node)
->get_value()
->get_vector()
.data();
}
else if (type == element::i16)
{
data = dynamic_cast<const op::ParameterizedConstant<element::Int16>*>(&node)
->get_value()
->get_vector()
.data();
}
else if (type == element::i32)
{
data = dynamic_cast<const op::ParameterizedConstant<element::Int32>*>(&node)
->get_value()
->get_vector()
.data();
}
else if (type == element::i64)
{
data = dynamic_cast<const op::ParameterizedConstant<element::Int64>*>(&node)
->get_value()
->get_vector()
.data();
}
else if (type == element::u8)
{
data = dynamic_cast<const op::ParameterizedConstant<element::UInt8>*>(&node)
->get_value()
->get_vector()
.data();
}
else if (type == element::u16)
{
data = dynamic_cast<const op::ParameterizedConstant<element::UInt16>*>(&node)
->get_value()
->get_vector()
.data();
}
else if (type == element::u32)
{
data = dynamic_cast<const op::ParameterizedConstant<element::UInt32>*>(&node)
->get_value()
->get_vector()
.data();
}
else if (type == element::u64)
{
data = dynamic_cast<const op::ParameterizedConstant<element::UInt64>*>(&node)
->get_value()
->get_vector()
.data();
}
else
{
std::stringstream ss;
ss << "unsupported element type " << type << " op " << node.get_name();
throw std::runtime_error(ss.str());
}
kernel::copy<T>(reinterpret_cast<T*>(const_cast<void*>(data)),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Power")
{
kernel::power<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Reduce")
{
ngraph::op::Reduce* reduce = dynamic_cast<ngraph::op::Reduce*>(&node);
std::shared_ptr<ngraph::Function> reduction_function = reduce->get_function();
auto in_tensor_view_type = std::dynamic_pointer_cast<const TensorViewType>(
node.get_arguments().at(0)->get_value_type());
if (in_tensor_view_type == nullptr)
{
throw std::runtime_error("encountered non-tensor view type as input to reduce");
}
auto out_tensor_view_type =
std::dynamic_pointer_cast<const TensorViewType>(node.get_value_type());
if (out_tensor_view_type == nullptr)
{
throw std::runtime_error("reduce has non-tensor view output type");
}
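// Wrap the user-supplied reduction Function in a scalar callback: each invocation copies
// x and y into scalar temporaries, runs the function through the interpreter, and reads
// back the scalar result.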
std::function<T(T, T)> f =
[this, in_tensor_view_type, out_tensor_view_type, reduction_function](T x,
T y) -> T {
auto tx = std::make_shared<runtime::interpreter::INT_TensorView>(
in_tensor_view_type->get_element_type(), Shape{}, "reduce_temp_x");
auto ty = std::make_shared<runtime::interpreter::INT_TensorView>(
in_tensor_view_type->get_element_type(), Shape{}, "reduce_temp_y");
auto tr = std::make_shared<runtime::interpreter::INT_TensorView>(
in_tensor_view_type->get_element_type(), Shape{}, "reduce_temp_r");
*(reinterpret_cast<T*>(tx->get_data_ptr())) = x;
*(reinterpret_cast<T*>(ty->get_data_ptr())) = y;
call(reduction_function, {tx, ty}, {tr});
return *(reinterpret_cast<T*>(tr->get_data_ptr()));
};
kernel::reduce(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
in_tensor_view_type->get_shape(),
out_tensor_view_type->get_shape(),
reduce->get_reduction_axes(),
f);
}
// else if (node_op == "Remainder")
// {
// // node = make_shared<op::Remainder>(args[0], args[1]);
// }
else if (node_op == "ReplaceSlice")
{
const op::ReplaceSlice* slice = static_cast<const op::ReplaceSlice*>(&node);
kernel::replace_slice<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
args[1]->get_shape(),
slice->get_lower_bounds(),
slice->get_upper_bounds(),
slice->get_strides(),
out[0]->get_shape());
}
else if (node_op == "Reshape")
{
ngraph::op::Reshape* reshape = dynamic_cast<ngraph::op::Reshape*>(&node);
kernel::reshape(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
args[0]->get_shape(),
reshape->get_input_order(),
out[0]->get_shape());
}
else if (node_op == "Select")
{
kernel::select<T>(reinterpret_cast<char*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(args[2]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Sign")
{
kernel::sign<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Sin")
{
kernel::sin<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Sinh")
{
kernel::sinh<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Slice")
{
const op::Slice* slice = static_cast<const op::Slice*>(&node);
kernel::slice<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
args[0]->get_shape(),
slice->get_lower_bounds(),
slice->get_upper_bounds(),
slice->get_strides(),
out[0]->get_shape());
}
else if (node_op == "Sqrt")
{
kernel::sqrt<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Subtract")
{
kernel::subtract<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Sum")
{
const op::Sum* sum = static_cast<const op::Sum*>(&node);
kernel::sum<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
args[0]->get_shape(),
out[0]->get_shape(),
sum->get_reduction_axes());
}
else if (node_op == "Tan")
{
kernel::tan<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else if (node_op == "Tanh")
{
kernel::tanh<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
out[0]->get_element_count());
}
else
{
std::stringstream ss;
ss << "unsupported op " << node_op;
throw std::runtime_error(ss.str());
}
}
};
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <fstream>
#include <memory>
#include <string>
#include <tuple>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
#include "ngraph/ops/abs.hpp"
#include "ngraph/ops/acos.hpp"
#include "ngraph/ops/add.hpp"
#include "ngraph/ops/asin.hpp"
#include "ngraph/ops/atan.hpp"
#include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/concatenate.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp"
#include "ngraph/ops/cos.hpp"
#include "ngraph/ops/cosh.hpp"
#include "ngraph/ops/divide.hpp"
#include "ngraph/ops/dot.hpp"
#include "ngraph/ops/equal.hpp"
#include "ngraph/ops/exp.hpp"
#include "ngraph/ops/function_call.hpp"
#include "ngraph/ops/get_tuple_element.hpp"
#include "ngraph/ops/greater.hpp"
#include "ngraph/ops/greater_eq.hpp"
#include "ngraph/ops/less.hpp"
#include "ngraph/ops/less_eq.hpp"
#include "ngraph/ops/log.hpp"
#include "ngraph/ops/maximum.hpp"
#include "ngraph/ops/minimum.hpp"
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/negative.hpp"
#include "ngraph/ops/not_equal.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/sign.hpp"
#include "ngraph/ops/sin.hpp"
#include "ngraph/ops/sinh.hpp"
#include "ngraph/ops/slice.hpp"
#include "ngraph/ops/subtract.hpp"
#include "ngraph/ops/sum.hpp"
#include "ngraph/ops/tan.hpp"
#include "ngraph/ops/tanh.hpp"
#include "ngraph/ops/tuple.hpp"
#include "ngraph/pass/assign_layout.hpp"
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/memory_layout.hpp"
#include "ngraph/pass/topological_sort.hpp"
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/runtime/interpreter/int_call_frame.hpp"
#include "ngraph/runtime/interpreter/int_external_function.hpp"
#include "ngraph/runtime/utils.hpp"
using namespace std;
using namespace ngraph;
static const string s_output_dir = "cpu_codegen";
class StaticInitializers
{
public:
StaticInitializers() { file_util::remove_directory(s_output_dir); }
};
static StaticInitializers s_static_initializers;
using descriptor::layout::DenseTensorViewLayout;
runtime::interpreter::ExternalFunction::ExternalFunction(const shared_ptr<Function>& function,
bool release_function)
: runtime::ExternalFunction(function, release_function)
, m_function(function)
{
}
void runtime::interpreter::ExternalFunction::compile()
{
if (m_is_compiled)
{
return;
}
string function_name = m_function->get_name();
string dump_filename = file_util::path_join(s_output_dir, function_name + "_ops.txt");
pass::Manager pass_manager;
pass_manager.register_pass<pass::TopologicalSort>();
// For now, just make everyone row-major.
pass_manager.register_pass<pass::AssignLayout<DenseTensorViewLayout>>();
pass_manager.register_pass<pass::Liveness>();
pass_manager.run_passes(m_function);
m_is_compiled = true;
if (m_release_function)
{
release_function();
}
}
shared_ptr<runtime::CallFrame> runtime::interpreter::ExternalFunction::make_call_frame()
{
if (!m_is_compiled)
{
compile();
}
return make_shared<runtime::interpreter::INT_CallFrame>(shared_from_this(), m_function);
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <functional>
#include <memory>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include "ngraph/function.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/runtime/interpreter/int_call_frame.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
namespace runtime
{
namespace interpreter
{
class ExternalFunction : public ngraph::runtime::ExternalFunction,
public std::enable_shared_from_this<ExternalFunction>
{
public:
ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
bool release_function = true);
std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame();
protected:
std::shared_ptr<ngraph::Function> m_function;
void compile();
};
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "ngraph/runtime/interpreter/int_kernels.hpp"
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <cstddef>
#include <cstdint>
// CBLAS types and wrappers
namespace cblas
{
enum class Layout
{
RowMajor = 101,
ColMajor = 102
};
enum class Transpose
{
None = 111,
Transpose = 112,
ConjTrans = 113
};
enum class UpperLower
{
Upper = 121,
Lower = 122
};
enum class Diag
{
NonUnit = 131,
Unit = 132
};
enum class Side
{
Left = 141,
Right = 142
};
enum class Storage
{
Packed = 151
};
enum class Ident
{
AMatrix = 161,
BMatrix = 162
};
enum class Offset
{
RowOffset = 171,
ColOffset = 172,
FixOffset = 173
};
extern "C" {
void cblas_sgemm(const Layout layout,
const Transpose TransA,
const Transpose TransB,
const int64_t M,
const int64_t N,
const int64_t K,
const float alpha,
const float* A,
const int64_t lda,
const float* B,
const int64_t ldb,
const float beta,
float* C,
const int64_t ldc);
}
}
namespace mkl
{
extern "C" {
void MKL_Somatcopy(char ordering,
char trans,
size_t rows,
size_t cols,
const float alpha,
const float* A,
size_t lda,
float* B,
size_t ldb);
}
}
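As a rough illustration of the cblas_sgemm wrapper declared above (a hedged sketch; the shapes, values, and the example_sgemm function name are hypothetical, and it assumes a CBLAS-compatible library is linked in):
#include <vector>
void example_sgemm()
{
    // C (2x2) = 1.0f * A (2x3) * B (3x2) + 0.0f * C, all buffers row-major
    std::vector<float> A{1, 2, 3, 4, 5, 6};
    std::vector<float> B{1, 0, 0, 1, 1, 1};
    std::vector<float> C(4, 0.0f);
    cblas::cblas_sgemm(cblas::Layout::RowMajor,
                       cblas::Transpose::None,
                       cblas::Transpose::None,
                       2,             // M: rows of A and C
                       2,             // N: cols of B and C
                       3,             // K: cols of A / rows of B
                       1.0f,
                       A.data(), 3,   // lda
                       B.data(), 2,   // ldb
                       0.0f,
                       C.data(), 2);  // ldc
}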
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/runtime/interpreter/int_external_function.hpp"
#include "ngraph/runtime/interpreter/int_manager.hpp"
using namespace ngraph;
using namespace std;
shared_ptr<runtime::Backend> runtime::interpreter::INT_Manager::allocate_backend()
{
return make_shared<INT_Backend>();
}
shared_ptr<runtime::ExternalFunction>
runtime::interpreter::INT_Manager::compile(const shared_ptr<Function>& fun)
{
return make_shared<ExternalFunction>(fun);
}
runtime::Manager::Factory runtime::interpreter::INT_Manager::factory =
runtime::Manager::register_factory("INTERPRETER",
[](const string& name) -> shared_ptr<runtime::Manager> {
return make_shared<INT_Manager>();
});
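With the factory registered under the name "INTERPRETER", a minimal usage sketch (assuming `f` is an existing std::shared_ptr<ngraph::Function> with one float32 input and one float32 output of shape {4}; names and shapes are illustrative) looks roughly like:
// Hedged sketch: `f` is assumed to exist with the signature described above.
auto manager  = ngraph::runtime::Manager::get("INTERPRETER");
auto external = manager->compile(f);
auto backend  = manager->allocate_backend();
auto cf       = backend->make_call_frame(external);
auto a = backend->make_primary_tensor_view(ngraph::element::f32, ngraph::Shape{4});
auto r = backend->make_primary_tensor_view(ngraph::element::f32, ngraph::Shape{4});
std::vector<float> input{1, 2, 3, 4};
a->write(input.data(), 0, input.size() * sizeof(float));
cf->call({a}, {r});
std::vector<float> output(4);
r->read(output.data(), 0, output.size() * sizeof(float));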
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include "ngraph/codegen/execution_engine.hpp"
#include "ngraph/runtime/manager.hpp"
namespace ngraph
{
class Function;
namespace runtime
{
class ExternalFunction;
namespace interpreter
{
/// @brief Transformer for the interpreted backend
class INT_Manager : public Manager
{
protected:
ngraph::codegen::ExecutionEngine exec_state;
public:
virtual std::shared_ptr<Backend> allocate_backend() override;
virtual std::shared_ptr<ngraph::runtime::ExternalFunction>
compile(const std::shared_ptr<ngraph::Function>& fun) override;
static Factory factory;
};
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/primary_tensor_view.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/runtime/interpreter/int_tensor_view.hpp"
using namespace ngraph;
using namespace std;
runtime::interpreter::INT_TensorView::INT_TensorView(const element::Type& element_type,
const Shape& shape,
const string& name)
: runtime::TensorView(std::make_shared<descriptor::PrimaryTensorView>(
std::make_shared<TensorViewType>(element_type, shape), name, true, true, false))
, m_allocated_buffer_pool(nullptr)
, m_aligned_buffer_pool(nullptr)
{
m_descriptor->set_tensor_view_layout(
std::make_shared<descriptor::layout::DenseTensorViewLayout>(*m_descriptor));
m_buffer_size = m_descriptor->get_tensor_view_layout()->get_size() * element_type.size();
if (m_buffer_size > 0)
{
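// Over-allocate by `alignment` extra bytes, then round the returned pointer up to the
// next 64-byte boundary so kernels see an aligned buffer.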
size_t allocation_size = m_buffer_size + runtime::interpreter::alignment;
m_allocated_buffer_pool = static_cast<char*>(malloc(allocation_size));
m_aligned_buffer_pool = m_allocated_buffer_pool;
size_t mod = size_t(m_aligned_buffer_pool) % alignment;
if (mod != 0)
{
m_aligned_buffer_pool += (alignment - mod);
}
}
}
runtime::interpreter::INT_TensorView::~INT_TensorView()
{
if (m_allocated_buffer_pool != nullptr)
{
free(m_allocated_buffer_pool);
}
}
char* runtime::interpreter::INT_TensorView::get_data_ptr()
{
return m_aligned_buffer_pool;
}
const char* runtime::interpreter::INT_TensorView::get_data_ptr() const
{
return m_aligned_buffer_pool;
}
void runtime::interpreter::INT_TensorView::write(const void* source, size_t tensor_offset, size_t n)
{
if (tensor_offset + n > m_buffer_size)
{
throw out_of_range("write access past end of tensor");
}
char* target = get_data_ptr();
memcpy(&target[tensor_offset], source, n);
}
void runtime::interpreter::INT_TensorView::read(void* target, size_t tensor_offset, size_t n) const
{
if (tensor_offset + n > m_buffer_size)
{
throw out_of_range("read access past end of tensor");
}
const char* source = get_data_ptr();
memcpy(target, &source[tensor_offset], n);
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/types/element_type.hpp"
namespace ngraph
{
namespace runtime
{
namespace interpreter
{
class INT_TensorView;
}
}
}
class ngraph::runtime::interpreter::INT_TensorView : public ngraph::runtime::TensorView
{
public:
INT_TensorView(const ngraph::element::Type& element_type,
const Shape& shape,
const std::string& name);
virtual ~INT_TensorView();
char* get_data_ptr();
const char* get_data_ptr() const;
/// @brief Write bytes directly into the tensor
/// @param p Pointer to source of data
/// @param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// @param n Number of bytes to write; must be an integral number of elements.
void write(const void* p, size_t tensor_offset, size_t n) override;
/// @brief Read bytes directly from the tensor
/// @param p Pointer to destination for data
/// @param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// @param n Number of bytes to read; must be an integral number of elements.
void read(void* p, size_t tensor_offset, size_t n) const override;
private:
char* m_allocated_buffer_pool;
char* m_aligned_buffer_pool;
size_t m_buffer_size;
};
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void constant(T* arg0, T* out, size_t count)
{
for (size_t i = 0; i < count; i++)
{
out[i] = arg0[i];
}
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <Eigen/Dense>
#include "ngraph/common.hpp"
namespace ngraph
{
namespace runtime
{
class TensorViewInfo;
class CallFrame;
namespace kernel
{
using DynamicStrides = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
using VectorStrides = Eigen::Stride<Eigen::Dynamic, 1>;
template <typename T>
using DynamicArray = Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>;
template <typename T>
using EigenArrayBase = Eigen::Map<DynamicArray<T>, 0, DynamicStrides>;
template <typename T>
using DynamicMatrix = Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
template <typename T>
using EigenMatrixBase = Eigen::Map<DynamicMatrix<T>, 0, DynamicStrides>;
template <typename T>
using DynamicVector = Eigen::Matrix<T, Eigen::Dynamic, 1>;
template <typename T>
using EigenVectorBase = Eigen::Map<DynamicVector<T>, 0, VectorStrides>;
namespace fmt
{
/// @brief vector format for Eigen wrappers.
class V
{
public:
V(size_t s)
: l0(s)
{
}
public:
size_t l0;
size_t l1{1};
size_t s0{1};
size_t s1{1};
};
class M
{
public:
M(const Shape& shape, const Strides& strides)
: l0(shape.at(0))
, l1(shape.at(1))
, s0(strides.at(0))
, s1(strides.at(1))
{
}
public:
size_t l0;
size_t l1;
size_t s0;
size_t s1;
};
}
// T element type
// FMT array format (fmt::V for vector, etc.)
// BASE select array/matrix
template <typename T, typename FMT, typename BASE, typename STRIDES = DynamicStrides>
class EigenWrapper : public BASE
{
using base = BASE;
public:
EigenWrapper(T* t, const FMT& fmt)
: base(t, fmt.l0, fmt.l1, STRIDES(fmt.s0, fmt.s1))
{
}
template <typename U>
EigenWrapper& operator=(const U& other)
{
this->base::operator=(other);
return *this;
}
};
template <typename T, typename FMT = fmt::V>
using EigenArray1d = EigenWrapper<T, FMT, EigenArrayBase<T>>;
template <typename T, typename FMT = fmt::M>
using EigenArray2d = EigenWrapper<T, FMT, EigenArrayBase<T>>;
template <typename T, typename FMT = fmt::M>
using EigenMatrix = EigenWrapper<T, FMT, EigenMatrixBase<T>>;
template <typename T, typename FMT = fmt::V>
using EigenVector = EigenWrapper<T, FMT, EigenVectorBase<T>, VectorStrides>;
}
}
}
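A minimal sketch of how these wrappers can be used (the example_eigen_add function, buffer names, and sizes are illustrative, not part of this commit):
#include <vector>
void example_eigen_add()
{
    using namespace ngraph::runtime::kernel;
    size_t n = 8;
    std::vector<float> a(n, 1.0f), b(n, 2.0f), c(n, 0.0f);
    // Map the raw buffers as 1-d Eigen arrays using the fmt::V vector format
    // and compute c = a + b element-wise.
    EigenArray1d<float>(c.data(), fmt::V(n)) =
        EigenArray1d<float>(a.data(), fmt::V(n)) + EigenArray1d<float>(b.data(), fmt::V(n));
}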
......@@ -14,6 +14,7 @@
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/common.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/types/element_type.hpp"
#include "ngraph/types/type.hpp"
......@@ -40,8 +41,28 @@ const ngraph::Shape& TensorView::get_shape() const
return m_descriptor->get_tensor_view_type()->get_shape();
}
const ngraph::Strides& TensorView::get_strides() const
{
return m_descriptor->get_tensor_view_layout()->get_strides();
}
std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout>
TensorView::get_tensor_view_layout() const
{
return m_descriptor->get_tensor_view_layout();
}
size_t TensorView::get_element_count() const
{
size_t rc = 1;
for (size_t s : get_shape())
{
rc *= s;
}
return rc;
}
const ngraph::descriptor::Tensor& TensorView::get_tensor() const
{
return get_tensor_view_descriptor()->get_tensor();
}
......@@ -54,6 +54,9 @@ namespace ngraph
const std::shared_ptr<Value>& value) const override;
const ngraph::Shape& get_shape() const;
const ngraph::Strides& get_strides() const;
size_t get_element_count() const;
const ngraph::descriptor::Tensor& get_tensor() const;
std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout>
get_tensor_view_layout() const;
......
......@@ -21,7 +21,7 @@
using namespace ngraph;
const element::Type element::boolean(8, false, false, "char");
const element::Type element::boolean(8, false, true, "char");
const element::Type element::f32(32, true, true, "float");
const element::Type element::f64(64, true, true, "double");
const element::Type element::i8(8, false, true, "int8_t");
......@@ -33,6 +33,14 @@ const element::Type element::u16(16, false, false, "uint16_t");
const element::Type element::u32(32, false, false, "uint32_t");
const element::Type element::u64(64, false, false, "uint64_t");
element::Type::Type()
: m_bitwidth{0}
, m_is_real{0}
, m_is_signed{0}
, m_cname{}
{
}
element::Type::Type(size_t bitwidth, bool is_real, bool is_signed, const std::string& cname)
: m_bitwidth{bitwidth}
, m_is_real{is_real}
......
......@@ -48,9 +48,10 @@ namespace ngraph
class Type
{
public:
Type() = delete;
Type();
Type(const Type&) = default;
Type(size_t bitwidth, bool is_real, bool is_signed, const std::string& cname);
Type& operator=(const Type&) = default;
virtual ~Type() {}
const std::string& c_type_string() const;
size_t size() const;
......@@ -66,11 +67,10 @@ namespace ngraph
/// Returns true if the type is floating point, else false.
bool get_is_real() const { return m_is_real; }
private:
static std::map<std::string, Type> m_element_list;
size_t m_bitwidth;
bool m_is_real;
bool m_is_signed;
const std::string m_cname;
std::string m_cname;
};
template <typename T>
......
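A small sketch of what the constructor and member changes above enable: with a default constructor, a defaulted copy assignment, and a non-const m_cname, element::Type behaves as a regular value type (hypothetical usage, not code from this commit).
// Sketch only: Type can now be default-constructed and assigned later.
ngraph::element::Type t;      // bitwidth 0, empty cname
t = ngraph::element::f32;     // defaulted operator= compiles since m_cname is no longer const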
......@@ -22,7 +22,6 @@ include_directories(
)
set (SRC
autodiff.cpp
builder.cpp
builder_autobroadcast.cpp
build_graph.cpp
......@@ -54,7 +53,9 @@ set (SRC
# and replace BACKEND_NAME_GOES_HERE with your backend name.
# The code for the unit test suite is in test/backend_test.in.cpp
#================================================================================================
# TODO add interpreter back to unit tests when it works
set(BACKEND_NAMES ${BACKEND_NAMES} "NGVM")
set(BACKEND_NAMES ${BACKEND_NAMES} "INTERPRETER")
if(MKLDNN_INCLUDE_DIR)
include_directories(SYSTEM ${MKLDNN_INCLUDE_DIR})
......@@ -80,8 +81,16 @@ endif()
foreach(BACKEND_NAME ${BACKEND_NAMES})
configure_file(backend_test.in.cpp backend_test_${BACKEND_NAME}.cpp)
set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/backend_test_${BACKEND_NAME}.cpp)
message(STATUS "Adding unit test for backend ${BACKEND_NAME}")
endforeach()
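For reference, configure_file substitutes every ${BACKEND_NAME} placeholder literally, so the generated backend_test_INTERPRETER.cpp contains concrete test names; a sketch of the generated output (not code from this commit):
// Generated from backend_test.in.cpp with BACKEND_NAME = INTERPRETER:
TEST(INTERPRETER, ab)
{
    auto manager = runtime::Manager::get("INTERPRETER");
    auto backend = manager->allocate_backend();
    // ... remainder identical to the template
}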
# <special case>
# This is a special case as NGVM is the only backend that can run these tests
set(BACKEND_NAME "NGVM")
configure_file(autodiff.in.cpp autodiff_NGVM.cpp)
set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/autodiff_NGVM.cpp)
# </special case>
include_directories(".")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCURDIR=\\\"${CMAKE_CURRENT_SOURCE_DIR}\\\"")
......
......@@ -94,9 +94,9 @@ bool autodiff_numeric_compare_selective(
return test::all_close(results_num, results_sym, rtol, atol);
}
TEST(backwards, abs)
TEST(${BACKEND_NAME}, backwards_abs)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
// The numeric derivative and the symbolic one may disagree around 0, so we will dance around
......@@ -125,9 +125,9 @@ TEST(backwards, abs)
}
}
TEST(backwards, add)
TEST(${BACKEND_NAME}, backwards_add)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -145,9 +145,9 @@ TEST(backwards, add)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, add_nested)
TEST(${BACKEND_NAME}, backwards_add_nested)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -165,9 +165,9 @@ TEST(backwards, add_nested)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, broadcast0)
TEST(${BACKEND_NAME}, backwards_broadcast0)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -183,9 +183,9 @@ TEST(backwards, broadcast0)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, broadcast1)
TEST(${BACKEND_NAME}, backwards_broadcast1)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -201,9 +201,9 @@ TEST(backwards, broadcast1)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, concat_vector)
TEST(${BACKEND_NAME}, backwards_concat_vector)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -229,9 +229,9 @@ TEST(backwards, concat_vector)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1, x2}, .01f, .01f));
}
TEST(backwards, concat_axis_0)
TEST(${BACKEND_NAME}, backwards_concat_axis_0)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -257,9 +257,9 @@ TEST(backwards, concat_axis_0)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1, x2}, .01f, .01f));
}
TEST(backwards, concat_axis_1)
TEST(${BACKEND_NAME}, backwards_concat_axis_1)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -285,9 +285,9 @@ TEST(backwards, concat_axis_1)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1, x2}, .01f, .01f));
}
TEST(backwards, ceiling)
TEST(${BACKEND_NAME}, backwards_ceiling)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
// The numeric derivative and the symbolic one may disagree near integers, so we will dance around
......@@ -322,9 +322,9 @@ TEST(backwards, ceiling)
}
}
TEST(backwards, cos)
TEST(${BACKEND_NAME}, backwards_cos)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-10.0f, 10.0f);
......@@ -343,9 +343,9 @@ TEST(backwards, cos)
}
}
TEST(backwards, cosh)
TEST(${BACKEND_NAME}, backwards_cosh)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-10.0f, 10.0f);
......@@ -364,9 +364,9 @@ TEST(backwards, cosh)
}
}
TEST(backwards, divide)
TEST(${BACKEND_NAME}, backwards_divide)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -389,9 +389,9 @@ TEST(backwards, divide)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x2}, .01f, .01f));
}
TEST(backwards, dot_scalar_scalar)
TEST(${BACKEND_NAME}, backwards_dot_scalar_scalar)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -411,9 +411,9 @@ TEST(backwards, dot_scalar_scalar)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, dot_scalar_tensor)
TEST(${BACKEND_NAME}, backwards_dot_scalar_tensor)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -433,9 +433,9 @@ TEST(backwards, dot_scalar_tensor)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, dot_tensor_scalar)
TEST(${BACKEND_NAME}, backwards_dot_tensor_scalar)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -455,9 +455,9 @@ TEST(backwards, dot_tensor_scalar)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, dot_vector_vector)
TEST(${BACKEND_NAME}, backwards_dot_vector_vector)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -477,9 +477,9 @@ TEST(backwards, dot_vector_vector)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, dot_tensor_vector)
TEST(${BACKEND_NAME}, backwards_dot_tensor_vector)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -499,9 +499,9 @@ TEST(backwards, dot_tensor_vector)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, dot_tensor2_tensor2)
TEST(${BACKEND_NAME}, backwards_dot_tensor2_tensor2)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -521,9 +521,9 @@ TEST(backwards, dot_tensor2_tensor2)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, exp)
TEST(${BACKEND_NAME}, backwards_exp)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -538,9 +538,9 @@ TEST(backwards, exp)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, floor)
TEST(${BACKEND_NAME}, backwards_floor)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
// The numeric derivative and the symbolic one may disagree near integers, so we will dance around
......@@ -575,9 +575,9 @@ TEST(backwards, floor)
}
}
TEST(backwards, log)
TEST(${BACKEND_NAME}, backwards_log)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(1.0f, 2.0f);
......@@ -592,9 +592,9 @@ TEST(backwards, log)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, maximum)
TEST(${BACKEND_NAME}, backwards_maximum)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -613,9 +613,9 @@ TEST(backwards, maximum)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, minimum)
TEST(${BACKEND_NAME}, backwards_minimum)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -634,9 +634,9 @@ TEST(backwards, minimum)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, multiply)
TEST(${BACKEND_NAME}, backwards_multiply)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -654,9 +654,9 @@ TEST(backwards, multiply)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, negative)
TEST(${BACKEND_NAME}, backwards_negative)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -670,9 +670,9 @@ TEST(backwards, negative)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, parameter)
TEST(${BACKEND_NAME}, backwards_parameter)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -685,9 +685,9 @@ TEST(backwards, parameter)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, power)
TEST(${BACKEND_NAME}, backwards_power)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng_neg(-5.0f, -0.5f);
......@@ -727,9 +727,9 @@ TEST(backwards, power)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, replace_slice)
TEST(${BACKEND_NAME}, backwards_replace_slice)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-10.0f, 10.0f);
......@@ -754,9 +754,9 @@ TEST(backwards, replace_slice)
}
}
TEST(backwards, reshape)
TEST(${BACKEND_NAME}, backwards_reshape)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -772,9 +772,9 @@ TEST(backwards, reshape)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, select)
TEST(${BACKEND_NAME}, backwards_select)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-10.0f, 10.0f);
......@@ -806,9 +806,9 @@ TEST(backwards, select)
}
}
TEST(backwards, select_nested)
TEST(${BACKEND_NAME}, backwards_select_nested)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-10.0f, 10.0f);
......@@ -840,9 +840,9 @@ TEST(backwards, select_nested)
}
}
TEST(backwards, sign)
TEST(${BACKEND_NAME}, backwards_sign)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
// The numeric derivative and the symbolic one may disagree around 0, so we will dance around
......@@ -871,9 +871,9 @@ TEST(backwards, sign)
}
}
TEST(backwards, sin)
TEST(${BACKEND_NAME}, backwards_sin)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-10.0f, 10.0f);
......@@ -892,9 +892,9 @@ TEST(backwards, sin)
}
}
TEST(backwards, sinh)
TEST(${BACKEND_NAME}, backwards_sinh)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-10.0f, 10.0f);
......@@ -913,9 +913,9 @@ TEST(backwards, sinh)
}
}
TEST(backwards, slice)
TEST(${BACKEND_NAME}, backwards_slice)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-10.0f, 10.0f);
......@@ -935,9 +935,9 @@ TEST(backwards, slice)
}
}
TEST(backwards, sqrt)
TEST(${BACKEND_NAME}, backwards_sqrt)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
// Deriv has an asymptote at 0 so we'll stay away from there.
......@@ -957,9 +957,9 @@ TEST(backwards, sqrt)
}
}
TEST(backwards, subtract)
TEST(${BACKEND_NAME}, backwards_subtract)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -977,9 +977,9 @@ TEST(backwards, subtract)
autodiff_numeric_compare<float>(manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, sum_v2s)
TEST(${BACKEND_NAME}, backwards_sum_v2s)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -995,9 +995,9 @@ TEST(backwards, sum_v2s)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x}, .01f, .01f));
}
TEST(backwards, sum_m2s)
TEST(${BACKEND_NAME}, backwards_sum_m2s)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -1013,9 +1013,9 @@ TEST(backwards, sum_m2s)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x}, .01f, .01f));
}
TEST(backwards, sum_m2v_0)
TEST(${BACKEND_NAME}, backwards_sum_m2v_0)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -1031,9 +1031,9 @@ TEST(backwards, sum_m2v_0)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x}, .01f, .01f));
}
TEST(backwards, sum_m2v_1)
TEST(${BACKEND_NAME}, backwards_sum_m2v_1)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......@@ -1049,9 +1049,9 @@ TEST(backwards, sum_m2v_1)
EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x}, .01f, .01f));
}
TEST(backwards, tan)
TEST(${BACKEND_NAME}, backwards_tan)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
auto pi = 3.14159f;
......@@ -1083,9 +1083,9 @@ TEST(backwards, tan)
}
}
TEST(backwards, tanh)
TEST(${BACKEND_NAME}, backwards_tanh)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-10.0f, 10.0f);
......@@ -1104,9 +1104,9 @@ TEST(backwards, tanh)
}
}
TEST(backwards, abc)
TEST(${BACKEND_NAME}, backwards_abc)
{
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto backend = manager->allocate_backend();
test::Uniform<float> rng(-1.0f, 1.0f);
......
......@@ -31,6 +31,36 @@ static void copy_data(shared_ptr<runtime::TensorView> tv, const vector<T>& data)
tv->write(data.data(), 0, data_size);
}
TEST(${BACKEND_NAME}, ab)
{
using f32 = element::Float32;
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(f32::element_type(), shape);
auto B = make_shared<op::Parameter>(f32::element_type(), shape);
auto rt = make_shared<TensorViewType>(f32::element_type(), shape);
auto f = make_shared<Function>(A + B, rt, op::Parameters{A, B});
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
// Create some tensors for input/output
shared_ptr<runtime::TensorView> a =
backend->make_primary_tensor_view(f32::element_type(), shape);
shared_ptr<runtime::TensorView> b =
backend->make_primary_tensor_view(f32::element_type(), shape);
shared_ptr<runtime::TensorView> result =
backend->make_primary_tensor_view(f32::element_type(), shape);
copy_data(a, runtime::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
copy_data(b, runtime::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());
cf->call({a, b}, {result});
EXPECT_EQ(*result, (runtime::NDArray<float, 2>({{6, 8}, {10, 12}})));
}
TEST(${BACKEND_NAME}, abc)
{
using f32 = element::Float32;
......@@ -104,135 +134,6 @@ TEST(${BACKEND_NAME}, abc_int64)
EXPECT_EQ((vector<element::Int64::type>{50, 72, 98, 128}), result->get_vector<int64_t>());
}
// Same as abc, but using tuples for input and output
TEST(${BACKEND_NAME}, abc_tuple)
{
auto shape = Shape{2, 2};
auto tensor_view_type = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto ABC = make_shared<op::Parameter>(
make_shared<TupleType>(ValueTypes{tensor_view_type, tensor_view_type, tensor_view_type}));
auto A = make_shared<op::GetTupleElement>(ABC, 0);
auto B = make_shared<op::GetTupleElement>(ABC, 1);
auto C = make_shared<op::GetTupleElement>(ABC, 2);
auto f = make_shared<Function>(
make_shared<op::Tuple>(Nodes{(A + B) * C}), tensor_view_type, op::Parameters{ABC});
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
// Create some tensors for input/output
auto a = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
copy_data(a, vector<float>{1, 2, 3, 4});
auto b = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
copy_data(b, vector<float>{5, 6, 7, 8});
auto c = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
copy_data(c, vector<float>{9, 10, 11, 12});
auto abc = runtime::make_tuple({a, b, c});
auto bac = runtime::make_tuple({b, a, c});
auto acb = runtime::make_tuple({a, c, b});
auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
auto result_tuple = runtime::make_tuple({result});
cf->call({abc}, {result_tuple});
ASSERT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector<float>());
cf->call({bac}, {result_tuple});
ASSERT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector<float>());
cf->call({acb}, {result_tuple});
ASSERT_EQ((vector<float>{50, 72, 98, 128}), result->get_vector<float>());
}
// Same as abc, but using tuples for input and output
TEST(${BACKEND_NAME}, abc_tuple_int64)
{
auto shape = Shape{2, 2};
auto tensor_view_type = make_shared<TensorViewType>(element::Int64::element_type(), shape);
auto ABC = make_shared<op::Parameter>(
make_shared<TupleType>(ValueTypes{tensor_view_type, tensor_view_type, tensor_view_type}));
auto A = make_shared<op::GetTupleElement>(ABC, 0);
auto B = make_shared<op::GetTupleElement>(ABC, 1);
auto C = make_shared<op::GetTupleElement>(ABC, 2);
auto f = make_shared<Function>(
make_shared<op::Tuple>(Nodes{(A + B) * C}), tensor_view_type, op::Parameters{ABC});
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
// Create some tensors for input/output
auto a = backend->make_primary_tensor_view(element::Int64::element_type(), shape);
copy_data(a, vector<element::Int64::type>{1, 2, 3, 4});
auto b = backend->make_primary_tensor_view(element::Int64::element_type(), shape);
copy_data(b, vector<element::Int64::type>{5, 6, 7, 8});
auto c = backend->make_primary_tensor_view(element::Int64::element_type(), shape);
copy_data(c, vector<element::Int64::type>{9, 10, 11, 12});
auto abc = runtime::make_tuple({a, b, c});
auto bac = runtime::make_tuple({b, a, c});
auto acb = runtime::make_tuple({a, c, b});
auto result = backend->make_primary_tensor_view(element::Int64::element_type(), shape);
auto result_tuple = runtime::make_tuple({result});
cf->call({abc}, {result_tuple});
ASSERT_EQ((vector<element::Int64::type>{54, 80, 110, 144}),
result->get_vector<element::Int64::type>());
cf->call({bac}, {result_tuple});
ASSERT_EQ((vector<element::Int64::type>{54, 80, 110, 144}),
result->get_vector<element::Int64::type>());
cf->call({acb}, {result_tuple});
ASSERT_EQ((vector<element::Int64::type>{50, 72, 98, 128}),
result->get_vector<element::Int64::type>());
}
// Multiple retrieved values
TEST(${BACKEND_NAME}, tuple_result)
{
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto A_add_B = make_shared<op::Add>(A, B);
auto A_add_B_mul_C = make_shared<op::Multiply>(A_add_B, C);
auto rt = make_shared<TupleType>(std::vector<shared_ptr<const ValueType>>(
{make_shared<TensorViewType>(element::Float32::element_type(), shape),
make_shared<TensorViewType>(element::Float32::element_type(), shape)}));
auto f = make_shared<Function>(
make_shared<op::Tuple>(Nodes{A_add_B, A_add_B_mul_C}), rt, op::Parameters{A, B, C});
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
auto a = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
copy_data(a, vector<float>{1, 2, 3, 4});
auto b = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
copy_data(b, vector<float>{5, 6, 7, 8});
auto c = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
copy_data(c, vector<float>{9, 10, 11, 12});
auto r0 = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
auto r1 = backend->make_primary_tensor_view(element::Float32::element_type(), shape);
auto result_tuple = runtime::make_tuple({r0, r1});
cf->call({a, b, c}, {result_tuple});
ASSERT_EQ((vector<float>{6, 8, 10, 12}), r0->get_vector<float>());
ASSERT_EQ((vector<float>{54, 80, 110, 144}), r1->get_vector<float>());
}
TEST(${BACKEND_NAME}, abs)
{
auto shape = Shape{2, 2};
......@@ -261,7 +162,7 @@ TEST(${BACKEND_NAME}, ceiling)
auto result_type = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Ceiling>(A), result_type, op::Parameters{A});
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
......@@ -527,7 +428,7 @@ TEST(${BACKEND_NAME}, floor)
auto result_type = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Floor>(A), result_type, op::Parameters{A});
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
......@@ -3069,6 +2970,7 @@ TEST(${BACKEND_NAME}, sum_matrix_rows_zero)
auto a = backend->make_primary_tensor_view(element::Float32::element_type(), shape_a);
copy_data(a, vector<float>{});
auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape_rt);
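    // Pre-fill the output with non-zero values so the assertion below only passes
    // if the kernel actually overwrites them with the computed zero sums.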
copy_data(result, vector<float>({3, 3, 3}));
cf->call({a}, {result});
ASSERT_EQ((vector<float>{0, 0, 0}), result->get_vector<float>());
......@@ -3096,6 +2998,7 @@ TEST(${BACKEND_NAME}, sum_matrix_cols_zero)
auto a = backend->make_primary_tensor_view(element::Float32::element_type(), shape_a);
copy_data(a, vector<float>{});
auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape_rt);
copy_data(result, vector<float>({3, 3}));
cf->call({a}, {result});
ASSERT_EQ((vector<float>{0, 0}), result->get_vector<float>());
......@@ -3122,6 +3025,7 @@ TEST(${BACKEND_NAME}, sum_vector_zero)
auto a = backend->make_primary_tensor_view(element::Float32::element_type(), shape_a);
copy_data(a, vector<float>{});
auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape_rt);
copy_data(result, vector<float>({3}));
cf->call({a}, {result});
ASSERT_EQ((vector<float>{0}), result->get_vector<float>());
......@@ -3148,6 +3052,7 @@ TEST(${BACKEND_NAME}, sum_matrix_to_scalar_zero_by_zero)
auto a = backend->make_primary_tensor_view(element::Float32::element_type(), shape_a);
copy_data(a, vector<float>{});
auto result = backend->make_primary_tensor_view(element::Float32::element_type(), shape_rt);
copy_data(result, vector<float>({3}));
cf->call({a}, {result});
ASSERT_EQ((vector<float>{0}), result->get_vector<float>());
......@@ -3377,7 +3282,7 @@ TEST(${BACKEND_NAME}, sqrt)
auto result_type = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Sqrt>(A), result_type, op::Parameters{A});
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("${BACKEND_NAME}");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
......