Commit 025a1b92 authored by Robert Kimball, committed by Scott Cyphers

New Interpreter backend (#287)

* New Interpreter backend

* PR review comments

* More PR fixes

* oops

* make autodiff tests backend aware

* wip

* wip

* more ops

* wip

* fix merge error

* merge fixes
parent f0810b5f
......@@ -22,6 +22,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-weak-vtables")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-global-constructors")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-exit-time-destructors")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-prototypes")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-noreturn")
# # should remove these
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast")
......
......@@ -85,6 +85,12 @@ set (SRC
pattern/matcher.cpp
runtime/backend.cpp
runtime/manager.cpp
runtime/interpreter/int_call_frame.cpp
runtime/interpreter/int_backend.cpp
runtime/interpreter/int_manager.cpp
runtime/interpreter/int_kernels.cpp
runtime/interpreter/int_external_function.cpp
runtime/interpreter/int_tensor_view.cpp
runtime/ngvm/call_frame.cpp
runtime/ngvm/external_function.cpp
runtime/ngvm/ngvm_backend.cpp
......
......@@ -40,7 +40,7 @@ namespace ngraph
size_t get_offset() const { return m_offset; }
virtual size_t get_index_offset(const std::vector<size_t>& indices) override;
const Strides& get_strides() const { return m_strides; }
const Strides& get_strides() const override { return m_strides; }
virtual bool operator==(const TensorViewLayout& other) const override;
protected:
......
......@@ -28,6 +28,7 @@ const ngraph::element::Type& TensorViewLayout::get_element_type() const
{
return m_tensor_view_type->get_element_type();
}
const ngraph::Shape& TensorViewLayout::get_shape() const
{
return m_tensor_view_type->get_shape();
......
......@@ -56,6 +56,7 @@ namespace ngraph
const element::Type& get_element_type() const;
const Shape& get_shape() const;
virtual const Strides& get_strides() const = 0;
/// Where this view is located in the buffer.
const BufferPos& get_buffer_pos() const { return m_buffer_pos; }
BufferPos& get_buffer_pos() { return m_buffer_pos; }
......
......@@ -1640,6 +1640,54 @@ void Emitter::EmitOneHot(const ngraph::Node* n,
}
}
void Emitter::EmitCeiling(const ngraph::Node* n,
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs)
{
TU << "{ // " << n->get_name() << "\n";
TU.indent++;
size_t element_count = outputs[0].get_tensor_view_layout()->get_size();
TU << "for (size_t i = 0; i < " << element_count << "; i++)\n";
TU << "{\n";
TU << " " << outputs[0].get_tensor().get_name() << "[i] = std::ceil("
<< inputs[0].get_tensor().get_name() << "[i]);\n";
TU << "}\n";
TU.indent--;
TU << "}\n";
}
void Emitter::EmitFloor(const ngraph::Node* n,
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs)
{
TU << "{ // " << n->get_name() << "\n";
TU.indent++;
size_t element_count = outputs[0].get_tensor_view_layout()->get_size();
TU << "for (size_t i = 0; i < " << element_count << "; i++)\n";
TU << "{\n";
TU << " " << outputs[0].get_tensor().get_name() << "[i] = std::floor("
<< inputs[0].get_tensor().get_name() << "[i]);\n";
TU << "}\n";
TU.indent--;
TU << "}\n";
}
void Emitter::EmitSqrt(const ngraph::Node* n,
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs)
{
TU << "{ // " << n->get_name() << "\n";
TU.indent++;
size_t element_count = outputs[0].get_tensor_view_layout()->get_size();
TU << "for (size_t i = 0; i < " << element_count << "; i++)\n";
TU << "{\n";
TU << " " << outputs[0].get_tensor().get_name() << "[i] = std::sqrt("
<< inputs[0].get_tensor().get_name() << "[i]);\n";
TU << "}\n";
TU.indent--;
TU << "}\n";
}
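// For a four-element output, each of the three emitters above generates a
// block shaped like the following (illustrative only; node and tensor names
// such as "Ceiling_5", "tensor_0", and "tensor_1" are hypothetical and are
// assigned from the graph at compile time):
//
//     { // Ceiling_5
//     for (size_t i = 0; i < 4; i++)
//     {
//         tensor_1[i] = std::ceil(tensor_0[i]);
//     }
//     }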
//------------------------------------------------------------------------------------------------
// Utility methods
//------------------------------------------------------------------------------------------------
......
......@@ -96,6 +96,9 @@ namespace ngraph
void EMITTER_DECL(EmitPower);
void EMITTER_DECL(EmitReplaceSlice);
void EMITTER_DECL(EmitOneHot);
void EMITTER_DECL(EmitFloor);
void EMITTER_DECL(EmitCeiling);
void EMITTER_DECL(EmitSqrt);
private:
void generate_call(const std::vector<TensorViewInfo>& inputs,
......
......@@ -35,6 +35,7 @@
#include "ngraph/ops/asin.hpp"
#include "ngraph/ops/atan.hpp"
#include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/ceiling.hpp"
#include "ngraph/ops/concatenate.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp"
......@@ -44,6 +45,7 @@
#include "ngraph/ops/dot.hpp"
#include "ngraph/ops/equal.hpp"
#include "ngraph/ops/exp.hpp"
#include "ngraph/ops/floor.hpp"
#include "ngraph/ops/function_call.hpp"
#include "ngraph/ops/get_tuple_element.hpp"
#include "ngraph/ops/greater.hpp"
......@@ -66,6 +68,7 @@
#include "ngraph/ops/sin.hpp"
#include "ngraph/ops/sinh.hpp"
#include "ngraph/ops/slice.hpp"
#include "ngraph/ops/sqrt.hpp"
#include "ngraph/ops/subtract.hpp"
#include "ngraph/ops/sum.hpp"
#include "ngraph/ops/tan.hpp"
......@@ -160,6 +163,9 @@ static const OpMap dispatcher{
{TI(ngraph::op::Atan), &Emitter::EmitAtan},
{TI(ngraph::op::ReplaceSlice), &Emitter::EmitReplaceSlice},
{TI(ngraph::op::OneHot), &Emitter::EmitOneHot},
{TI(ngraph::op::Floor), &Emitter::EmitFloor},
{TI(ngraph::op::Ceiling), &Emitter::EmitCeiling},
{TI(ngraph::op::Sqrt), &Emitter::EmitSqrt},
};
ExternalFunction::ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/interpreter/int_tensor_view.hpp"
using namespace ngraph;
using namespace std;
shared_ptr<runtime::CallFrame> runtime::interpreter::INT_Backend::make_call_frame(
const shared_ptr<ExternalFunction>& external_function)
{
return external_function->make_call_frame();
}
shared_ptr<runtime::TensorView>
runtime::interpreter::INT_Backend::make_primary_tensor_view(const element::Type& element_type,
const Shape& shape)
{
auto rc = make_shared<runtime::interpreter::INT_TensorView>(element_type, shape, "external");
return static_pointer_cast<runtime::TensorView>(rc);
}
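A minimal usage sketch for this backend, assuming the Manager registry lookup used by the test suite and a previously constructed shared_ptr<Function> f (shapes and values here are illustrative):
auto manager = runtime::Manager::get("INTERPRETER");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
auto a = backend->make_primary_tensor_view(element::f32, Shape{2, 2});
auto result = backend->make_primary_tensor_view(element::f32, Shape{2, 2});
vector<float> data{1, 2, 3, 4};
a->write(data.data(), 0, data.size() * sizeof(float));
cf->call({a}, {result});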
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/backend.hpp"
namespace ngraph
{
namespace runtime
{
namespace interpreter
{
static size_t alignment = 64;
class INT_Backend : public runtime::Backend
{
public:
std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame(
const std::shared_ptr<ngraph::runtime::ExternalFunction>& external_function)
override;
std::shared_ptr<ngraph::runtime::TensorView>
make_primary_tensor_view(const ngraph::element::Type& element_type,
const Shape& shape) override;
};
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <algorithm>
#include <sstream>
#include <string>
#include <unordered_map>
#include "ngraph/runtime/interpreter/int_call_frame.hpp"
#include "ngraph/runtime/interpreter/int_tensor_view.hpp"
using namespace std;
using namespace ngraph;
runtime::interpreter::INT_CallFrame::INT_CallFrame(shared_ptr<ExternalFunction> external_function,
shared_ptr<Function> func)
: m_external_function(external_function)
, m_function(func)
{
}
void runtime::interpreter::INT_CallFrame::call(
std::shared_ptr<Function> function,
const vector<shared_ptr<runtime::interpreter::INT_TensorView>>& input_tvs,
const vector<shared_ptr<runtime::interpreter::INT_TensorView>>& output_tvs)
{
unordered_map<string, shared_ptr<runtime::interpreter::INT_TensorView>> tensor_map;
const std::vector<std::shared_ptr<op::Parameter>>& params = function->get_parameters();
for (size_t i = 0; i < input_tvs.size(); i++)
{
string name = params[i]->get_name();
tensor_map.insert({name, input_tvs[i]});
}
for (size_t i = 0; i < output_tvs.size(); i++)
{
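// Note: get_result() is singular, so this loop assumes the function
// produces a single result tensor.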
string name = function->get_result()->get_name();
tensor_map.insert({name, output_tvs[i]});
}
// Invoke computation
for (shared_ptr<Node> op : function->get_ordered_ops())
{
vector<shared_ptr<runtime::interpreter::INT_TensorView>> inputs;
vector<shared_ptr<runtime::interpreter::INT_TensorView>> outputs;
element::Type base_type;
if (op->get_inputs().empty())
{
base_type = op->get_element_type();
}
else
{
base_type = op->get_inputs().at(0).get_tensor().get_element_type();
}
element::Type secondary_type = op->get_element_type();
// Some ops have unusual input/output types, so handle those special cases here
if (op->description() == "Select")
{
base_type = op->get_inputs().at(1).get_tensor().get_element_type();
secondary_type = op->get_inputs().at(0).get_tensor().get_element_type();
}
for (const descriptor::Input& input : op->get_inputs())
{
string name = input.get_output().get_node()->get_name();
shared_ptr<runtime::interpreter::INT_TensorView> tv = tensor_map.at(name);
inputs.push_back(tv);
// NGRAPH_INFO << "Op Inputs " << name;
}
for (descriptor::Output& output : op->get_outputs())
{
string name = output.get_node()->get_name();
shared_ptr<runtime::interpreter::INT_TensorView> tv;
if (!contains_key(tensor_map, name))
{
// The output tensor is not in the tensor map so create a new tensor
const Shape& shape = output.get_tensor_view_type()->get_shape();
element::Type element_type = output.get_tensor_view_type()->get_element_type();
string tensor_name = output.get_tensor().get_name();
tv = make_shared<runtime::interpreter::INT_TensorView>(
element_type, shape, tensor_name);
tensor_map.insert({name, tv});
}
else
{
tv = tensor_map.at(name);
}
outputs.push_back(tv);
// NGRAPH_INFO << "Op Outputs " << name;
}
if (base_type == element::boolean)
{
generate_calls<char>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::f32)
{
generate_calls<float>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::f64)
{
generate_calls<double>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::i8)
{
generate_calls<int8_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::i16)
{
generate_calls<int16_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::i32)
{
generate_calls<int32_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::i64)
{
generate_calls<int64_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::u8)
{
generate_calls<uint8_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::u16)
{
generate_calls<uint16_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::u32)
{
generate_calls<uint32_t>(secondary_type, *op, inputs, outputs);
}
else if (base_type == element::u64)
{
generate_calls<uint64_t>(secondary_type, *op, inputs, outputs);
}
else
{
stringstream ss;
ss << "unsupported element type " << base_type << " op " << op->get_name();
throw runtime_error(ss.str());
}
// Delete any obsolete tensors
for (const descriptor::Tensor* t : op->liveness_free_list)
{
for (auto it = tensor_map.begin(); it != tensor_map.end(); ++it)
{
if (it->second->get_tensor().get_name() == t->get_name())
{
tensor_map.erase(it);
break;
}
}
}
}
}
void runtime::interpreter::INT_CallFrame::tensor_call(
const vector<shared_ptr<runtime::interpreter::INT_TensorView>>& input_tvs,
const vector<shared_ptr<runtime::interpreter::INT_TensorView>>& output_tvs)
{
call(m_function, input_tvs, output_tvs);
}
void runtime::interpreter::INT_CallFrame::tensor_call(
const vector<shared_ptr<runtime::TensorView>>& input_tvs,
const vector<shared_ptr<runtime::TensorView>>& output_tvs)
{
vector<shared_ptr<runtime::interpreter::INT_TensorView>> args;
vector<shared_ptr<runtime::interpreter::INT_TensorView>> out;
for (auto tv : input_tvs)
{
args.push_back(static_pointer_cast<runtime::interpreter::INT_TensorView>(tv));
}
for (auto tv : output_tvs)
{
out.push_back(static_pointer_cast<runtime::interpreter::INT_TensorView>(tv));
}
tensor_call(args, out);
}
void runtime::interpreter::INT_CallFrame::call(const vector<shared_ptr<runtime::Value>>& arguments,
const vector<shared_ptr<runtime::Value>>& results)
{
vector<shared_ptr<runtime::TensorView>> inputs;
for (shared_ptr<runtime::Value> argument : arguments)
{
argument->collect_tensor_views(inputs, argument);
}
vector<shared_ptr<runtime::TensorView>> outputs;
for (shared_ptr<runtime::Value> result : results)
{
result->collect_tensor_views(outputs, result);
}
tensor_call(inputs, outputs);
}
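The second level of dispatch that the base_type ladder above feeds into lives in the collapsed int_call_frame.hpp. Its shape is roughly the following standalone sketch (illustrative names only, not the collapsed code itself): generate_calls<BASE> repeats the same ladder over the secondary type and lands on a kernel instantiated for the (base, secondary) pair.
#include <cstdint>
#include <stdexcept>
enum class Elem { f32, f64, i32 };
template <typename BASE, typename SECONDARY>
void op_engine()
{
    // typed kernel body selected for the (BASE, SECONDARY) pair
}
template <typename BASE>
void generate_calls(Elem secondary)
{
    if (secondary == Elem::f32)      op_engine<BASE, float>();
    else if (secondary == Elem::f64) op_engine<BASE, double>();
    else if (secondary == Elem::i32) op_engine<BASE, int32_t>();
    else throw std::runtime_error("unsupported element type");
}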
This diff is collapsed.
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <fstream>
#include <memory>
#include <string>
#include <tuple>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
#include "ngraph/ops/abs.hpp"
#include "ngraph/ops/acos.hpp"
#include "ngraph/ops/add.hpp"
#include "ngraph/ops/asin.hpp"
#include "ngraph/ops/atan.hpp"
#include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/concatenate.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp"
#include "ngraph/ops/cos.hpp"
#include "ngraph/ops/cosh.hpp"
#include "ngraph/ops/divide.hpp"
#include "ngraph/ops/dot.hpp"
#include "ngraph/ops/equal.hpp"
#include "ngraph/ops/exp.hpp"
#include "ngraph/ops/function_call.hpp"
#include "ngraph/ops/get_tuple_element.hpp"
#include "ngraph/ops/greater.hpp"
#include "ngraph/ops/greater_eq.hpp"
#include "ngraph/ops/less.hpp"
#include "ngraph/ops/less_eq.hpp"
#include "ngraph/ops/log.hpp"
#include "ngraph/ops/maximum.hpp"
#include "ngraph/ops/minimum.hpp"
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/negative.hpp"
#include "ngraph/ops/not_equal.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/sign.hpp"
#include "ngraph/ops/sin.hpp"
#include "ngraph/ops/sinh.hpp"
#include "ngraph/ops/slice.hpp"
#include "ngraph/ops/subtract.hpp"
#include "ngraph/ops/sum.hpp"
#include "ngraph/ops/tan.hpp"
#include "ngraph/ops/tanh.hpp"
#include "ngraph/ops/tuple.hpp"
#include "ngraph/pass/assign_layout.hpp"
#include "ngraph/pass/dump_sorted.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/memory_layout.hpp"
#include "ngraph/pass/topological_sort.hpp"
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/runtime/interpreter/int_call_frame.hpp"
#include "ngraph/runtime/interpreter/int_external_function.hpp"
#include "ngraph/runtime/utils.hpp"
using namespace std;
using namespace ngraph;
static const string s_output_dir = "cpu_codegen";
class StaticInitializers
{
public:
StaticInitializers() { file_util::remove_directory(s_output_dir); }
};
static StaticInitializers s_static_initializers;
using descriptor::layout::DenseTensorViewLayout;
runtime::interpreter::ExternalFunction::ExternalFunction(const shared_ptr<Function>& function,
bool release_function)
: runtime::ExternalFunction(function, release_function)
, m_function(function)
{
}
void runtime::interpreter::ExternalFunction::compile()
{
if (m_is_compiled)
{
return;
}
string function_name = m_function->get_name();
string dump_filename = file_util::path_join(s_output_dir, function_name + "_ops.txt");
pass::Manager pass_manager;
pass_manager.register_pass<pass::TopologicalSort>();
// For now, just make everyone row-major.
pass_manager.register_pass<pass::AssignLayout<DenseTensorViewLayout>>();
pass_manager.register_pass<pass::Liveness>();
pass_manager.run_passes(m_function);
m_is_compiled = true;
if (m_release_function)
{
release_function();
}
}
shared_ptr<runtime::CallFrame> runtime::interpreter::ExternalFunction::make_call_frame()
{
if (!m_is_compiled)
{
compile();
}
return make_shared<runtime::interpreter::INT_CallFrame>(shared_from_this(), m_function);
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <functional>
#include <memory>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include "ngraph/function.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/runtime/interpreter/int_call_frame.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
namespace runtime
{
namespace interpreter
{
class ExternalFunction : public ngraph::runtime::ExternalFunction,
public std::enable_shared_from_this<ExternalFunction>
{
public:
ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
bool release_function = true);
std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame();
protected:
std::shared_ptr<ngraph::Function> m_function;
void compile();
};
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "ngraph/runtime/interpreter/int_kernels.hpp"
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <cstddef>
#include <cstdint>
// CBLAS types and wrappers
namespace cblas
{
enum class Layout
{
RowMajor = 101,
ColMajor = 102
};
enum class Transpose
{
None = 111,
Transpose = 112,
ConjTrans = 113
};
enum class UpperLower
{
Upper = 121,
Lower = 122
};
enum class Diag
{
NonUnit = 131,
Unit = 132
};
enum class Side
{
Left = 141,
Right = 142
};
enum class Storage
{
Packed = 151
};
enum class Ident
{
AMatrix = 161,
BMatrix = 162
};
enum class Offset
{
RowOffset = 171,
ColOffset = 172,
FixOffset = 173
};
extern "C" {
void cblas_sgemm(const Layout layout,
const Transpose TransA,
const Transpose TransB,
const int64_t M,
const int64_t N,
const int64_t K,
const float alpha,
const float* A,
const int64_t lda,
const float* B,
const int64_t ldb,
const float beta,
float* C,
const int64_t ldc);
}
}
namespace mkl
{
extern "C" {
void MKL_Somatcopy(char ordering,
char trans,
size_t rows,
size_t cols,
const float alpha,
const float* A,
size_t lda,
float* B,
size_t ldb);
}
}
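A hedged calling sketch for the cblas_sgemm wrapper above; the enum values match the standard CBLAS constants, and linking against a CBLAS implementation (e.g. MKL) is required:
// C = 1.0 * A * B + 0.0 * C, all three matrices 2x2 row-major
float A[] = {1, 2, 3, 4};
float B[] = {5, 6, 7, 8};
float C[4] = {0};
cblas::cblas_sgemm(cblas::Layout::RowMajor,
                   cblas::Transpose::None,
                   cblas::Transpose::None,
                   2, 2, 2,
                   1.0f, A, 2,
                   B, 2,
                   0.0f, C, 2);
// C now holds {19, 22, 43, 50}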
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/runtime/interpreter/int_external_function.hpp"
#include "ngraph/runtime/interpreter/int_manager.hpp"
using namespace ngraph;
using namespace std;
shared_ptr<runtime::Backend> runtime::interpreter::INT_Manager::allocate_backend()
{
return make_shared<INT_Backend>();
}
shared_ptr<runtime::ExternalFunction>
runtime::interpreter::INT_Manager::compile(const shared_ptr<Function>& fun)
{
return make_shared<ExternalFunction>(fun);
}
runtime::Manager::Factory runtime::interpreter::INT_Manager::factory =
runtime::Manager::register_factory("INTERPRETER",
[](const string& name) -> shared_ptr<runtime::Manager> {
return make_shared<INT_Manager>();
});
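The factory registration above is the entire hookup for the "INTERPRETER" name. A hedged sketch of how another backend would register itself the same way (the name "MYBACKEND" and class MyManager are hypothetical):
runtime::Manager::Factory my_factory = runtime::Manager::register_factory(
    "MYBACKEND", [](const string& name) -> shared_ptr<runtime::Manager> {
        return make_shared<MyManager>();
    });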
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include "ngraph/codegen/execution_engine.hpp"
#include "ngraph/runtime/manager.hpp"
namespace ngraph
{
class Function;
namespace runtime
{
class ExternalFunction;
namespace interpreter
{
/// @brief Transformer for the interpreted backend
class INT_Manager : public Manager
{
protected:
ngraph::codegen::ExecutionEngine exec_state;
public:
virtual std::shared_ptr<Backend> allocate_backend() override;
virtual std::shared_ptr<ngraph::runtime::ExternalFunction>
compile(const std::shared_ptr<ngraph::Function>& fun) override;
static Factory factory;
};
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <cstdlib>
#include <cstring>
#include <memory>
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/primary_tensor_view.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/runtime/interpreter/int_tensor_view.hpp"
using namespace ngraph;
using namespace std;
runtime::interpreter::INT_TensorView::INT_TensorView(const element::Type& element_type,
const Shape& shape,
const string& name)
: runtime::TensorView(std::make_shared<descriptor::PrimaryTensorView>(
std::make_shared<TensorViewType>(element_type, shape), name, true, true, false))
, m_allocated_buffer_pool(nullptr)
, m_aligned_buffer_pool(nullptr)
{
m_descriptor->set_tensor_view_layout(
std::make_shared<descriptor::layout::DenseTensorViewLayout>(*m_descriptor));
m_buffer_size = m_descriptor->get_tensor_view_layout()->get_size() * element_type.size();
if (m_buffer_size > 0)
{
size_t allocation_size = m_buffer_size + runtime::interpreter::alignment;
m_allocated_buffer_pool = static_cast<char*>(malloc(allocation_size));
m_aligned_buffer_pool = m_allocated_buffer_pool;
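// Round the aligned pointer up to the next 64-byte boundary; the pool was
// over-allocated by one alignment unit above, so this cannot overrun.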
size_t mod = size_t(m_aligned_buffer_pool) % alignment;
if (mod != 0)
{
m_aligned_buffer_pool += (alignment - mod);
}
}
}
runtime::interpreter::INT_TensorView::~INT_TensorView()
{
if (m_allocated_buffer_pool != nullptr)
{
free(m_allocated_buffer_pool);
}
}
char* runtime::interpreter::INT_TensorView::get_data_ptr()
{
return m_aligned_buffer_pool;
}
const char* runtime::interpreter::INT_TensorView::get_data_ptr() const
{
return m_aligned_buffer_pool;
}
void runtime::interpreter::INT_TensorView::write(const void* source, size_t tensor_offset, size_t n)
{
if (tensor_offset + n > m_buffer_size)
{
throw out_of_range("write access past end of tensor");
}
char* target = get_data_ptr();
memcpy(&target[tensor_offset], source, n);
}
void runtime::interpreter::INT_TensorView::read(void* target, size_t tensor_offset, size_t n) const
{
if (tensor_offset + n > m_buffer_size)
{
throw out_of_range("read access past end of tensor");
}
const char* source = get_data_ptr();
memcpy(target, &source[tensor_offset], n);
}
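A round-trip sketch for the tensor view above (assumes the using-declarations for std and ngraph as in this file; shape and values are illustrative):
runtime::interpreter::INT_TensorView tv(element::f32, Shape{2, 3}, "t0");
vector<float> in{1, 2, 3, 4, 5, 6};
tv.write(in.data(), 0, in.size() * sizeof(float));
vector<float> out(in.size());
tv.read(out.data(), 0, out.size() * sizeof(float));
// out == in; reading or writing past the 24-byte buffer throws out_of_range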
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/types/element_type.hpp"
namespace ngraph
{
namespace runtime
{
namespace interpreter
{
class INT_TensorView;
}
}
}
class ngraph::runtime::interpreter::INT_TensorView : public ngraph::runtime::TensorView
{
public:
INT_TensorView(const ngraph::element::Type& element_type,
const Shape& shape,
const std::string& name);
virtual ~INT_TensorView();
char* get_data_ptr();
const char* get_data_ptr() const;
/// @brief Write bytes directly into the tensor
/// @param p Pointer to source of data
/// @param tensor_offset Offset into tensor storage to begin writing. Must be element-aligned.
/// @param n Number of bytes to write, must be integral number of elements.
void write(const void* p, size_t tensor_offset, size_t n) override;
/// @brief Read bytes directly from the tensor
/// @param p Pointer to destination for data
/// @param tensor_offset Offset into tensor storage to begin reading. Must be element-aligned.
/// @param n Number of bytes to read, must be integral number of elements.
void read(void* p, size_t tensor_offset, size_t n) const override;
private:
char* m_allocated_buffer_pool;
char* m_aligned_buffer_pool;
size_t m_buffer_size;
};
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void constant(T* arg0, T* out, size_t count)
{
for (size_t i = 0; i < count; i++)
{
out[i] = arg0[i];
}
}
}
}
}
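Despite its name, the constant kernel above is a plain elementwise copy from the constant's storage into the output buffer. A minimal use (values illustrative):
float src[] = {1.5f, 2.5f, 3.5f};
float dst[3];
ngraph::runtime::kernel::constant(src, dst, 3);
// dst now holds {1.5, 2.5, 3.5}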
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <Eigen/Dense>
#include "ngraph/common.hpp"
namespace ngraph
{
namespace runtime
{
class TensorViewInfo;
class CallFrame;
namespace kernel
{
using DynamicStrides = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
using VectorStrides = Eigen::Stride<Eigen::Dynamic, 1>;
template <typename T>
using DynamicArray = Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>;
template <typename T>
using EigenArrayBase = Eigen::Map<DynamicArray<T>, 0, DynamicStrides>;
template <typename T>
using DynamicMatrix = Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
template <typename T>
using EigenMatrixBase = Eigen::Map<DynamicMatrix<T>, 0, DynamicStrides>;
template <typename T>
using DynamicVector = Eigen::Matrix<T, Eigen::Dynamic, 1>;
template <typename T>
using EigenVectorBase = Eigen::Map<DynamicVector<T>, 0, VectorStrides>;
namespace fmt
{
/// @brief vector format for Eigen wrappers.
class V
{
public:
V(size_t s)
: l0(s)
{
}
public:
size_t l0;
size_t l1{1};
size_t s0{1};
size_t s1{1};
};
class M
{
public:
M(const Shape& shape, const Strides& strides)
: l0(shape.at(0))
, l1(shape.at(1))
, s0(strides.at(0))
, s1(strides.at(1))
{
}
public:
size_t l0;
size_t l1;
size_t s0;
size_t s1;
};
}
// T element type
// FMT array format (fmt::V for vector, etc.)
// BASE select array/matrix
template <typename T, typename FMT, typename BASE, typename STRIDES = DynamicStrides>
class EigenWrapper : public BASE
{
using base = BASE;
public:
EigenWrapper(T* t, const FMT& fmt)
: base(t, fmt.l0, fmt.l1, STRIDES(fmt.s0, fmt.s1))
{
}
template <typename U>
EigenWrapper& operator=(const U& other)
{
this->base::operator=(other);
return *this;
}
};
template <typename T, typename FMT = fmt::V>
using EigenArray1d = EigenWrapper<T, FMT, EigenArrayBase<T>>;
template <typename T, typename FMT = fmt::M>
using EigenArray2d = EigenWrapper<T, FMT, EigenArrayBase<T>>;
template <typename T, typename FMT = fmt::M>
using EigenMatrix = EigenWrapper<T, FMT, EigenMatrixBase<T>>;
template <typename T, typename FMT = fmt::V>
using EigenVector = EigenWrapper<T, FMT, EigenVectorBase<T>, VectorStrides>;
}
}
}
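A hedged sketch of the wrappers above in use: wrap two raw buffers in the fmt::V vector format, add them elementwise through Eigen, and write the result through a third wrapper (buffer contents are illustrative):
using namespace ngraph::runtime::kernel;
float a[] = {1, 2, 3};
float b[] = {4, 5, 6};
float c[3];
EigenArray1d<float>(c, fmt::V(3)) =
    EigenArray1d<float>(a, fmt::V(3)) + EigenArray1d<float>(b, fmt::V(3));
// c now holds {5, 7, 9}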
......@@ -14,6 +14,7 @@
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/common.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/types/element_type.hpp"
#include "ngraph/types/type.hpp"
......@@ -40,8 +41,28 @@ const ngraph::Shape& TensorView::get_shape() const
return m_descriptor->get_tensor_view_type()->get_shape();
}
const ngraph::Strides& TensorView::get_strides() const
{
return m_descriptor->get_tensor_view_layout()->get_strides();
}
std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout>
TensorView::get_tensor_view_layout() const
{
return m_descriptor->get_tensor_view_layout();
}
size_t TensorView::get_element_count() const
{
size_t rc = 1;
for (size_t s : get_shape())
{
rc *= s;
}
return rc;
}
const ngraph::descriptor::Tensor& TensorView::get_tensor() const
{
return get_tensor_view_descriptor()->get_tensor();
}
......@@ -54,6 +54,9 @@ namespace ngraph
const std::shared_ptr<Value>& value) const override;
const ngraph::Shape& get_shape() const;
const ngraph::Strides& get_strides() const;
size_t get_element_count() const;
const ngraph::descriptor::Tensor& get_tensor() const;
std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout>
get_tensor_view_layout() const;
......
......@@ -21,7 +21,7 @@
using namespace ngraph;
const element::Type element::boolean(8, false, false, "char");
const element::Type element::boolean(8, false, true, "char");
const element::Type element::f32(32, true, true, "float");
const element::Type element::f64(64, true, true, "double");
const element::Type element::i8(8, false, true, "int8_t");
......@@ -33,6 +33,14 @@ const element::Type element::u16(16, false, false, "uint16_t");
const element::Type element::u32(32, false, false, "uint32_t");
const element::Type element::u64(64, false, false, "uint64_t");
element::Type::Type()
: m_bitwidth{0}
, m_is_real{0}
, m_is_signed{0}
, m_cname{}
{
}
element::Type::Type(size_t bitwidth, bool is_real, bool is_signed, const std::string& cname)
: m_bitwidth{bitwidth}
, m_is_real{is_real}
......
......@@ -48,9 +48,10 @@ namespace ngraph
class Type
{
public:
Type() = delete;
Type();
Type(const Type&) = default;
Type(size_t bitwidth, bool is_real, bool is_signed, const std::string& cname);
Type& operator=(const Type&) = default;
virtual ~Type() {}
const std::string& c_type_string() const;
size_t size() const;
......@@ -66,11 +67,10 @@ namespace ngraph
/// Returns true if the type is floating point, else false.
bool get_is_real() const { return m_is_real; }
private:
static std::map<std::string, Type> m_element_list;
size_t m_bitwidth;
bool m_is_real;
bool m_is_signed;
const std::string m_cname;
std::string m_cname;
};
template <typename T>
......
......@@ -22,7 +22,6 @@ include_directories(
)
set (SRC
autodiff.cpp
builder.cpp
builder_autobroadcast.cpp
build_graph.cpp
......@@ -54,7 +53,9 @@ set (SRC
# and replace BACKEND_NAME_GOES_HERE with your backend name.
# The code for the unit test suite is in test/backend_test.in.cpp
#================================================================================================
# TODO add interpreter back to unit tests when it works
set(BACKEND_NAMES ${BACKEND_NAMES} "NGVM")
set(BACKEND_NAMES ${BACKEND_NAMES} "INTERPRETER")
if(MKLDNN_INCLUDE_DIR)
include_directories(SYSTEM ${MKLDNN_INCLUDE_DIR})
......@@ -80,8 +81,16 @@ endif()
foreach(BACKEND_NAME ${BACKEND_NAMES})
configure_file(backend_test.in.cpp backend_test_${BACKEND_NAME}.cpp)
set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/backend_test_${BACKEND_NAME}.cpp)
message(STATUS "Adding unit test for backend ${BACKEND_NAME}")
endforeach()
# <special case>
# This is a special case as NGVM is the only backend that can run these tests
set(BACKEND_NAME "NGVM")
configure_file(autodiff.in.cpp autodiff_NGVM.cpp)
set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/autodiff_NGVM.cpp)
# </special case>
include_directories(".")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCURDIR=\\\"${CMAKE_CURRENT_SOURCE_DIR}\\\"")
......
This diff is collapsed.
This diff is collapsed.