Commit ad6b0f07 authored by Robert Kimball, committed by Scott Cyphers

using namespace flatten (#400)

* wip

* using namespace cleanup
parent 379300b7
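
Every file in this diff follows the same pattern: drop `using namespace` directives that pull in a nested namespace wholesale (`ngraph::codegen`, `ngraph::descriptor`, `ngraph::runtime`, ...), keep only the top-level `using namespace ngraph;` (plus `using namespace std;` where needed), and spell out the sub-namespace on each out-of-line definition. A minimal self-contained sketch of the pattern, with an illustrative class rather than the real ngraph sources:

#include <string>

namespace ngraph
{
    namespace codegen
    {
        class Compiler
        {
        public:
            void add_header_search_path(const std::string& path);
        };
    }
}

// Before this commit: "using namespace ngraph::codegen;" and an unqualified
// "Compiler::..." definition. After: flatten to the top-level namespace and
// qualify the definition explicitly.
using namespace ngraph;

void codegen::Compiler::add_header_search_path(const std::string& path)
{
    (void)path; // body elided in this sketch
}

The sub-namespace then stays visible at every definition site, which is the readability win the hunks below are after.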
@@ -67,46 +67,45 @@
 using namespace clang;
 using namespace llvm;
-using namespace llvm::opt;
 using namespace std;

-using namespace ngraph::codegen;
+using namespace ngraph;

-static StaticCompiler s_static_compiler;
+static codegen::StaticCompiler s_static_compiler;
 static std::mutex m_mutex;

-ngraph::codegen::Module::Module(std::unique_ptr<llvm::Module> module)
+codegen::Module::Module(std::unique_ptr<llvm::Module> module)
     : m_module(move(module))
 {
 }

-ngraph::codegen::Module::~Module()
+codegen::Module::~Module()
 {
 }

-std::unique_ptr<llvm::Module> ngraph::codegen::Module::take_module()
+std::unique_ptr<llvm::Module> codegen::Module::take_module()
 {
     return move(m_module);
 }

-Compiler::Compiler()
+codegen::Compiler::Compiler()
 {
 }

-Compiler::~Compiler()
+codegen::Compiler::~Compiler()
 {
 }

-void Compiler::set_precompiled_header_source(const std::string& source)
+void codegen::Compiler::set_precompiled_header_source(const std::string& source)
 {
     s_static_compiler.set_precompiled_header_source(source);
 }

-void Compiler::add_header_search_path(const std::string& path)
+void codegen::Compiler::add_header_search_path(const std::string& path)
 {
     s_static_compiler.add_header_search_path(path);
 }

-std::unique_ptr<ngraph::codegen::Module> Compiler::compile(const std::string& source)
+std::unique_ptr<codegen::Module> codegen::Compiler::compile(const std::string& source)
 {
     lock_guard<mutex> lock(m_mutex);
     return s_static_compiler.compile(m_compiler_action, source);
@@ -120,7 +119,7 @@ static std::string GetExecutablePath(const char* Argv0)
     return llvm::sys::fs::getMainExecutable(Argv0, MainAddr);
 }

-StaticCompiler::StaticCompiler()
+codegen::StaticCompiler::StaticCompiler()
     : m_precompiled_header_valid(false)
     , m_debuginfo_enabled(false)
     , m_enable_diag_output((std::getenv("NGRAPH_COMPILER_DIAG_ENABLE") != nullptr))
@@ -129,7 +128,7 @@ StaticCompiler::StaticCompiler()
     initialize();
 }

-void StaticCompiler::initialize()
+void codegen::StaticCompiler::initialize()
 {
     m_extra_search_path_list.clear();
 #if NGCPU_DEBUGINFO
@@ -223,7 +222,7 @@ void StaticCompiler::initialize()
     TO.FeaturesAsWritten.emplace_back("+fma");
 }

-StaticCompiler::~StaticCompiler()
+codegen::StaticCompiler::~StaticCompiler()
 {
     // This is causing a segfault after program terminates
     // will address later
@@ -239,10 +238,10 @@ StaticCompiler::~StaticCompiler()
     // }
 }

-bool StaticCompiler::is_version_number(const string& path)
+bool codegen::StaticCompiler::is_version_number(const string& path)
 {
     bool rc = true;
-    vector<string> tokens = ngraph::split(path, '.');
+    vector<string> tokens = split(path, '.');
     for (string s : tokens)
     {
         for (char c : s)
@@ -256,7 +255,7 @@ bool StaticCompiler::is_version_number(const string& path)
     return rc;
 }

-void StaticCompiler::add_header_search_path(const string& path)
+void codegen::StaticCompiler::add_header_search_path(const string& path)
 {
     if (!contains(m_extra_search_path_list, path))
     {
@@ -266,9 +265,9 @@ void StaticCompiler::add_header_search_path(const string& path)
     }
 }

-std::unique_ptr<ngraph::codegen::Module>
-    StaticCompiler::compile(std::unique_ptr<clang::CodeGenAction>& m_compiler_action,
+std::unique_ptr<codegen::Module>
+    codegen::StaticCompiler::compile(std::unique_ptr<clang::CodeGenAction>& m_compiler_action,
                             const string& source)
 {
     PreprocessorOptions& preprocessor_options = m_compiler->getInvocation().getPreprocessorOpts();
     if (!m_precompiled_header_valid && m_precomiled_header_source.empty() == false)
@@ -304,25 +303,25 @@ std::unique_ptr<ngraph::codegen::Module>
     preprocessor_options.RemappedFileBuffers.pop_back();

-    unique_ptr<ngraph::codegen::Module> result;
+    unique_ptr<codegen::Module> result;
     if (rc)
     {
-        result = move(unique_ptr<ngraph::codegen::Module>(new ngraph::codegen::Module(move(rc))));
+        result = move(unique_ptr<codegen::Module>(new codegen::Module(move(rc))));
     }
     else
     {
-        result = move(unique_ptr<ngraph::codegen::Module>(nullptr));
+        result = move(unique_ptr<codegen::Module>(nullptr));
     }

     if (reinitialize)
     {
-        StaticCompiler::initialize();
+        codegen::StaticCompiler::initialize();
     }

     return result;
 }

-void StaticCompiler::generate_pch(const string& source)
+void codegen::StaticCompiler::generate_pch(const string& source)
 {
     PreprocessorOptions& preprocessor_options = m_compiler->getInvocation().getPreprocessorOpts();
     m_pch_path = file_util::tmp_filename();
@@ -346,7 +345,7 @@ void StaticCompiler::generate_pch(const string& source)
     delete compilerAction;
 }

-void StaticCompiler::configure_search_path()
+void codegen::StaticCompiler::configure_search_path()
 {
 #ifdef USE_BUILTIN
     load_headers_from_resource();
@@ -400,7 +399,7 @@ void StaticCompiler::configure_search_path()
 #endif
 }

-void StaticCompiler::load_headers_from_resource()
+void codegen::StaticCompiler::load_headers_from_resource()
 {
     HeaderSearchOptions& hso = m_compiler->getInvocation().getHeaderSearchOpts();
     PreprocessorOptions& preprocessor_options = m_compiler->getInvocation().getPreprocessorOpts();
@@ -423,7 +422,7 @@ void StaticCompiler::load_headers_from_resource()
     }
 }

-void StaticCompiler::set_precompiled_header_source(const std::string& source)
+void codegen::StaticCompiler::set_precompiled_header_source(const std::string& source)
 {
     m_precomiled_header_source = source;
 }
@@ -18,12 +18,12 @@
 #include "ngraph/types/element_type.hpp"
 #include "ngraph/types/type.hpp"

-using namespace ngraph::descriptor::layout;
+using namespace ngraph;
 using ngraph::Shape;
 using ngraph::descriptor::TensorView;
 using ngraph::TensorViewType;

-DenseTensorViewLayout::DenseTensorViewLayout(const TensorView& tensor_view)
+descriptor::layout::DenseTensorViewLayout::DenseTensorViewLayout(const TensorView& tensor_view)
     : TensorViewLayout(tensor_view)
 {
     auto tensor_view_type = tensor_view.get_tensor_view_type();
@@ -32,7 +32,8 @@ DenseTensorViewLayout::DenseTensorViewLayout(const TensorView& tensor_view)
     m_strides = ngraph::row_major_strides(shape);
 }

-size_t DenseTensorViewLayout::get_index_offset(const std::vector<size_t>& indices)
+size_t
+    descriptor::layout::DenseTensorViewLayout::get_index_offset(const std::vector<size_t>& indices)
 {
     if (indices.size() != m_strides.size())
     {
@@ -46,7 +47,7 @@ size_t DenseTensorViewLayout::get_index_offset(const std::vector<size_t>& indices)
     return result;
 }

-bool DenseTensorViewLayout::operator==(const TensorViewLayout& other) const
+bool descriptor::layout::DenseTensorViewLayout::operator==(const TensorViewLayout& other) const
 {
     const DenseTensorViewLayout* p_other = dynamic_cast<const DenseTensorViewLayout*>(&other);
     if (nullptr == p_other)
...
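
For context on the hunk above: `DenseTensorViewLayout` stores row-major strides (`m_strides = ngraph::row_major_strides(shape)`), and `get_index_offset` reduces a multi-index to a flat offset, which is the dot product of the indices with those strides. A standalone sketch of that computation, assuming the conventional definition of row-major strides (illustrative code, not the ngraph implementation):

#include <cstddef>
#include <iostream>
#include <vector>

// Innermost axis has stride 1; each axis's stride is the product of the
// extents of all axes to its right.
std::vector<size_t> row_major_strides(const std::vector<size_t>& shape)
{
    std::vector<size_t> strides(shape.size());
    size_t stride = 1;
    for (size_t i = shape.size(); i-- > 0;)
    {
        strides[i] = stride;
        stride *= shape[i];
    }
    return strides;
}

size_t index_offset(const std::vector<size_t>& indices, const std::vector<size_t>& strides)
{
    size_t result = 0;
    for (size_t i = 0; i < indices.size(); i++)
    {
        result += indices[i] * strides[i];
    }
    return result;
}

int main()
{
    std::vector<size_t> shape{2, 3, 4};
    auto strides = row_major_strides(shape);               // {12, 4, 1}
    std::cout << index_offset({1, 2, 3}, strides) << "\n"; // 1*12 + 2*4 + 3*1 = 23
}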
@@ -17,19 +17,19 @@
 #include "ngraph/types/element_type.hpp"
 #include "ngraph/types/type.hpp"

-using namespace ngraph::descriptor::layout;
+using namespace ngraph;

-TensorViewLayout::TensorViewLayout(const ngraph::descriptor::TensorView& tensor_view)
+descriptor::layout::TensorViewLayout::TensorViewLayout(const descriptor::TensorView& tensor_view)
     : m_tensor_view_type(tensor_view.get_tensor_view_type())
 {
 }

-const ngraph::element::Type& TensorViewLayout::get_element_type() const
+const element::Type& descriptor::layout::TensorViewLayout::get_element_type() const
 {
     return m_tensor_view_type->get_element_type();
 }

-const ngraph::Shape& TensorViewLayout::get_shape() const
+const Shape& descriptor::layout::TensorViewLayout::get_shape() const
 {
     return m_tensor_view_type->get_shape();
 }
@@ -18,9 +18,8 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

-Output::Output(Node* node, size_t index, const std::shared_ptr<TensorView>& tensor_view)
+descriptor::Output::Output(Node* node, size_t index, const shared_ptr<TensorView>& tensor_view)
     : m_node(node)
     , m_index(index)
     , m_tensor_view(tensor_view)
@@ -28,37 +27,37 @@ Output::Output(Node* node, size_t index, const std::shared_ptr<TensorView>& tensor_view)
 }

 // Add an input to the vector of inputs that use this output.
-void Output::add_input(Input* input)
+void descriptor::Output::add_input(Input* input)
 {
     m_inputs.insert(input);
 }

-void Output::remove_input(Input* input)
+void descriptor::Output::remove_input(Input* input)
 {
     m_inputs.erase(input);
 }

-std::shared_ptr<Node> Output::get_node() const
+shared_ptr<Node> descriptor::Output::get_node() const
 {
     return m_node->shared_from_this();
 }

-Tensor& Output::get_tensor() const
+descriptor::Tensor& descriptor::Output::get_tensor() const
 {
     return m_tensor_view->get_tensor();
 }

-std::shared_ptr<const TensorViewType> Output::get_tensor_view_type() const
+shared_ptr<const TensorViewType> descriptor::Output::get_tensor_view_type() const
 {
     return get_tensor_view()->get_tensor_view_type();
 }

-const Shape& Output::get_shape() const
+const Shape& descriptor::Output::get_shape() const
 {
     return get_tensor_view_type()->get_shape();
 }

-const element::Type& Output::get_element_type() const
+const element::Type& descriptor::Output::get_element_type() const
 {
     return get_tensor_view_type()->get_element_type();
 }
@@ -17,14 +17,14 @@
 #include "ngraph/node.hpp"

 using namespace ngraph;
-using namespace ngraph::descriptor;
+using namespace std;

-Tensor::Tensor(const element::Type& element_type,
+descriptor::Tensor::Tensor(const element::Type& element_type,
                PrimaryTensorView* primary_tensor_view,
-               const std::string& name,
+               const string& name,
                bool is_output,
                bool is_input,
                bool is_constant)
     : m_element_type(element_type)
     , m_primary_tensor_view(primary_tensor_view)
     , m_is_output{is_output}
@@ -42,32 +42,32 @@ Tensor::Tensor(const element::Type& element_type,
     m_size = size * m_element_type.size();
 }

-std::string Tensor::make_tensor_name(const Node* node, size_t value_index)
+string descriptor::Tensor::make_tensor_name(const Node* node, size_t value_index)
 {
-    return node->get_node_id() + "_" + std::to_string(value_index);
+    return node->get_node_id() + "_" + to_string(value_index);
 }

-std::string Tensor::get_next_view_name()
+string descriptor::Tensor::get_next_view_name()
 {
-    return m_name + "_TV" + std::to_string(m_next_view_id++);
+    return m_name + "_TV" + to_string(m_next_view_id++);
 }

-size_t Tensor::size() const
+size_t descriptor::Tensor::size() const
 {
     return m_size;
 }

-void Tensor::set_pool_offset(size_t offset)
+void descriptor::Tensor::set_pool_offset(size_t offset)
 {
     m_pool_offset = offset;
 }

-size_t Tensor::get_pool_offset() const
+size_t descriptor::Tensor::get_pool_offset() const
 {
     return m_pool_offset;
 }

-std::ostream& operator<<(std::ostream& out, const Tensor& tensor)
+ostream& operator<<(ostream& out, const descriptor::Tensor& tensor)
 {
     out << "Tensor(" << tensor.get_name() << ", ";
     out << (tensor.is_persistent() ? "P" : "");
...
@@ -15,9 +15,10 @@
 #include "ngraph/descriptor/tensor_view.hpp"
 #include "ngraph/types/type.hpp"

-using namespace ngraph::descriptor;
+using namespace ngraph;
+using namespace std;

-std::shared_ptr<const ngraph::ValueType> TensorView::get_value_type() const
+shared_ptr<const ngraph::ValueType> descriptor::TensorView::get_value_type() const
 {
     return m_tensor_view_type;
 }
@@ -16,11 +16,10 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph;

-op::BinaryElementwiseComparison::BinaryElementwiseComparison(const std::string& node_type,
-                                                             const std::shared_ptr<Node>& arg0,
-                                                             const std::shared_ptr<Node>& arg1)
+op::BinaryElementwiseComparison::BinaryElementwiseComparison(const string& node_type,
+                                                             const shared_ptr<Node>& arg0,
+                                                             const shared_ptr<Node>& arg1)
     : BinaryElementwise(node_type, element::boolean, arg0, arg1)
 {
     if (arg0->get_element_type() != arg1->get_element_type())
...
@@ -16,10 +16,10 @@
 #include "ngraph/ops/op.hpp"

 using namespace ngraph;
-using namespace ngraph::op;
+using namespace std;

-op::Not::Not(const std::shared_ptr<Node>& arg)
-    : UnaryElementwise("Not", arg->get_element_type(), arg)
+op::Not::Not(const shared_ptr<Node>& arg)
+    : op::UnaryElementwise("Not", arg->get_element_type(), arg)
 {
 }
...
@@ -21,14 +21,13 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

 pass::DumpSorted::DumpSorted(const string& output_file)
     : m_output_file{output_file}
 {
 }

-bool pass::DumpSorted::run_on_module(vector<shared_ptr<ngraph::Function>>& functions)
+bool pass::DumpSorted::run_on_module(vector<shared_ptr<Function>>& functions)
 {
     ofstream out{m_output_file};
     if (out)
@@ -42,7 +41,7 @@ bool pass::DumpSorted::run_on_module(vector<shared_ptr<Function>>& functions)
         {
             out << node->get_name() << "(";
             vector<string> inputs;
-            for (const Input& input : node->get_inputs())
+            for (const descriptor::Input& input : node->get_inputs())
             {
                 inputs.push_back(input.get_tensor().get_name());
             }
@@ -57,15 +56,15 @@ bool pass::DumpSorted::run_on_module(vector<shared_ptr<Function>>& functions)
             out << join(outputs);
             out << "\n";

-            for (const Tensor* tensor : node->liveness_live_list)
+            for (const descriptor::Tensor* tensor : node->liveness_live_list)
             {
                 out << " L " << tensor->get_name() << "\n";
             }
-            for (const Tensor* tensor : node->liveness_new_list)
+            for (const descriptor::Tensor* tensor : node->liveness_new_list)
             {
                 out << " N " << tensor->get_name() << "\n";
             }
-            for (const Tensor* tensor : node->liveness_free_list)
+            for (const descriptor::Tensor* tensor : node->liveness_free_list)
             {
                 out << " F " << tensor->get_name() << "\n";
             }
...
@@ -26,11 +26,10 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

 bool pass::Liveness::run_on_call_graph(const list<shared_ptr<Node>>& ops)
 {
-    unordered_set<Tensor*> currently_live;
+    unordered_set<descriptor::Tensor*> currently_live;

     for (auto it = ops.rbegin(); it != ops.rend(); it++)
     {
@@ -38,32 +37,32 @@ bool pass::Liveness::run_on_call_graph(const list<shared_ptr<Node>>& ops)
         node->liveness_live_list.clear();
         node->liveness_new_list.clear();
         node->liveness_free_list.clear();

-        unordered_set<Tensor*> input_tensor_decls;
-        for (Input& input_decl : node->get_inputs())
+        unordered_set<descriptor::Tensor*> input_tensor_decls;
+        for (descriptor::Input& input_decl : node->get_inputs())
         {
-            Tensor& tensor = input_decl.get_tensor();
+            descriptor::Tensor& tensor = input_decl.get_tensor();
             if (is_temporary(tensor))
             {
                 input_tensor_decls.insert(&tensor);
             }
         }

-        unordered_set<Tensor*> output_tensor_decls;
+        unordered_set<descriptor::Tensor*> output_tensor_decls;
         for (size_t i = 0; i < node->get_output_size(); ++i)
         {
-            Tensor& tensor = node->get_output_tensor(i);
+            descriptor::Tensor& tensor = node->get_output_tensor(i);
             if (is_temporary(tensor))
             {
                 output_tensor_decls.insert(&tensor);
             }
         }

-        unordered_set<Tensor*> free_tensor_decls;
-        unordered_set<Tensor*> new_tensor_decls;
-        unordered_set<Tensor*> all_tensor_decls = input_tensor_decls;
+        unordered_set<descriptor::Tensor*> free_tensor_decls;
+        unordered_set<descriptor::Tensor*> new_tensor_decls;
+        unordered_set<descriptor::Tensor*> all_tensor_decls = input_tensor_decls;
         all_tensor_decls.insert(output_tensor_decls.begin(), output_tensor_decls.end());

-        for (Tensor* tensor_decl : all_tensor_decls)
+        for (descriptor::Tensor* tensor_decl : all_tensor_decls)
         {
             if (!contains(currently_live, tensor_decl))
             {
@@ -75,7 +74,7 @@ bool pass::Liveness::run_on_call_graph(const list<shared_ptr<Node>>& ops)
         }
         node->liveness_live_list = currently_live;

-        for (Tensor* output_decl : output_tensor_decls)
+        for (descriptor::Tensor* output_decl : output_tensor_decls)
         {
             if (contains(currently_live, output_decl))
             {
@@ -89,18 +88,18 @@ bool pass::Liveness::run_on_call_graph(const list<shared_ptr<Node>>& ops)
     // Anything marked as output must remain live for the remainder of the graph
     // Add outputs to live_list and remove from free_list
-    unordered_set<Tensor*> outputs;
-    unordered_set<Tensor*> seen;
+    unordered_set<descriptor::Tensor*> outputs;
+    unordered_set<descriptor::Tensor*> seen;
     for (shared_ptr<Node> node : ops)
     {
-        for (Tensor* tensor : node->liveness_live_list)
+        for (descriptor::Tensor* tensor : node->liveness_live_list)
         {
             if (tensor->is_output())
             {
                 outputs.insert(tensor);
             }
         }
-        for (Tensor* tensor : outputs)
+        for (descriptor::Tensor* tensor : outputs)
         {
             node->liveness_live_list.insert(tensor);
             node->liveness_free_list.erase(tensor);
@@ -123,7 +122,7 @@ bool pass::Liveness::run_on_call_graph(const list<shared_ptr<Node>>& ops)
     return false;
 }

-bool pass::Liveness::is_temporary(const Tensor& tensor)
+bool pass::Liveness::is_temporary(const descriptor::Tensor& tensor)
 {
     return tensor.is_persistent() == false && tensor.is_input() == false &&
            tensor.is_output() == false && tensor.is_constant() == false;
@@ -132,13 +131,13 @@ bool pass::Liveness::is_temporary(const descriptor::Tensor& tensor)
 void pass::Liveness::validate_liveness(const list<Node*>& ops)
 {
-    unordered_set<Tensor*> dead_tensors;
+    unordered_set<descriptor::Tensor*> dead_tensors;
     for (const Node* node : ops)
     {
         auto active = node->liveness_live_list;
         active.insert(node->liveness_new_list.begin(), node->liveness_new_list.end());
         active.insert(node->liveness_free_list.begin(), node->liveness_free_list.end());

-        for (const Tensor* tensor : active)
+        for (const descriptor::Tensor* tensor : active)
         {
             if (contains(dead_tensors, tensor))
             {
...
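
The liveness pass above works backwards over the op list: the first op at which a temporary tensor appears during the reverse walk is the last op to use it in execution order, so that op gets the tensor in its free list; the op that produces it gets it in its new list, and graph outputs are pinned live to the end. A toy sketch of just the reverse-walk/free-list idea, with an assumed simplified data model rather than the ngraph Node/Tensor classes:

#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

struct Op
{
    std::string name;
    std::vector<std::string> tensors;          // inputs and outputs, by name
    std::unordered_set<std::string> free_list; // tensors whose last use is here
};

int main()
{
    // A produces t0; B consumes t0 and produces t1; C consumes t1 and produces t2.
    std::vector<Op> ops = {{"A", {"t0"}, {}}, {"B", {"t0", "t1"}, {}}, {"C", {"t1", "t2"}, {}}};

    std::unordered_set<std::string> seen;
    for (auto it = ops.rbegin(); it != ops.rend(); ++it)
    {
        for (const std::string& t : it->tensors)
        {
            // First sighting in the reverse walk == last use in forward order.
            if (seen.insert(t).second)
            {
                it->free_list.insert(t);
            }
        }
    }

    // Prints: t0 freed at B; t1 and t2 freed at C (set iteration order unspecified).
    for (const Op& op : ops)
    {
        for (const std::string& t : op.free_list)
        {
            std::cout << op.name << " frees " << t << "\n";
        }
    }
}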
@@ -24,24 +24,23 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

 pass::MemoryLayout::MemoryLayout(size_t alignment)
     : m_alignment(alignment)
 {
 }

-bool pass::MemoryLayout::run_on_function(std::shared_ptr<ngraph::Function> function)
+bool pass::MemoryLayout::run_on_function(shared_ptr<ngraph::Function> function)
 {
     MemoryManager mm(m_alignment);
     for (shared_ptr<Node> node : function->get_ordered_ops())
     {
-        for (Tensor* tensor : node->liveness_new_list)
+        for (descriptor::Tensor* tensor : node->liveness_new_list)
         {
             size_t offset = mm.allocate(tensor->size());
             tensor->set_pool_offset(offset);
         }
-        for (const Tensor* tensor : node->liveness_free_list)
+        for (const descriptor::Tensor* tensor : node->liveness_free_list)
         {
             mm.free(tensor->get_pool_offset());
         }
@@ -114,7 +113,7 @@ size_t pass::MemoryManager::best_fit(size_t size)
         m_node_list.insert(best_fit, node{size, block_state::ALLOCATED});
         best_fit->m_size -= size;
     }
-    m_max_allocated = std::max(m_max_allocated, best_offset + size);
+    m_max_allocated = max(m_max_allocated, best_offset + size);

     return best_offset;
 }
@@ -148,7 +147,7 @@ size_t pass::MemoryManager::first_fit(size_t size)
     {
         throw bad_alloc();
     }
-    m_max_allocated = std::max(m_max_allocated, offset + size);
+    m_max_allocated = max(m_max_allocated, offset + size);

     return offset;
 }
@@ -161,7 +160,7 @@ void pass::MemoryManager::free(size_t offset)
     {
         if (offset == search_offset)
         {
-            list<node>::iterator it_next = std::next(it);
+            list<node>::iterator it_next = next(it);
             if (it == m_node_list.begin())
             {
                 // free the first node in the list
@@ -170,7 +169,7 @@ void pass::MemoryManager::free(size_t offset)
             else
             {
                 // node has predecessor
-                list<node>::iterator it_prev = std::prev(it);
+                list<node>::iterator it_prev = prev(it);
                 if (it_prev->m_state == block_state::FREE)
                 {
                     it->m_size += it_prev->m_size;
@@ -195,7 +194,7 @@ void pass::MemoryManager::free(size_t offset)
     }
 }

-void pass::MemoryManager::dump(std::ostream& out)
+void pass::MemoryManager::dump(ostream& out)
 {
     for (const node& n : m_node_list)
     {
...
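
`pass::MemoryManager` above computes tensor pool offsets from a linked list of blocks: `first_fit` (and `best_fit`) split a FREE block that is large enough, and `free` returns a block to the pool, coalescing adjacent FREE neighbors via `std::prev`/`std::next`. A compilable toy version of the first-fit half, under assumed simplifications (no alignment handling, no coalescing, zero-size remainder blocks tolerated):

#include <cstddef>
#include <iostream>
#include <list>
#include <new>

enum class block_state { FREE, ALLOCATED };

struct block
{
    size_t m_size;
    block_state m_state;
};

class toy_pool
{
public:
    explicit toy_pool(size_t total) { m_node_list.push_back({total, block_state::FREE}); }

    size_t first_fit(size_t size)
    {
        size_t offset = 0;
        for (auto it = m_node_list.begin(); it != m_node_list.end(); ++it)
        {
            if (it->m_state == block_state::FREE && it->m_size >= size)
            {
                // Carve an ALLOCATED block off the front of the FREE block.
                m_node_list.insert(it, {size, block_state::ALLOCATED});
                it->m_size -= size;
                return offset;
            }
            offset += it->m_size;
        }
        throw std::bad_alloc();
    }

    void free(size_t offset)
    {
        size_t search_offset = 0;
        for (auto& n : m_node_list)
        {
            if (search_offset == offset)
            {
                n.m_state = block_state::FREE; // the real pass also coalesces neighbors
                return;
            }
            search_offset += n.m_size;
        }
    }

private:
    std::list<block> m_node_list;
};

int main()
{
    toy_pool pool(64);
    size_t a = pool.first_fit(16); // 0
    size_t b = pool.first_fit(16); // 16
    pool.free(a);
    std::cout << a << " " << b << " " << pool.first_fit(8) << "\n"; // "0 16 0": offset 0 is reused
}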
@@ -26,7 +26,6 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

 pass::MemoryVisualize::MemoryVisualize(const string& filename)
     : m_filename{filename}
@@ -104,7 +103,7 @@ shared_ptr<Node> pass::MemoryVisualize::find_largest_op(const list<shared_ptr<Node>>& nodes)
     for (shared_ptr<Node> exop : nodes)
     {
         size_t size = 0;
-        for (const Tensor* tensor : exop->liveness_live_list)
+        for (const descriptor::Tensor* tensor : exop->liveness_live_list)
         {
             size += tensor->size();
         }
@@ -123,15 +122,15 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<shared_ptr<Node>>& nodes)
     if (largest_op)
     {
-        unordered_set<Tensor*> largest_live;
-        for (Tensor* tensor : largest_op->liveness_live_list)
+        unordered_set<descriptor::Tensor*> largest_live;
+        for (descriptor::Tensor* tensor : largest_op->liveness_live_list)
         {
             largest_live.insert(tensor);
         }

-        unordered_map<const Tensor*, size_t> age_list;
-        vector<const Tensor*> tensor_set;
-        unordered_map<const Tensor*, shared_ptr<Node>> generator_op;
+        unordered_map<const descriptor::Tensor*, size_t> age_list;
+        vector<const descriptor::Tensor*> tensor_set;
+        unordered_map<const descriptor::Tensor*, shared_ptr<Node>> generator_op;
         file << "<table>\n";
         file << " <tr>";
         file << "<th align=\"left\">tensor</th>";
@@ -142,12 +141,12 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<shared_ptr<Node>>& nodes)
         size_t i = 0;
         for (shared_ptr<Node> exop : nodes)
         {
-            for (const Tensor* tensor : exop->liveness_new_list)
+            for (const descriptor::Tensor* tensor : exop->liveness_new_list)
             {
                 age_list[tensor] = i;
                 generator_op[tensor] = exop;
             }
-            for (const Tensor* tensor : exop->liveness_free_list)
+            for (const descriptor::Tensor* tensor : exop->liveness_free_list)
             {
                 size_t start = age_list[tensor];
                 age_list[tensor] = (i - start);
@@ -155,10 +154,12 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<shared_ptr<Node>>& nodes)
             }
             i++;
         }
-        sort(tensor_set.begin(), tensor_set.end(), [](const Tensor* t1, const Tensor* t2) {
-            return t1->size() < t2->size();
-        });
-        for (const Tensor* tensor : tensor_set)
+        sort(tensor_set.begin(),
+             tensor_set.end(),
+             [](const descriptor::Tensor* t1, const descriptor::Tensor* t2) {
+                 return t1->size() < t2->size();
+             });
+        for (const descriptor::Tensor* tensor : tensor_set)
         {
             int generator_weight = compute_op_weight(generator_op[tensor]);
             if (contains(largest_live, tensor))
@@ -249,14 +250,14 @@ int pass::MemoryVisualize::compute_op_weight(const shared_ptr<Node> exop)
     // tensor = output_decl.tensor
     // if tensor.is_persistent is False:
     //     mass -= tensor->size()
-    for (const Tensor* tensor : exop->liveness_new_list)
+    for (const descriptor::Tensor* tensor : exop->liveness_new_list)
     {
         if (tensor->is_persistent() == false)
         {
             mass += tensor->size();
         }
     }
-    for (const Tensor* tensor : exop->liveness_free_list)
+    for (const descriptor::Tensor* tensor : exop->liveness_free_list)
     {
         if (tensor->is_persistent() == false)
         {
...
@@ -19,7 +19,7 @@
 #include "ngraph/util.hpp"

 using namespace ngraph;
-using namespace ngraph::runtime::cpu::kernel;
+using namespace std;

 //
 // Given a coordinate transform and a vector of index expressions relative to
@@ -37,18 +37,17 @@
 //    {"((k) * 2 + 5)", "((i) * 2 + 3)", "((j) * 2 + 4)"}
 //
 //
-std::vector<std::string>
-    ngraph::runtime::cpu::kernel::emit_multi_indices(CoordinateTransform& trans,
-                                                     const std::vector<std::string>& index_vars)
+vector<string> ngraph::runtime::cpu::kernel::emit_multi_indices(CoordinateTransform& trans,
+                                                                const vector<string>& index_vars)
 {
-    std::vector<std::string> result;
+    vector<string> result;

     for (size_t i = 0; i < index_vars.size(); i++)
     {
-        std::string index_var = index_vars[trans.get_source_axis_order()[i]];
+        string index_var = index_vars[trans.get_source_axis_order()[i]];
         size_t source_stride = trans.get_source_strides()[i];
         size_t source_start = trans.get_source_start_corner()[i];

-        std::stringstream ss;
+        stringstream ss;
         if (source_stride == 1 && source_start == 0)
         {
@@ -90,11 +89,10 @@
 //    "((4 * ((k) * 2 + 5)) + (2 * ((i) * 2 + 3)) + ((j) * 2 + 4))"
 //
 //
-std::string
-    ngraph::runtime::cpu::kernel::emit_linear_index(CoordinateTransform& trans,
-                                                    const std::vector<std::string>& index_vars)
+string ngraph::runtime::cpu::kernel::emit_linear_index(CoordinateTransform& trans,
+                                                       const vector<string>& index_vars)
 {
-    std::vector<std::string> multi_indices = emit_multi_indices(trans, index_vars);
+    vector<string> multi_indices = emit_multi_indices(trans, index_vars);

     size_t stride = 1;
@@ -103,7 +101,7 @@
         // No need to do this (multiply by stride) if it's 1, though it wouldn't hurt anything.
         if (stride != 1)
         {
-            std::stringstream ss;
+            stringstream ss;
             ss << "(" << stride << " * " << multi_indices[i] << ")";
             multi_indices[i] = ss.str();
         }
@@ -111,7 +109,7 @@
         stride *= trans.get_source_shape()[i];
     }

-    std::stringstream ss;
+    stringstream ss;
     ss << "(" << join(multi_indices, " + ") << ")";

     return ss.str();
@@ -123,12 +121,12 @@
 //
 // Optionally emits an OpenMP parallel pragma, if "omp" is true.
 //
-std::string ngraph::runtime::cpu::kernel::start_index_loop(const std::string& index_var,
-                                                           size_t start,
-                                                           size_t end,
-                                                           bool omp)
+string ngraph::runtime::cpu::kernel::start_index_loop(const string& index_var,
+                                                      size_t start,
+                                                      size_t end,
+                                                      bool omp)
 {
-    std::stringstream ss;
+    stringstream ss;

     if (omp)
     {
@@ -145,18 +143,18 @@
 //
 // Ends an indexing loop on the index variable [index_var].
 //
-std::string ngraph::runtime::cpu::kernel::end_index_loop(const std::string& index_var)
+string ngraph::runtime::cpu::kernel::end_index_loop(const string& index_var)
 {
-    std::stringstream ss;
+    stringstream ss;

     ss << "} // end for(" << index_var << ")\n";

     return ss.str();
 }

-std::string ngraph::runtime::cpu::kernel::emit_nd_sizes(CoordinateTransform& trans)
+string ngraph::runtime::cpu::kernel::emit_nd_sizes(CoordinateTransform& trans)
 {
-    std::stringstream ss;
+    stringstream ss;

     for (size_t s : trans.get_source_shape())
     {
@@ -166,12 +164,12 @@
     return ss.str();
 }

-std::string ngraph::runtime::cpu::kernel::emit_nd_index(CoordinateTransform& trans,
-                                                        const std::vector<std::string>& index_vars)
+string ngraph::runtime::cpu::kernel::emit_nd_index(CoordinateTransform& trans,
+                                                   const vector<string>& index_vars)
 {
-    std::stringstream ss;
+    stringstream ss;

-    for (std::string index : emit_multi_indices(trans, index_vars))
+    for (string index : emit_multi_indices(trans, index_vars))
     {
         ss << "[" << index << "]";
     }
@@ -184,21 +182,21 @@
 // dest_buffer mediated by dest_trans.
 //
 void ngraph::runtime::cpu::kernel::emit_pointwise_copy(codegen::CodeWriter& writer,
-                                                       const std::string& element_type,
-                                                       const std::string& source_buffer,
-                                                       const std::string& dest_buffer,
+                                                       const string& element_type,
+                                                       const string& source_buffer,
+                                                       const string& dest_buffer,
                                                        CoordinateTransform& source_trans,
                                                        CoordinateTransform& dest_trans)
 {
-    std::vector<std::string> index_vars;
+    vector<string> index_vars;

     Shape source_start_corner = source_trans.get_source_start_corner();
     Shape source_end_corner = source_trans.get_source_end_corner();

     size_t n_axes = source_start_corner.size();

-    std::string source_nd_name = writer.generate_temporary_name("source_nd");
-    std::string dest_nd_name = writer.generate_temporary_name("dest_nd");
+    string source_nd_name = writer.generate_temporary_name("source_nd");
+    string dest_nd_name = writer.generate_temporary_name("dest_nd");

     writer << element_type << "(&" << source_nd_name << ")" << emit_nd_sizes(source_trans)
            << " = *reinterpret_cast<" << element_type << "(*)" << emit_nd_sizes(source_trans)
@@ -209,7 +207,7 @@
     for (size_t i = 0; i < n_axes; i++)
     {
-        std::string index_var = writer.generate_temporary_name("i");
+        string index_var = writer.generate_temporary_name("i");

         writer << start_index_loop(index_var, source_start_corner[i], source_end_corner[i], i == 0);
         writer.indent++;
...
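
The emitters above generate C++ source as strings: per the comment block, each axis index becomes an expression like `((k) * 2 + 5)` built from the transform's stride and start corner. A standalone sketch of just that string-building step; it deliberately omits the source-axis reordering the real `emit_multi_indices` applies, so the names and shapes here are illustrative only:

#include <cstddef>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::vector<std::string> emit_indices(const std::vector<std::string>& index_vars,
                                      const std::vector<size_t>& strides,
                                      const std::vector<size_t>& starts)
{
    std::vector<std::string> result;
    for (size_t i = 0; i < index_vars.size(); i++)
    {
        std::stringstream ss;
        if (strides[i] == 1 && starts[i] == 0)
        {
            ss << "(" << index_vars[i] << ")"; // no arithmetic needed on this axis
        }
        else
        {
            ss << "((" << index_vars[i] << ") * " << strides[i] << " + " << starts[i] << ")";
        }
        result.push_back(ss.str());
    }
    return result;
}

int main()
{
    // Mirrors the worked example in the comment: stride 2, start corner {3, 4, 5}.
    for (const std::string& s : emit_indices({"i", "j", "k"}, {2, 2, 2}, {3, 4, 5}))
    {
        std::cout << s << "\n"; // ((i) * 2 + 3)  ((j) * 2 + 4)  ((k) * 2 + 5)
    }
}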
@@ -22,24 +22,25 @@
 #include "ngraph/runtime/manager.hpp"
 #include "ngraph/util.hpp"

-using namespace ngraph::runtime;
+using namespace ngraph;
+using namespace std;

-static std::mutex load_plugins_mutex;
-static std::mutex close_plugins_mutex;
+static mutex load_plugins_mutex;
+static mutex close_plugins_mutex;

-bool Manager::m_is_factory_map_initialized = false;
-std::vector<void*> Manager::m_plugin_handles = {};
+bool runtime::Manager::m_is_factory_map_initialized = false;
+vector<void*> runtime::Manager::m_plugin_handles = {};

-void Manager::load_plugins(const std::string& runtime_plugin_libs)
+void runtime::Manager::load_plugins(const string& runtime_plugin_libs)
 {
-    std::lock_guard<std::mutex> lock(load_plugins_mutex);
+    lock_guard<mutex> lock(load_plugins_mutex);

-    if (Manager::m_is_factory_map_initialized)
+    if (m_is_factory_map_initialized)
     {
         return;
     }

-    std::vector<std::string> plugin_paths = ngraph::split(runtime_plugin_libs, ':', false);
+    vector<string> plugin_paths = ngraph::split(runtime_plugin_libs, ':', false);
     for (auto plugin_path : plugin_paths)
     {
         if (plugin_path.size() > 0)
@@ -52,7 +53,7 @@ void runtime::Manager::load_plugins(const string& runtime_plugin_libs)
                 if (register_plugin != NULL)
                 {
                     register_plugin();
-                    Manager::m_plugin_handles.push_back(plugin_handle);
+                    m_plugin_handles.push_back(plugin_handle);
                 }
                 else
                 {
@@ -66,31 +67,31 @@ void runtime::Manager::load_plugins(const string& runtime_plugin_libs)
         }
     }

-    Manager::m_is_factory_map_initialized = true;
+    m_is_factory_map_initialized = true;
 }

 // TODO: Should call this function after plugin is not needed anymore.
-void Manager::close_plugins()
+void runtime::Manager::close_plugins()
 {
-    std::lock_guard<std::mutex> lock(close_plugins_mutex);
+    lock_guard<mutex> lock(close_plugins_mutex);

-    for (auto plugin_handle : Manager::m_plugin_handles)
+    for (auto plugin_handle : m_plugin_handles)
     {
         dlclose(plugin_handle);
     }
-    Manager::m_plugin_handles.clear();
+    m_plugin_handles.clear();
 }

-Manager::FactoryMap& Manager::get_factory_map()
+runtime::Manager::FactoryMap& runtime::Manager::get_factory_map()
 {
     // Stores Manager Factories
     static FactoryMap factory_map;
     return factory_map;
 }

-std::shared_ptr<Manager> Manager::get(const std::string& name)
+shared_ptr<runtime::Manager> runtime::Manager::get(const string& name)
 {
-    Manager::load_plugins(RUNTIME_PLUGIN_LIBS);
+    load_plugins(RUNTIME_PLUGIN_LIBS);

     auto iter = get_factory_map().find(name);
@@ -103,7 +104,7 @@ shared_ptr<runtime::Manager> runtime::Manager::get(const string& name)
     return f(name);
 }

-Manager::Factory Manager::register_factory(const std::string& name, Factory factory)
+runtime::Manager::Factory runtime::Manager::register_factory(const string& name, Factory factory)
 {
     get_factory_map()[name] = factory;
     return factory;
...
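
`runtime::Manager::load_plugins` above resolves a C entry point named `register_plugin` in each plugin library and calls it so the plugin can register its factory; `close_plugins` later hands each handle to `dlclose`. A hedged standalone sketch of that dlopen/dlsym pattern; the library name is a placeholder and the `dlopen` flags are an assumption, since the diff elides that line (POSIX only; link with -ldl):

#include <dlfcn.h>
#include <iostream>

int main()
{
    // Hypothetical plugin path, for illustration only.
    void* plugin_handle = dlopen("./libexample_backend.so", RTLD_NOW | RTLD_GLOBAL);
    if (plugin_handle == nullptr)
    {
        std::cerr << "dlopen failed: " << dlerror() << "\n";
        return 1;
    }

    // Look up the same entry point the manager uses and invoke it.
    using register_plugin_t = void (*)();
    auto register_plugin =
        reinterpret_cast<register_plugin_t>(dlsym(plugin_handle, "register_plugin"));
    if (register_plugin != nullptr)
    {
        register_plugin();
    }

    dlclose(plugin_handle);
}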
@@ -18,41 +18,41 @@
 #include "ngraph/types/element_type.hpp"
 #include "ngraph/types/type.hpp"

-using namespace ngraph::runtime;
+using namespace ngraph;
+using namespace std;

-std::shared_ptr<const ngraph::descriptor::TensorView> TensorView::get_tensor_view_descriptor() const
+shared_ptr<const descriptor::TensorView> runtime::TensorView::get_tensor_view_descriptor() const
 {
     return m_descriptor;
 }

-std::shared_ptr<ngraph::descriptor::TensorView> TensorView::get_descriptor() const
+shared_ptr<descriptor::TensorView> runtime::TensorView::get_descriptor() const
 {
     return m_descriptor;
 }

-void TensorView::collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
-                                      const std::shared_ptr<TensorView>& value) const
+void runtime::TensorView::collect_tensor_views(vector<shared_ptr<TensorView>>& views,
+                                               const shared_ptr<TensorView>& value) const
 {
     views.push_back(value);
 }

-const ngraph::Shape& TensorView::get_shape() const
+const Shape& runtime::TensorView::get_shape() const
 {
     return m_descriptor->get_tensor_view_type()->get_shape();
 }

-const ngraph::Strides& TensorView::get_strides() const
+const Strides& runtime::TensorView::get_strides() const
 {
     return m_descriptor->get_tensor_view_layout()->get_strides();
 }

-std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout>
-    TensorView::get_tensor_view_layout() const
+shared_ptr<descriptor::layout::TensorViewLayout> runtime::TensorView::get_tensor_view_layout() const
 {
     return m_descriptor->get_tensor_view_layout();
 }

-size_t TensorView::get_element_count() const
+size_t runtime::TensorView::get_element_count() const
 {
     size_t rc = 1;
     for (size_t s : get_shape())
@@ -62,7 +62,7 @@ size_t runtime::TensorView::get_element_count() const
     return rc;
 }

-const ngraph::descriptor::Tensor& TensorView::get_tensor() const
+const descriptor::Tensor& runtime::TensorView::get_tensor() const
 {
     return get_tensor_view_descriptor()->get_tensor();
 }
...
#include "util/test_tools.hpp" #include "util/test_tools.hpp"
using namespace ngraph; using namespace ngraph;
using namespace ngraph::test;
using namespace std; using namespace std;
std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result( shared_ptr<runtime::TensorView>
std::function<std::shared_ptr<Node>(const std::shared_ptr<Node>&, const AxisSet&)> func) make_reduce_result(function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&)> func)
{ {
auto shape_a = Shape{3, 2}; auto shape_a = Shape{3, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a); auto A = make_shared<op::Parameter>(element::f32, shape_a);
...@@ -41,8 +40,8 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result( ...@@ -41,8 +40,8 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result(
return result; return result;
} }
std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_true( shared_ptr<runtime::TensorView> make_reduce_result_true(
std::function<std::shared_ptr<Node>(const std::shared_ptr<Node>&, const AxisSet&, bool)> func) function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&, bool)> func)
{ {
auto shape_a = Shape{3, 2}; auto shape_a = Shape{3, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a); auto A = make_shared<op::Parameter>(element::f32, shape_a);
...@@ -61,8 +60,8 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_true( ...@@ -61,8 +60,8 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_true(
return result; return result;
} }
std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_false( shared_ptr<runtime::TensorView> make_reduce_result_false(
std::function<std::shared_ptr<Node>(const std::shared_ptr<Node>&, const AxisSet&, bool)> func) function<shared_ptr<Node>(const shared_ptr<Node>&, const AxisSet&, bool)> func)
{ {
auto shape_a = Shape{3, 2}; auto shape_a = Shape{3, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a); auto A = make_shared<op::Parameter>(element::f32, shape_a);
...@@ -84,58 +83,58 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_false( ...@@ -84,58 +83,58 @@ std::shared_ptr<ngraph::runtime::TensorView> make_reduce_result_false(
TEST(builder, l2_norm) TEST(builder, l2_norm)
{ {
auto result = make_reduce_result(builder::l2_norm); auto result = make_reduce_result(builder::l2_norm);
ASSERT_TRUE( ASSERT_TRUE(test::all_close((vector<float>{5.9160797831f, 7.48331477355f}),
all_close((vector<float>{5.9160797831f, 7.48331477355f}), read_vector<float>(result))); read_vector<float>(result)));
} }
TEST(builder, mean) TEST(builder, mean)
{ {
auto result = make_reduce_result(builder::mean); auto result = make_reduce_result(builder::mean);
ASSERT_TRUE(all_close((vector<float>{3, 4}), read_vector<float>(result))); ASSERT_TRUE(test::all_close((vector<float>{3, 4}), read_vector<float>(result)));
} }
TEST(builder, std_dev) TEST(builder, std_dev)
{ {
auto result = make_reduce_result_false(builder::std_dev); auto result = make_reduce_result_false(builder::std_dev);
ASSERT_TRUE( ASSERT_TRUE(test::all_close((vector<float>{1.63299316186f, 1.63299316186f}),
all_close((vector<float>{1.63299316186f, 1.63299316186f}), read_vector<float>(result))); read_vector<float>(result)));
result = make_reduce_result_true(builder::std_dev); result = make_reduce_result_true(builder::std_dev);
ASSERT_TRUE(all_close((vector<float>{2, 2}), read_vector<float>(result))); ASSERT_TRUE(test::all_close((vector<float>{2, 2}), read_vector<float>(result)));
} }
TEST(builder, variance) TEST(builder, variance)
{ {
auto result = make_reduce_result_false(builder::variance); auto result = make_reduce_result_false(builder::variance);
ASSERT_TRUE( ASSERT_TRUE(test::all_close((vector<float>{2.66666666666f, 2.66666666666f}),
all_close((vector<float>{2.66666666666f, 2.66666666666f}), read_vector<float>(result))); read_vector<float>(result)));
result = make_reduce_result_true(builder::variance); result = make_reduce_result_true(builder::variance);
ASSERT_TRUE(all_close((vector<float>{4, 4}), read_vector<float>(result))); ASSERT_TRUE(test::all_close((vector<float>{4, 4}), read_vector<float>(result)));
} }
TEST(builder, numpy_transpose) TEST(builder, numpy_transpose)
{ {
// 2D Transpose // 2D Transpose
Shape shape{2, 4}; Shape shape{2, 4};
auto param = std::make_shared<op::Parameter>(ngraph::element::f32, shape); auto param = make_shared<op::Parameter>(element::f32, shape);
auto transposed = std::dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param)); auto transposed = dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param));
EXPECT_EQ(Shape({4, 2}), transposed->get_output_shape()); EXPECT_EQ(Shape({4, 2}), transposed->get_output_shape());
// Multidimensional Transpose // Multidimensional Transpose
shape = Shape{2, 4, 8}; shape = Shape{2, 4, 8};
param = std::make_shared<op::Parameter>(ngraph::element::f32, shape); param = make_shared<op::Parameter>(element::f32, shape);
transposed = std::dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param)); transposed = dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param));
EXPECT_EQ(Shape({8, 4, 2}), transposed->get_output_shape()); EXPECT_EQ(Shape({8, 4, 2}), transposed->get_output_shape());
// Dimshuffle // Dimshuffle
shape = Shape{2, 4, 8}; shape = Shape{2, 4, 8};
param = std::make_shared<op::Parameter>(ngraph::element::f32, shape); param = make_shared<op::Parameter>(element::f32, shape);
transposed = std::dynamic_pointer_cast<op::Reshape>( transposed =
builder::numpy_transpose(param, AxisVector{2, 0, 1})); dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param, AxisVector{2, 0, 1}));
EXPECT_EQ(Shape({8, 2, 4}), transposed->get_output_shape()); EXPECT_EQ(Shape({8, 2, 4}), transposed->get_output_shape());
// Bad Orders // Bad Orders
EXPECT_ANY_THROW( EXPECT_ANY_THROW(
std::dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param, AxisVector{2}))); dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param, AxisVector{2})));
EXPECT_ANY_THROW(std::dynamic_pointer_cast<op::Reshape>( EXPECT_ANY_THROW(
builder::numpy_transpose(param, AxisVector{2, 2, 1}))); dynamic_pointer_cast<op::Reshape>(builder::numpy_transpose(param, AxisVector{2, 2, 1})));
} }
@@ -27,7 +27,6 @@
 using namespace std;
 using namespace ngraph;
-using namespace ngraph::descriptor;

 TEST(tensor, size)
 {
@@ -44,7 +43,7 @@ TEST(tensor, size)
         auto& outputs = arg0->get_outputs();
         ASSERT_EQ(1, outputs.size());

-        Tensor& output = outputs[0].get_tensor();
+        descriptor::Tensor& output = outputs[0].get_tensor();
         EXPECT_EQ(2 * 3 * 4, output.size());
     }
@@ -57,7 +56,7 @@ TEST(tensor, size)
         auto& outputs = arg0->get_outputs();
         ASSERT_EQ(1, outputs.size());

-        Tensor& output = outputs[0].get_tensor();
+        descriptor::Tensor& output = outputs[0].get_tensor();
         EXPECT_EQ(1 * 4, output.size());
     }
@@ -70,33 +69,33 @@ TEST(tensor, size)
         auto& outputs = arg0->get_outputs();
         ASSERT_EQ(1, outputs.size());

-        Tensor& output = outputs[0].get_tensor();
+        descriptor::Tensor& output = outputs[0].get_tensor();
         EXPECT_EQ(1 * 4, output.size());
     }
 }

 template <typename T>
-void test_read_write(const std::vector<T>& x)
+void test_read_write(const vector<T>& x)
 {
-    auto manager = ngraph::runtime::Manager::get("INTERPRETER");
+    auto manager = runtime::Manager::get("INTERPRETER");
     auto backend = manager->allocate_backend();

     auto a = backend->make_primary_tensor_view(element::from<T>(), Shape{2, x.size()});

-    std::vector<T> result(2 * x.size());
+    vector<T> result(2 * x.size());

     a->write(&x[0], 0, x.size() * sizeof(T));
-    std::copy(x.begin(), x.end(), result.begin());
+    copy(x.begin(), x.end(), result.begin());
     a->write(&x[0], x.size() * sizeof(T), x.size() * sizeof(T));
-    std::copy(x.begin(), x.end(), result.begin() + x.size());
+    copy(x.begin(), x.end(), result.begin() + x.size());

-    std::vector<T> af_vector(2 * x.size());
+    vector<T> af_vector(2 * x.size());
     a->read(af_vector.data(), 0, af_vector.size() * sizeof(T));
     ASSERT_EQ(af_vector, result);

-    std::vector<T> result1(x.size());
-    std::vector<T> result2(x.size());
-    std::copy(result.begin() + 1, result.begin() + 1 + x.size(), result1.begin());
+    vector<T> result1(x.size());
+    vector<T> result2(x.size());
+    copy(result.begin() + 1, result.begin() + 1 + x.size(), result1.begin());
     a->read(&result2[0], sizeof(T), sizeof(T) * x.size());
     ASSERT_EQ(result1, result2);
 }
...