Commit 3128f36c authored by Jaikrishnan Menon's avatar Jaikrishnan Menon

CPU: Add emitter skeleton

parent 1c70aa79
......@@ -95,6 +95,7 @@ if(LLVM_INCLUDE_DIR)
runtime/cpu/call_frame.cpp
runtime/cpu/cpu_manager.cpp
runtime/cpu/cpu_backend.cpp
runtime/cpu/emitter.cpp
runtime/cpu/external_function.cpp
)
set_source_files_properties(codegen/compiler.cpp PROPERTIES COMPILE_FLAGS "-fno-rtti")
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <vector>
#include "ngraph/node.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
#include "ngraph/runtime/cpu/emitter.hpp"
using namespace std;
using namespace ngraph::runtime::cpu;
// Emit the CPU implementation code for an elementwise Add node.
// Skeleton only: code generation is not yet implemented.
// The parameter names are commented out (matching the unnamed first three
// parameters) so the empty body does not trigger -Wunused-parameter.
// TODO: generate the addition kernel using `inputs`/`outputs` tensor views.
void Emitter::EmitAdd(const ngraph::Node*,
                      ExternalFunction*,
                      FunctionMap&,
                      const std::vector<TensorViewInfo>& /* inputs */,
                      const std::vector<TensorViewInfo>& /* outputs */) const
{
}
// Emit the CPU implementation code for a Dot (product) node.
// Skeleton only: code generation is not yet implemented.
// The parameter names are commented out (matching the unnamed first three
// parameters) so the empty body does not trigger -Wunused-parameter.
// TODO: generate the dot-product kernel using `inputs`/`outputs` tensor views.
void Emitter::EmitDot(const ngraph::Node*,
                      ExternalFunction*,
                      FunctionMap&,
                      const std::vector<TensorViewInfo>& /* inputs */,
                      const std::vector<TensorViewInfo>& /* outputs */) const
{
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <vector>
#include "ngraph/node.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
namespace ngraph
{
    namespace runtime
    {
        namespace cpu
        {
            /// Generates the implementation code for individual ops when the
            /// CPU backend compiles a Function.  One Emit* method exists per
            /// supported op; methods are selected via a type_index-keyed
            /// dispatch table (see external_function.cpp).
            ///
            /// All Emit* methods share the same signature:
            ///   - the Node being compiled,
            ///   - the owning ExternalFunction,
            ///   - the FunctionMap of already-compiled functions,
            ///   - the input and output tensor views for the op.
            /// Current implementations are empty skeletons.
            class Emitter
            {
            public:
                // No state to initialize; let the compiler generate it.
                Emitter() = default;

                /// Emit code for an elementwise Add node (skeleton).
                void EmitAdd(const ngraph::Node*,
                             ExternalFunction*,
                             FunctionMap&,
                             const std::vector<TensorViewInfo>& inputs,
                             const std::vector<TensorViewInfo>& outputs) const;

                /// Emit code for a Dot node (skeleton).
                void EmitDot(const ngraph::Node*,
                             ExternalFunction*,
                             FunctionMap&,
                             const std::vector<TensorViewInfo>& inputs,
                             const std::vector<TensorViewInfo>& outputs) const;
            };
        }
    }
}
......@@ -53,6 +53,7 @@
#include "ngraph/pass/propagate_types.hpp"
#include "ngraph/pass/topological_sort.hpp"
#include "ngraph/runtime/cpu/call_frame.hpp"
#include "ngraph/runtime/cpu/emitter.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
#include "ngraph/runtime/utils.hpp"
......@@ -61,6 +62,13 @@ using namespace ngraph::runtime::cpu;
using ngraph::descriptor::layout::DenseTensorViewLayout;
// TI(x) expands to the std::type_index of the static type x; it exists only
// to keep the dispatch-table initializer readable and is undefined right after.
#define TI(x) type_index(typeid(x))
// Dispatch table mapping an op's type_index to the Emitter member function
// that generates code for it (currently Add and Dot; see emitter.cpp).
static const OpMap dispatch{{TI(ngraph::op::Add), &Emitter::EmitAdd},
{TI(ngraph::op::Dot), &Emitter::EmitDot}};
#undef TI
ExternalFunction::ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
bool release_function)
: ngraph::runtime::ExternalFunction(function, release_function)
......@@ -68,7 +76,7 @@ ExternalFunction::ExternalFunction(const std::shared_ptr<ngraph::Function>& func
{
}
void ExternalFunction::compile()
void ExternalFunction::compile(FunctionMap& function_map)
{
if (m_is_compiled)
{
......@@ -199,10 +207,13 @@ void ExternalFunction::compile()
shared_ptr<ngraph::runtime::CallFrame> ExternalFunction::make_call_frame()
{
FunctionMap function_map;
if (!m_is_compiled)
{
compile();
compile(function_map);
}
std::vector<std::shared_ptr<ngraph::runtime::TensorView>> temps;
for (auto tv : m_temp_views)
{
......@@ -216,6 +227,4 @@ shared_ptr<ngraph::runtime::CallFrame> ExternalFunction::make_call_frame()
}
return make_shared<ngraph::runtime::cpu::CallFrame>(
m_n_inputs, m_n_outputs, temps);
//return shared_ptr<ngraph::runtime::CallFrame>();
}
......@@ -31,25 +31,28 @@ namespace ngraph
namespace cpu
{
class Instruction;
class ExternalFunction;
class Emitter;
class ExternalFunction : public ngraph::runtime::ExternalFunction
{
using FunctionMap = std::unordered_map<std::shared_ptr<Function>,
std::shared_ptr<ExternalFunction>>;
using OpFunction = std::function<void(const ngraph::Node*,
using OpFunction = std::function<void(const Emitter*,
const ngraph::Node*,
ExternalFunction*,
FunctionMap&,
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs)>;
using OpMap = std::unordered_map<std::type_index, OpFunction>;
class ExternalFunction : public ngraph::runtime::ExternalFunction
{
public:
ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
bool release_function = true);
std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame();
protected:
void compile();
void compile(FunctionMap& function_map);
size_t m_n_inputs;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment