Commit 22e1368a authored by Jaikrishnan Menon

CPU: Apply formatting fixes from clang-format

parent 75623f48
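For context, the layout changes in the hunks below (alphabetically sorted includes, wrapping at roughly 100 columns, Allman braces, constructor initializers broken onto their own line, short accessors kept on one line) are roughly what a clang-format configuration along the following lines would produce. This is a hedged sketch inferred from the diff itself, not the repository's actual .clang-format file, which is not part of this commit:

# Hypothetical .clang-format sketch; every value here is inferred from the
# formatting visible in this diff, not copied from the repository.
BasedOnStyle: LLVM
IndentWidth: 4
ColumnLimit: 100
SortIncludes: true
BreakBeforeBraces: Allman
AllowShortFunctionsOnASingleLine: Inline
AlignAfterOpenBracket: Align

With a file like this at the repository root, running clang-format -i over the CPU sources would rewrite them in the style shown in the hunks that follow.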
@@ -46,10 +46,8 @@ public:
void enable_pch() { pch_enabled = true; }
void disable_pch() { pch_enabled = false; }
void enable_debuginfo() { debuginfo_enabled = true; }
void disable_debuginfo() { debuginfo_enabled = false; }
std::unique_ptr<llvm::Module> compile(const std::string& source, const std::string& name = "");
bool add_module(std::unique_ptr<llvm::Module>&);
......
@@ -14,9 +14,9 @@
#pragma once
#include <functional>
#include <memory>
#include <vector>
#include <functional>
#include "ngraph/function.hpp"
#include "ngraph/runtime/call_frame.hpp"
@@ -31,17 +31,17 @@ namespace ngraph
namespace cpu
{
class CallFrame;
using EntryPoint = std::function<void(ngraph::runtime::cpu::CallFrame*, ngraph::runtime::TensorViewPtrs&)>;
using EntryPoint = std::function<void(ngraph::runtime::cpu::CallFrame*,
ngraph::runtime::TensorViewPtrs&)>;
// Compile and execute graphs
class CallFrame : public ngraph::runtime::CallFrame
{
public:
CallFrame(
EntryPoint compiled_function,
size_t n_inputs,
size_t n_outputs,
const TensorViewPtrs& temps);
CallFrame(EntryPoint compiled_function,
size_t n_inputs,
size_t n_outputs,
const TensorViewPtrs& temps);
/// @brief Invoke the function with values matching the signature of the function.
///
......
@@ -14,9 +14,9 @@
#include <memory>
#include "ngraph/runtime/cpu/external_function.hpp"
#include "ngraph/runtime/cpu/cpu_backend.hpp"
#include "ngraph/runtime/cpu/cpu_manager.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
using namespace ngraph::runtime::cpu;
......
@@ -16,8 +16,8 @@
#include <memory>
#include "ngraph/runtime/manager.hpp"
#include "ngraph/codegen/compiler.hpp"
#include "ngraph/runtime/manager.hpp"
namespace ngraph
{
@@ -34,6 +34,7 @@ namespace ngraph
{
protected:
ngraph::codegen::execution_state exec_state;
public:
virtual std::shared_ptr<Backend> allocate_backend() override;
......
This diff is collapsed.
@@ -14,19 +14,19 @@
#pragma once
#include <vector>
#include <string>
#include <vector>
#include "ngraph/node.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
#define EMITTER_DECL(E) E(const ngraph::Node* n, \
ExternalFunction* ef, \
FunctionMap& function_map, \
const std::vector<TensorViewInfo>& inputs, \
const std::vector<TensorViewInfo>& outputs)
#define EMITTER_DECL(E) \
E(const ngraph::Node* n, \
ExternalFunction* ef, \
FunctionMap& function_map, \
const std::vector<TensorViewInfo>& inputs, \
const std::vector<TensorViewInfo>& outputs)
namespace ngraph
{
@@ -40,9 +40,11 @@ namespace ngraph
std::string TU;
public:
Emitter() : TU("") { }
Emitter()
: TU("")
{
}
std::string& GetTU() { return TU; }
void EMITTER_DECL(EmitNop);
void EMITTER_DECL(EmitAdd);
void EMITTER_DECL(EmitDot);
@@ -71,7 +73,6 @@ namespace ngraph
void EMITTER_DECL(EmitParameterizedConstantUInt8);
void EMITTER_DECL(EmitParameterizedConstantUInt32);
void EMITTER_DECL(EmitParameterizedConstantUInt64);
};
}
}
......
@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <fstream>
#include <memory>
#include <string>
#include <tuple>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include <string>
#include <fstream>
#include "ngraph/codegen/compiler.hpp"
#include "ngraph/descriptor/input.hpp"
@@ -70,36 +70,45 @@ using ngraph::descriptor::layout::DenseTensorViewLayout;
#define TI(x) type_index(typeid(x))
static const OpMap dispatcher{{TI(ngraph::op::Add), &Emitter::EmitAdd},
{TI(ngraph::op::Dot), &Emitter::EmitDot},
{TI(ngraph::op::Multiply), &Emitter::EmitMultiply},
{TI(ngraph::op::Parameter), &Emitter::EmitNop},
{TI(ngraph::op::GetTupleElement), &Emitter::EmitGetTupleElement},
{TI(ngraph::op::Tuple), &Emitter::EmitTuple},
{TI(ngraph::op::Abs), &Emitter::EmitAbs},
{TI(ngraph::op::Concat), &Emitter::EmitConcat},
{TI(ngraph::op::Divide), &Emitter::EmitDivide},
{TI(ngraph::op::Equal), &Emitter::EmitEqual},
{TI(ngraph::op::Greater), &Emitter::EmitGreater},
{TI(ngraph::op::GreaterEq), &Emitter::EmitGreaterEq},
{TI(ngraph::op::Less), &Emitter::EmitLess},
{TI(ngraph::op::LessEq), &Emitter::EmitLessEq},
{TI(ngraph::op::Log), &Emitter::EmitLog},
{TI(ngraph::op::Maximum), &Emitter::EmitMaximum},
{TI(ngraph::op::Negative), &Emitter::EmitNegative},
{TI(ngraph::op::NotEqual), &Emitter::EmitNotEqual},
{TI(ngraph::op::Select), &Emitter::EmitSelect},
{TI(ngraph::op::Subtract), &Emitter::EmitSubtract},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Bool>), &Emitter::EmitParameterizedConstantBool},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Float32>), &Emitter::EmitParameterizedConstantFloat32},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Int8>), &Emitter::EmitParameterizedConstantInt8},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Int32>), &Emitter::EmitParameterizedConstantInt32},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Int64>), &Emitter::EmitParameterizedConstantInt64},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::UInt8>), &Emitter::EmitParameterizedConstantUInt8},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::UInt32>), &Emitter::EmitParameterizedConstantUInt32},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::UInt64>), &Emitter::EmitParameterizedConstantUInt64},
};
static const OpMap dispatcher{
{TI(ngraph::op::Add), &Emitter::EmitAdd},
{TI(ngraph::op::Dot), &Emitter::EmitDot},
{TI(ngraph::op::Multiply), &Emitter::EmitMultiply},
{TI(ngraph::op::Parameter), &Emitter::EmitNop},
{TI(ngraph::op::GetTupleElement), &Emitter::EmitGetTupleElement},
{TI(ngraph::op::Tuple), &Emitter::EmitTuple},
{TI(ngraph::op::Abs), &Emitter::EmitAbs},
{TI(ngraph::op::Concat), &Emitter::EmitConcat},
{TI(ngraph::op::Divide), &Emitter::EmitDivide},
{TI(ngraph::op::Equal), &Emitter::EmitEqual},
{TI(ngraph::op::Greater), &Emitter::EmitGreater},
{TI(ngraph::op::GreaterEq), &Emitter::EmitGreaterEq},
{TI(ngraph::op::Less), &Emitter::EmitLess},
{TI(ngraph::op::LessEq), &Emitter::EmitLessEq},
{TI(ngraph::op::Log), &Emitter::EmitLog},
{TI(ngraph::op::Maximum), &Emitter::EmitMaximum},
{TI(ngraph::op::Negative), &Emitter::EmitNegative},
{TI(ngraph::op::NotEqual), &Emitter::EmitNotEqual},
{TI(ngraph::op::Select), &Emitter::EmitSelect},
{TI(ngraph::op::Subtract), &Emitter::EmitSubtract},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Bool>),
&Emitter::EmitParameterizedConstantBool},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Float32>),
&Emitter::EmitParameterizedConstantFloat32},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Int8>),
&Emitter::EmitParameterizedConstantInt8},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Int32>),
&Emitter::EmitParameterizedConstantInt32},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::Int64>),
&Emitter::EmitParameterizedConstantInt64},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::UInt8>),
&Emitter::EmitParameterizedConstantUInt8},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::UInt32>),
&Emitter::EmitParameterizedConstantUInt32},
{TI(ngraph::op::ParameterizedConstant<ngraph::element::UInt64>),
&Emitter::EmitParameterizedConstantUInt64},
};
#undef TI
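The dispatcher table above maps each op's std::type_index to the Emitter member that generates code for it. As a hedged illustration of how such a table is typically consulted during code generation, consider the sketch below; emit_node, its exact parameter list, and the error handling are hypothetical and not part of this commit, and the code assumes the declarations from external_function.hpp and the includes already present in this file are in scope:

// Hypothetical helper showing the intended use of the dispatcher table.
static void emit_node(Emitter& emitter,
                      const ngraph::Node* node,
                      ExternalFunction* ef,
                      FunctionMap& function_map,
                      const std::vector<TensorViewInfo>& inputs,
                      const std::vector<TensorViewInfo>& outputs)
{
    // Look up the handler by the dynamic type of the node, mirroring the
    // TI(x) keys used to build the table.
    auto handler = dispatcher.find(std::type_index(typeid(*node)));
    if (handler == dispatcher.end())
    {
        throw std::runtime_error(std::string("Unhandled op: ") + typeid(*node).name());
    }
    // OpFunction wraps a pointer to an Emitter member, so the Emitter
    // instance is passed as the first argument.
    handler->second(&emitter, node, ef, function_map, inputs, outputs);
}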
@@ -183,15 +192,15 @@ void ExternalFunction::compile(FunctionMap& function_map)
Emitter emitter;
auto& TU = emitter.GetTU();
TU += R"(
#include <vector>
#include <memory>
#include <vector>
#include <Eigen/Dense>
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
#include "ngraph/runtime/cpu/call_frame.hpp"
#include "ngraph/runtime/cpu/eigen_utils.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
void *__dso_handle = 0;
@@ -247,8 +256,8 @@ extern "C" void __entrypoint(ngraph::runtime::cpu::CallFrame* call_frame,
assert(llvm_module);
estate.add_module(llvm_module);
estate.finalize();
compiled_function = estate.find_function<void(ngraph::runtime::cpu::CallFrame*,
ngraph::runtime::TensorViewPtrs&)>("__entrypoint");
compiled_function = estate.find_function<void(
ngraph::runtime::cpu::CallFrame*, ngraph::runtime::TensorViewPtrs&)>("__entrypoint");
assert(compiled_function);
m_is_compiled = true;
......
@@ -14,14 +14,14 @@
#pragma once
#include <functional>
#include <memory>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include <functional>
#include "ngraph/function.hpp"
#include "ngraph/codegen/compiler.hpp"
#include "ngraph/function.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
@@ -35,8 +35,8 @@ namespace ngraph
class Emitter;
class CallFrame;
using FunctionMap = std::unordered_map<std::shared_ptr<Function>,
std::shared_ptr<ExternalFunction>>;
using FunctionMap =
std::unordered_map<std::shared_ptr<Function>, std::shared_ptr<ExternalFunction>>;
using OpFunction = std::function<void(Emitter*,
const ngraph::Node*,
@@ -47,7 +47,8 @@ namespace ngraph
using OpMap = std::unordered_map<std::type_index, OpFunction>;
using EntryPoint = std::function<void(ngraph::runtime::cpu::CallFrame*, ngraph::runtime::TensorViewPtrs&)>;
using EntryPoint = std::function<void(ngraph::runtime::cpu::CallFrame*,
ngraph::runtime::TensorViewPtrs&)>;
class ExternalFunction : public ngraph::runtime::ExternalFunction
{
@@ -55,6 +56,7 @@ namespace ngraph
ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
bool release_function = true);
std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame();
protected:
void compile(FunctionMap& function_map);
......
@@ -803,7 +803,6 @@ TEST(cpu, greater)
ASSERT_EQ((vector<char>{0, 1, 0, 1, 0, 1, 1, 0}), result->get_vector());
}
TEST(cpu, greatereq)
{
auto shape = Shape{2, 2, 2};
......