Commit a5476da6 authored by Louis Feng's avatar Louis Feng

Merge branch 'master' into louisfeng/NGMX-296-conv_bias

parents 362bb996 0acaea58
......@@ -25,6 +25,12 @@
using namespace ngraph::runtime::cpu;
// Destructor: this emitter owns every raw mkldnn::primitive* stored in
// m_mkldnn_primitives (presumably registered via insert_primitive() — confirm
// against the header), so each one must be freed exactly once here.
// NOTE(review): std::unique_ptr elements would make this Rule-of-Zero; left
// as raw pointers to preserve the existing get_mkldnn_primitives() interface.
MKLDNNEmitter::~MKLDNNEmitter()
{
    for (size_t i = 0; i < m_mkldnn_primitives.size(); ++i)
    {
        delete m_mkldnn_primitives[i];
    }
}
// Read-only accessor for the primitives this emitter has built so far.
// The returned reference stays valid for the lifetime of the emitter; the
// pointers it contains are owned by the emitter and freed in the destructor.
const std::vector<mkldnn::primitive*>& MKLDNNEmitter::get_mkldnn_primitives() const
{
    return this->m_mkldnn_primitives;
}
......
......@@ -39,6 +39,8 @@ namespace ngraph
{
public:
MKLDNNEmitter() {}
~MKLDNNEmitter();
const std::vector<mkldnn::primitive*>& get_mkldnn_primitives() const;
size_t insert_primitive(mkldnn::primitive* primitive);
......
This diff is collapsed.
......@@ -24,12 +24,12 @@
#include "ngraph/runtime/gpu/gpu_external_function.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_view_wrapper.hpp"
// NOTE(review): this is the PRE-merge form of EMITTER_DECL, shown here because
// this page renders the removed and added diff lines back to back. It expands
// to a parameter list only; the caller supplies the function name as E, e.g.
// "static void EMITTER_DECL(EmitAdd);". As plain source, keeping both this
// #define and the one below would be an illegal redefinition without an
// intervening #undef — confirm which one the merged file actually keeps.
#define EMITTER_DECL(E) \
E(codegen::CodeWriter& writer, \
const ngraph::Node* n, \
const std::vector<ngraph::runtime::gpu::GPU_TensorViewWrapper>& args, \
const std::vector<ngraph::runtime::gpu::GPU_TensorViewWrapper>& out)
// NOTE(review): POST-merge form of EMITTER_DECL. Unlike the old variant it
// expands to a full "emit<op_name>(...)" signature, so it is used to declare
// explicit specializations of the GPU_Emitter::emit<OP> template rather than
// free-standing EmitXxx methods. It also adds the GPU_ExternalFunction*
// parameter that the new OpFunction signature carries.
#define EMITTER_DECL(op_name) \
emit<op_name>(GPU_ExternalFunction * external_function, \
codegen::CodeWriter & writer, \
const ngraph::Node* node, \
const std::vector<GPU_TensorViewWrapper>& args, \
const std::vector<GPU_TensorViewWrapper>& out)
namespace ngraph
{
namespace runtime
......@@ -39,45 +39,30 @@ namespace ngraph
// GPU code emitter: one static entry point per ngraph op, each writing CUDA/C++
// source for that op into the supplied CodeWriter.
// NOTE(review): this span interleaves the REMOVED pre-merge declarations
// (the EMITTER_DECL(EmitXxx) list) with the ADDED post-merge template API;
// a real source file would contain only one of the two. Kept verbatim here
// because this is a diff rendering, not a compilable unit.
class GPU_Emitter
{
public:
// --- pre-merge API (removed by this diff): one named static method per op,
// --- declared via the old EMITTER_DECL(E) macro. ---
static void EMITTER_DECL(EmitNop);
static void EMITTER_DECL(EmitAdd);
static void EMITTER_DECL(EmitDot);
static void EMITTER_DECL(EmitMultiply);
static void EMITTER_DECL(EmitGetOutputElement);
static void EMITTER_DECL(EmitXLAGetTupleElement);
static void EMITTER_DECL(EmitUnaryElementwise);
static void EMITTER_DECL(EmitTuple);
static void EMITTER_DECL(EmitConcat);
static void EMITTER_DECL(EmitDivide);
static void EMITTER_DECL(EmitEqual);
static void EMITTER_DECL(EmitGreater);
static void EMITTER_DECL(EmitGreaterEq);
static void EMITTER_DECL(EmitLess);
static void EMITTER_DECL(EmitLessEq);
static void EMITTER_DECL(EmitMaximum);
static void EMITTER_DECL(EmitMinimum);
static void EMITTER_DECL(EmitNegative);
static void EMITTER_DECL(EmitNotEqual);
static void EMITTER_DECL(EmitSelect);
static void EMITTER_DECL(EmitSubtract);
static void EMITTER_DECL(EmitBroadcast);
static void EMITTER_DECL(EmitConvert);
static void EMITTER_DECL(EmitConstant);
static void EMITTER_DECL(EmitReshape);
static void EMITTER_DECL(EmitFunctionCall);
static void EMITTER_DECL(EmitReduce);
static void EMITTER_DECL(EmitSlice);
static void EMITTER_DECL(EmitSum);
static void EMITTER_DECL(EmitPower);
static void EMITTER_DECL(EmitReplaceSlice);
static void EMITTER_DECL(EmitOneHot);
static void EMITTER_DECL(EmitSqrt);
static void EMITTER_DECL(EmitConvolution);
static void EMITTER_DECL(EmitMaxPool);
static void EMITTER_DECL(EmitReverse);
static void EMITTER_DECL(EmitReduceWindow);
static void EMITTER_DECL(EmitSelectAndScatter);
static void EMITTER_DECL(EmitResult);
// --- post-merge API (added by this diff): a single emit<OP> template whose
// --- primary definition throws for any op without an explicit
// --- specialization, plus a few hand-written non-template emitters. ---
template <typename OP>
static void emit(GPU_ExternalFunction* external_function,
codegen::CodeWriter& writer,
const ngraph::Node* node,
const std::vector<GPU_TensorViewWrapper>& args,
const std::vector<GPU_TensorViewWrapper>& out)
{
// Fallback for unspecialized ops: fail loudly at emit time rather than
// silently generating nothing.
throw std::runtime_error("Unimplemented op in GPU emitter for " +
node->get_name());
}
// Deliberate no-op emitter: used for ops that require no generated code.
static void nop(GPU_ExternalFunction* external_function,
codegen::CodeWriter& writer,
const ngraph::Node* node,
const std::vector<GPU_TensorViewWrapper>& args,
const std::vector<GPU_TensorViewWrapper>& out)
{
}
// Shared emitter for all unary elementwise ops (defined out of line);
// presumably dispatches on node->description() — confirm in the .cpp.
static void EmitUnaryElementwise(GPU_ExternalFunction* external_function,
codegen::CodeWriter& writer,
const ngraph::Node* node,
const std::vector<GPU_TensorViewWrapper>& args,
const std::vector<GPU_TensorViewWrapper>& out);
};
}
}
......
......@@ -41,7 +41,8 @@ namespace ngraph
class GPU_CallFrame;
// Signature of a per-op emitter callback, satisfied by GPU_Emitter's methods.
// NOTE(review): the diff shows both the OLD first parameter line
// (codegen::CodeWriter& only) and the NEW one (leading GPU_ExternalFunction*);
// only the new five-parameter form is valid after this merge — the duplicated
// std::function< line below is a rendering artifact of the diff page.
using OpFunction =
std::function<void(codegen::CodeWriter&,
std::function<void(GPU_ExternalFunction* external_function,
codegen::CodeWriter&,
const ngraph::Node*,
const std::vector<GPU_TensorViewWrapper>& inputs,
const std::vector<GPU_TensorViewWrapper>& outputs)>;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment