Commit 5b9000ac authored by nmostafa

[MLIR] Fix issues after rebase on ngraph/master

parent 8cb95f71
@@ -22,7 +22,7 @@
#include "lowerer.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/dot.hpp"
#include "ngraph/op/experimental/compiled_kernel.hpp"
......
@@ -15,12 +15,9 @@
//*****************************************************************************
#include "memory_manager.hpp"
#include "ngraph/ngraph_visibility.hpp"
#include <memory>
using namespace ngraph::runtime::ngmlir;
/// Callback to allocate memory for temps from JIT'ed code
......
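The memory_manager hunk above exposes allocation to JIT-compiled code, per the callback doc comment. A minimal sketch of that pattern follows, with hypothetical names (MemoryManager and jit_allocate are illustrative stand-ins, not the actual nGraph symbols): JIT'ed code cannot invoke a C++ member function directly, so a C-linkage trampoline receives the manager as an opaque pointer and forwards the request.

```cpp
#include <cstddef>
#include <cstdlib>
#include <vector>

// Hypothetical stand-in for the pool that owns temp allocations.
class MemoryManager
{
public:
    void* allocate(std::size_t size)
    {
        void* ptr = std::malloc(size); // temp buffer for the JIT'ed kernel
        m_allocations.push_back(ptr);  // track it for bulk release
        return ptr;
    }

    ~MemoryManager()
    {
        for (void* ptr : m_allocations)
        {
            std::free(ptr);
        }
    }

private:
    std::vector<void*> m_allocations;
};

// C-linkage trampoline that JIT'ed code resolves by symbol name and
// calls with the manager passed through as an opaque pointer.
extern "C" void* jit_allocate(MemoryManager* mgr, std::size_t size)
{
    return mgr->allocate(size);
}
```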
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
......
@@ -20,6 +20,7 @@
#include "ngraph/op/dot.hpp"
#include "ngraph/op/experimental/compiled_kernel.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/assertion.hpp"
using namespace ngraph::descriptor;
using namespace ngraph::op;
@@ -64,9 +65,9 @@ bool MLIRSubgraphExtractionPass::run_on_function(std::shared_ptr<Function> func)
auto& out_desc = output_descs[0];
// 'replace_output' invalidates iterators into the original container. Use a copy instead.
-std::set<Input*> input_descs{out_desc.get_inputs()};
-for (Input* in_desc : input_descs)
+const std::set<descriptor::Input*> input_descs = out_desc.get_inputs();
+for (descriptor::Input* in_desc : input_descs)
{
in_desc->replace_output(ck, i);
}
......
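For context on the hunk above: std::set::erase invalidates iterators to the erased element, so mutating the set returned by get_inputs() while ranging over that same set is undefined behavior; iterating over a copy keeps the loop stable. A self-contained sketch of the hazard and the fix, using plain ints in place of the descriptor types:

```cpp
#include <iostream>
#include <set>

std::set<int> g_inputs = {1, 2, 3};

// Stand-in for replace_output: erases the element from the original set,
// which would invalidate any loop iterator pointing at it.
void replace_output(int in)
{
    g_inputs.erase(in);
}

int main()
{
    // Iterate over a snapshot; mutations hit g_inputs, not the loop's container.
    const std::set<int> snapshot = g_inputs;
    for (int in : snapshot)
    {
        replace_output(in);
    }
    std::cout << g_inputs.size() << "\n"; // prints 0
}
```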
@@ -38,14 +38,6 @@ namespace ngraph
auto arg1_buffer_index = external_function->get_buffer_index(args[1].get_name());
auto out0_buffer_index = external_function->get_buffer_index(out[0].get_name());
-// TODO: Quick hook for MLIR.
-if (std::getenv("NGRAPH_MLIR") != nullptr)
-{
-    functors.emplace_back(build_mlir_single_output_binary_op(
-        node, arg0_tensor, arg1_tensor, out0_tensor));
-    return;
-}
const ngraph::op::MatmulBias* mm = static_cast<const ngraph::op::MatmulBias*>(node);
const auto& arg0_shape = mm->get_a_shape();
......
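The deleted block above was a temporary hook that diverted MatmulBias emission to MLIR whenever an environment variable was set. A sketch of the gating pattern it relied on (only build_mlir_single_output_binary_op comes from the diff; the wrapper here is illustrative):

```cpp
#include <cstdlib>

// std::getenv returns nullptr when the variable is unset, so the MLIR
// path was taken only when NGRAPH_MLIR was defined at runtime.
bool mlir_enabled()
{
    return std::getenv("NGRAPH_MLIR") != nullptr;
}
```

Removing the per-op check is consistent with the dispatcher hunk later in this commit: MLIR-eligible work is routed through the CompiledKernel op rather than hijacking individual builders.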
@@ -40,29 +40,31 @@ namespace ngraph
// Tensors haven't been allocated yet so we have to keep a pointer to the pointer
// that will hold the future memory address.
-std::vector<void**> double_ptr_args;
+std::vector<size_t> buffer_indices;
for (const TensorViewWrapper& arg : args)
{
double_ptr_args.push_back(&external_function->get_tensor_data(arg.get_name()));
auto buffer_index = external_function->get_buffer_index(arg.get_name());
buffer_indices.push_back(buffer_index);
}
for (const TensorViewWrapper& result : out)
{
-double_ptr_args.push_back(
-    &external_function->get_tensor_data(result.get_name()));
+auto buffer_index = external_function->get_buffer_index(result.get_name());
+buffer_indices.push_back(buffer_index);
}
// Create functor that will be executed to compile and run this CompiledKernel.
// Note that 'buffer_indices' must be captured by value since it's a local var.
-auto functor = [node, double_ptr_args](CPURuntimeContext* ctx,
+auto functor = [node, buffer_indices](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
// MLIR requires a list of type-erased pointers to arguments. Tensors must have
// been allocated at this point so we can get rid of the extra reference.
std::vector<void*> ptr_args;
-for (auto& double_ptr : double_ptr_args)
+for (auto& buffer_index : buffer_indices)
{
-ptr_args.push_back(*double_ptr);
+ptr_args.push_back(ctx->buffer_data[buffer_index]);
}
// Compile nodes within the CompiledKernel op.
......
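This hunk replaces pointer-to-pointer bookkeeping with buffer indices. Tensor addresses are unknown when the functor is constructed, so the old code captured void** slots to dereference later; the new code captures stable size_t indices and resolves them against ctx->buffer_data at execution time, which also survives tensors being reallocated between runs. A simplified sketch of the new scheme (RuntimeContext and make_functor are stand-ins for CPURuntimeContext and the emitter lambda):

```cpp
#include <cstddef>
#include <functional>
#include <vector>

// Stand-in for CPURuntimeContext: the table is filled in once tensors
// are actually allocated, possibly after the functor is built.
struct RuntimeContext
{
    std::vector<void*> buffer_data;
};

std::function<void(RuntimeContext&)> make_functor(std::vector<std::size_t> buffer_indices)
{
    // Capture the indices by value: unlike raw double pointers, they
    // remain valid no matter when or where the functor runs.
    return [buffer_indices](RuntimeContext& ctx) {
        std::vector<void*> ptr_args;
        for (std::size_t idx : buffer_indices)
        {
            // Resolve each index to the tensor's current address.
            ptr_args.push_back(ctx.buffer_data[idx]);
        }
        // ... hand ptr_args to the compiled kernel ...
    };
}
```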
@@ -103,7 +103,6 @@
#include "ngraph/runtime/cpu/kernel/subtract.hpp"
#include "ngraph/runtime/cpu/kernel/tan.hpp"
#include "ngraph/runtime/cpu/kernel/tanh.hpp"
#include "ngraph/runtime/cpu/mlir/compiler.hpp"
#include "ngraph/runtime/cpu/op/convert_layout.hpp"
#include "ngraph/runtime/cpu/op/halide_op.hpp"
#include "ngraph/type/element_type.hpp"
......
@@ -118,7 +118,6 @@
#include "ngraph/runtime/cpu/op/batch_mat_mul_transpose.hpp"
#include "ngraph/runtime/cpu/op/batch_norm_relu.hpp"
#include "ngraph/runtime/cpu/op/bounded_relu.hpp"
#include "ngraph/op/experimental/compiled_kernel.hpp"
#include "ngraph/runtime/cpu/op/conv_add.hpp"
#include "ngraph/runtime/cpu/op/conv_relu.hpp"
#include "ngraph/runtime/cpu/op/convert_layout.hpp"
......
@@ -170,7 +170,6 @@
#include "ngraph/runtime/cpu/op/deconv.hpp"
#include "ngraph/runtime/cpu/op/group_conv_bias.hpp"
#include "ngraph/runtime/cpu/op/leaky_relu.hpp"
#include "ngraph/runtime/cpu/op/loop_kernel.hpp"
#include "ngraph/runtime/cpu/op/lstm.hpp"
#include "ngraph/runtime/cpu/op/matmul_bias.hpp"
#include "ngraph/runtime/cpu/op/max_pool_with_indices.hpp"
@@ -427,8 +426,7 @@ static const runtime::cpu::OpMap dispatcher{
{TI(ngraph::op::And), &runtime::cpu::CPU_Emitter::emit<op::And>},
{TI(ngraph::op::Or), &runtime::cpu::CPU_Emitter::emit<op::Or>},
{TI(ngraph::op::CPULeakyRelu), &runtime::cpu::CPU_Emitter::emit<op::CPULeakyRelu>},
-{TI(ngraph::runtime::cpu::op::LoopKernel),
-    &runtime::cpu::CPU_Emitter::emit<runtime::cpu::op::LoopKernel>},
+{TI(ngraph::op::CompiledKernel), &runtime::cpu::CPU_Emitter::emit<op::CompiledKernel>},
{TI(ngraph::op::LRN), &runtime::cpu::CPU_Emitter::emit<ngraph::op::LRN>},
{TI(ngraph::op::GenerateMask), &runtime::cpu::CPU_Emitter::emit<ngraph::op::GenerateMask>},
{TI(ngraph::op::ConvolutionAdd), &runtime::cpu::CPU_Emitter::emit<op::ConvolutionAdd>},
......
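The dispatcher hunk above swaps the two-line LoopKernel entry for a single CompiledKernel entry. The table is keyed by std::type_index through the TI macro, so retargeting an op is a one-entry edit. A reduced sketch of the mechanism (Node, CompiledKernel, and the emit lambda are placeholders for the real classes):

```cpp
#include <functional>
#include <iostream>
#include <typeindex>
#include <unordered_map>

struct Node { virtual ~Node() = default; };
struct CompiledKernel : Node {};

using EmitFn = std::function<void(const Node*)>;

#define TI(x) std::type_index(typeid(x))

// One entry per op type; dispatch is a hash lookup on the dynamic type.
static const std::unordered_map<std::type_index, EmitFn> dispatcher{
    {TI(CompiledKernel), [](const Node*) { std::cout << "emit CompiledKernel\n"; }},
};

int main()
{
    CompiledKernel ck;
    const Node* node = &ck;
    // typeid on a dereferenced polymorphic pointer yields the dynamic
    // type, matching the key stored when the table was built.
    dispatcher.at(TI(*node))(node);
}
```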
@@ -44,6 +44,7 @@ namespace ngraph
namespace runtime
{
class Backend;
+class Value;
}
std::string to_cplusplus_sourcecode_literal(bool val);
......
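The one-line addition above forward-declares runtime::Value next to the existing Backend declaration. A forward declaration is enough when a header only needs the name for pointer or reference signatures; the full definition (and its include chain) stays confined to the .cpp files that dereference it. An illustrative sketch:

```cpp
#include <string>

namespace ngraph
{
    namespace runtime
    {
        // Forward declarations: complete definitions not required here.
        class Backend;
        class Value;
    }
}

// Pointers and references to incomplete types are valid in declarations.
std::string describe(const ngraph::runtime::Value* value);
```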
@@ -74,7 +74,6 @@
#include "ngraph/runtime/cpu/op/sigmoid_mul.hpp"
#include "ngraph/runtime/cpu/op/update_slice.hpp"
#include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
#include "ngraph/runtime/cpu/pass/cpu_loop_kernel_fusion.hpp"
#include "ngraph/runtime/cpu/pass/cpu_mat_fusion.hpp"
#include "ngraph/runtime/cpu/pass/cpu_post_layout_optimizations.hpp"
#include "ngraph/runtime/cpu/pass/cpu_rnn_fusion.hpp"
......