Commit 2c345798 authored by shssf's avatar shssf Committed by Robert Kimball

Backend/API: Implementation of ADD and MUL operations in the compile() (#1200)

* Backend/API: Implementation of ADD and MUL operations in the compile method for IntelGPU

* Branch merge conflicts resolved

* Parameters number check moved to function. RESULT operation handling added.
parent 268853d0
......@@ -17,6 +17,7 @@
set(SRC
intelgpu_backend.cpp
intelgpu_tensor_view.cpp
intelgpu_layout.cpp
)
if (NGRAPH_INTELGPU_ENABLE)
......
......@@ -14,12 +14,52 @@
* limitations under the License.
*******************************************************************************/
#include <CPP/concatenation.hpp>
#include <CPP/eltwise.hpp>
#include <CPP/input_layout.hpp>
#include <CPP/layout.hpp>
#include <CPP/network.hpp>
#include <CPP/reorder.hpp>
#include <CPP/scale.hpp>
#include "ngraph/runtime/intelgpu/intelgpu_backend.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_tensor_view.hpp"
using namespace std;
using namespace ngraph;
// Validates that an op has exactly the expected number of inputs and outputs.
// op:     the operation to check
// input:  expected input count
// output: expected output count
// Throws std::invalid_argument describing the mismatch otherwise.
void arguments_check(const shared_ptr<Node>& op, size_t input, size_t output)
{
    if (op->get_input_size() != input || op->get_output_size() != output)
    {
        ostringstream os;
        // Bug fix: the original printed the actual sizes after "Expected" and the
        // expected counts after "provided" — labels were swapped.
        os << "Operation \"" << op->description() << "\" input and output sizes mismatch.\n"
           << "Expected input size=" << input << ", provided=" << op->get_input_size() << "\n"
           << "Expected output size=" << output << ", provided=" << op->get_output_size();
        throw std::invalid_argument(os.str());
    }
}
// Appends a two-input clDNN eltwise primitive (e.g. sum for Add, prod for
// Multiply) to the topology. Input/output primitives are keyed by tensor name.
void do_eltwise_operation(cldnn::topology& topology,
                          const shared_ptr<Node>& op,
                          cldnn::eltwise_mode mode)
{
    arguments_check(op, 2, 1);

    vector<cldnn::primitive_id> input_names;
    for (const descriptor::Input& op_input : op->get_inputs())
    {
        input_names.push_back(op_input.get_tensor().get_name());
    }

    const string& result_name = op->get_outputs().begin()->get_tensor().get_name();
    topology.add(cldnn::eltwise(result_name, input_names, mode));
}
extern "C" const char* get_ngraph_version_string()
{
return NGRAPH_VERSION;
......@@ -56,7 +96,60 @@ shared_ptr<runtime::TensorView> runtime::intelgpu::IntelGPUBackend::create_tenso
// Translates an nGraph Function into a clDNN network and caches it per
// Function. Supported operations: Parameter, Result, Add, Multiply.
// Returns true on success; throws std::invalid_argument for any
// unsupported operation or arity mismatch.
bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
{
    // Bug fix: the unconditional `throw runtime_error("...Not implemented yet")`
    // that preceded this body made everything below unreachable dead code.
    FunctionInstance& instance = ocl_networks[func];
    if (instance.ocl_network != nullptr)
    {
        // Already compiled; reuse the cached network.
        return true;
    }

    cldnn::topology topology;

    for (shared_ptr<Node> op : func->get_ops())
    {
        if ("Parameter" == op->description())
        {
            arguments_check(op, 0, 1);

            // Parameters become clDNN input layouts keyed by tensor name.
            const std::string& element_name = op->get_output_tensor_view()->get_tensor().get_name();
            const cldnn::layout element_layout =
                IntelGPULayout::create_cldnn_layout(op->get_element_type(), op->get_shape());

            const cldnn::input_layout op_layout(element_name, element_layout);
            topology.add(op_layout);
        }
        else if ("Result" == op->description())
        {
            arguments_check(op, 1, 1);

            // Result is lowered to a reorder (copy) from input to output tensor.
            const descriptor::Tensor& input_tensor = op->get_inputs().begin()->get_tensor();
            const descriptor::Tensor& output_tensor = op->get_outputs().begin()->get_tensor();
            const std::string& input_name = input_tensor.get_name();
            const std::string& output_name = output_tensor.get_name();
            const cldnn::layout input_layout = IntelGPULayout::create_cldnn_layout(
                input_tensor.get_element_type(), op->get_inputs().begin()->get_shape());

            const cldnn::reorder op_reorder(output_name, input_name, input_layout);
            topology.add(op_reorder);
        }
        else if ("Add" == op->description())
        {
            do_eltwise_operation(topology, op, cldnn::eltwise_mode::sum);
        }
        else if ("Multiply" == op->description())
        {
            do_eltwise_operation(topology, op, cldnn::eltwise_mode::prod);
        }
        else
        {
            ostringstream os;
            os << "Unsupported operation \"" << op->description() << '\"';
            throw std::invalid_argument(os.str());
        }
    }

    instance.ocl_network = std::make_shared<cldnn::network>(*ocl_engine, topology);

    return true;
}
bool runtime::intelgpu::IntelGPUBackend::call(
......
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
#include "ngraph/except.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
using namespace std;
using namespace ngraph;
// Wraps a pre-computed clDNN layout as the nGraph tensor-view layout.
// tv:     the tensor view this layout describes
// layout: clDNN layout (data type, format, dimensions) backing the view
runtime::intelgpu::IntelGPULayout::IntelGPULayout(const descriptor::TensorView& tv,
                                                  const cldnn::layout& layout)
    : TensorViewLayout(tv)
    , cldnn_layout(layout)
{
}
// Computes the linear offset of an element from its multi-dimensional
// indices as the dot product of indices and strides.
// Throws ngraph_error when the number of indices differs from the rank.
size_t runtime::intelgpu::IntelGPULayout::get_index_offset(const std::vector<size_t>& indices)
{
    if (indices.size() != strides.size())
    {
        throw ngraph_error("Indices have incorrect rank");
    }
    size_t result = 0;
    // size_t index avoids a signed/unsigned comparison with indices.size().
    for (size_t i = 0; i < indices.size(); i++)
    {
        // Bug fix: the offset is strides[i] * indices[i]; the original
        // erroneously added the two terms.
        result += strides[i] * indices[i];
    }
    return result;
}
// Two layouts compare equal iff both are IntelGPULayouts and their underlying
// clDNN layouts are equal; any other layout kind compares unequal.
bool runtime::intelgpu::IntelGPULayout::
    operator==(const descriptor::layout::TensorViewLayout& other) const
{
    if (const IntelGPULayout* that = dynamic_cast<const IntelGPULayout*>(&other))
    {
        return cldnn_layout == that->cldnn_layout;
    }
    return false;
}
// Maps an nGraph element type to the corresponding clDNN data type.
// Only i8, u8 and f32 are supported; any other type throws
// std::invalid_argument.
cldnn::data_types
    runtime::intelgpu::IntelGPULayout::get_cldnn_type(const element::Type& element_type)
{
    if (element_type == ngraph::element::i8)
    {
        return cldnn::data_types::i8;
    }
    else if (element_type == ngraph::element::u8)
    {
        return cldnn::data_types::u8;
    }
    else if (element_type == ngraph::element::f32)
    {
        return cldnn::data_types::f32;
    }
    else
    {
        ostringstream os;
        // Bug fix: diagnostic named the wrong class ("IntelGPUTensorView"),
        // a copy-paste leftover from the tensor-view implementation.
        os << "IntelGPULayout::get_cldnn_type: Unknown type " << element_type;
        throw std::invalid_argument(os.str());
    }
}
// Builds a clDNN layout for the given element type and shape. The whole
// shape is flattened into the 'x' dimension of a yxfb-format tensor.
cldnn::layout runtime::intelgpu::IntelGPULayout::create_cldnn_layout(
    const ngraph::element::Type& element_type, const Shape& element_shape)
{
    const cldnn::data_types data_type = get_cldnn_type(element_type);
    const size_t element_count = shape_size(element_shape);
    const cldnn::tensor dimensions(1, element_count, 1, 1);

    return cldnn::layout(data_type, cldnn::format::yxfb, dimensions);
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <CPP/layout.hpp>
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
namespace ngraph
{
    namespace runtime
    {
        namespace intelgpu
        {
            // Forward declaration; the class is defined below in this header.
            class IntelGPULayout;
        }
    }
}
// Tensor-view layout for the Intel GPU backend, backed by a clDNN layout
// that describes the data type, format and dimensions of the tensor memory.
class ngraph::runtime::intelgpu::IntelGPULayout
    : public ngraph::descriptor::layout::TensorViewLayout
{
public:
    IntelGPULayout(const ngraph::descriptor::TensorView& tv, const cldnn::layout& layout);
    ~IntelGPULayout() override {}
    // Total element count as reported by the underlying clDNN layout.
    size_t get_size() override { return cldnn_layout.get_linear_size(); }
    // Linear offset for multi-dimensional indices; throws on rank mismatch.
    size_t get_index_offset(const std::vector<size_t>& indices) override;
    // NOTE(review): 'strides' is never populated anywhere visible here, so this
    // returns an empty Strides and get_index_offset() then accepts only empty
    // indices — confirm whether strides are meant to be filled in.
    const Strides& get_strides() const override { return strides; }
    bool operator==(const TensorViewLayout& other) const override;
    void set_cldnn_layout(const cldnn::layout& layout) { cldnn_layout = layout; }
    cldnn::layout get_cldnn_layout() const { return cldnn_layout; }
    // Maps an nGraph element type to a clDNN data type; throws on unsupported types.
    static cldnn::data_types get_cldnn_type(const ngraph::element::Type& element_type);
    // Builds a flattened (1-D, yxfb) clDNN layout for the given type and shape.
    static cldnn::layout create_cldnn_layout(const ngraph::element::Type& element_type,
                                             const Shape& element_shape);

private:
    Strides strides;
    cldnn::layout cldnn_layout;
};
......@@ -20,8 +20,8 @@
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/primary_tensor_view.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_tensor_view.hpp"
#include "ngraph/shape.hpp"
using namespace ngraph;
using namespace std;
......@@ -33,15 +33,10 @@ runtime::intelgpu::IntelGPUTensorView::IntelGPUTensorView(const ngraph::element:
: runtime::TensorView(std::make_shared<ngraph::descriptor::PrimaryTensorView>(
std::make_shared<ngraph::TensorViewType>(element_type, shape), "external"))
{
m_descriptor->set_tensor_view_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorViewLayout>(*m_descriptor));
size_t mem_size = shape_size(shape);
cldnn::data_types data_type = get_cldnn_type(element_type);
cldnn::tensor tensor(1, mem_size, 1, 1);
cldnn::format::type format = cldnn::format::yxfb;
const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(element_type, shape);
cldnn::layout layout(data_type, format, tensor);
m_descriptor->set_tensor_view_layout(
std::make_shared<runtime::intelgpu::IntelGPULayout>(*m_descriptor, layout));
if (nullptr != memory_pointer)
{
......@@ -79,26 +74,3 @@ void runtime::intelgpu::IntelGPUTensorView::read(void* target, size_t tensor_off
const char* source = ptr.data();
memcpy(target, &source[tensor_offset], n);
}
// Maps an nGraph element type to the corresponding clDNN data type.
// Consistency fix: this was a byte-for-byte duplicate of the if-chain in
// IntelGPULayout::get_cldnn_type (whose header this file already includes);
// delegate so the type mapping lives in exactly one place.
cldnn::data_types runtime::intelgpu::IntelGPUTensorView::get_cldnn_type(
    const ngraph::element::Type& element_type) const
{
    return IntelGPULayout::get_cldnn_type(element_type);
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment