Commit fff591a3 authored by Sergey Shalnov, committed by Scott Cyphers

IntelGPU backend: Code refactoring due to deprecated Node functions (#2734)

parent 62e2ae18
@@ -351,6 +351,11 @@ descriptor::Tensor& Node::get_output_tensor(size_t i) const
return m_outputs.at(i).get_tensor();
}
const string& Node::get_output_tensor_name(size_t i) const
{
return m_outputs.at(i).get_tensor().get_name();
}
descriptor::Tensor& Node::get_output_tensor() const
{
if (get_output_size() != 1)
@@ -380,6 +385,11 @@ const PartialShape& Node::get_input_partial_shape(size_t i) const
return m_inputs.at(i).get_partial_shape();
}
const string& Node::get_input_tensor_name(size_t i) const
{
return m_inputs.at(i).get_tensor().get_name();
}
bool Node::has_same_type(std::shared_ptr<const Node> node) const
{
if (get_output_size() != node->get_output_size())
......
@@ -236,6 +236,9 @@ namespace ngraph
/// Returns the tensor for output i
descriptor::Tensor& get_output_tensor(size_t i) const;
/// Returns the tensor name for output i
const std::string& get_output_tensor_name(size_t i) const;
/// Checks that there is exactly one output and returns its tensor.
descriptor::Tensor& get_output_tensor() const;
@@ -260,6 +263,9 @@ namespace ngraph
/// Returns the partial shape of input i
const PartialShape& get_input_partial_shape(size_t i) const;
/// Returns the tensor name for input i
const std::string& get_input_tensor_name(size_t i) const;
std::unordered_set<descriptor::Tensor*> liveness_new_list;
std::unordered_set<descriptor::Tensor*> liveness_free_list;
......
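The pattern this commit applies throughout the IntelGPU backend follows from the two accessors added above: call sites stop reaching through the deprecated descriptor chain and ask the Node directly. A minimal sketch of the migration (hypothetical helper, not code from the commit):

#include <memory>
#include <string>
#include "ngraph/node.hpp"

// Sketch only: contrasts the deprecated lookup with the new accessor.
static std::string first_input_name(const std::shared_ptr<ngraph::Node>& op)
{
    // Before: op->get_inputs().at(0).get_tensor().get_name()  (deprecated chain)
    // After:  the convenience accessor introduced in this commit
    return op->get_input_tensor_name(0);
}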
@@ -142,36 +142,6 @@ static void arguments_check(const shared_ptr<Node>& op, size_t input, size_t out
}
}
static const string& get_input_name(const shared_ptr<Node>& op, size_t num = 0)
{
return op->get_inputs().at(num).get_tensor().get_name();
}
static const string& get_output_name(const shared_ptr<Node>& op, size_t num = 0)
{
return op->get_outputs().at(num).get_tensor().get_name();
}
static const Shape& get_input_shape(const shared_ptr<Node>& op, size_t num = 0)
{
return op->get_inputs().at(num).get_shape();
}
static const Shape& get_output_shape(const shared_ptr<Node>& op, size_t num = 0)
{
return op->get_outputs().at(num).get_shape();
}
static const element::Type& get_input_type(const shared_ptr<Node>& op, size_t num = 0)
{
return op->get_inputs().at(num).get_tensor().get_element_type();
}
static const element::Type& get_output_type(const shared_ptr<Node>& op, size_t num = 0)
{
return op->get_outputs().at(num).get_tensor().get_element_type();
}
static void do_eltwise_operation(cldnn::topology& topology,
const shared_ptr<Node>& op,
const string& custom_op,
@@ -180,25 +150,28 @@ static void do_eltwise_operation(cldnn::topology& topology,
{
arguments_check(op, 2, 1);
if (get_input_type(op) != element::f32 || get_input_type(op, 1) != element::f32 ||
get_output_type(op) != element::f32)
if (op->get_input_element_type(0) != element::f32 ||
op->get_input_element_type(1) != element::f32 ||
op->get_output_element_type(0) != element::f32)
{
runtime::intelgpu::do_eltwise_kernel(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_type(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
custom_op,
function_operation);
}
else
{
const cldnn::eltwise op_eltwise(
get_output_name(op), {get_input_name(op, 0), get_input_name(op, 1)}, mode);
op->get_output_tensor_name(0),
{op->get_input_tensor_name(0), op->get_input_tensor_name(1)},
mode);
topology.add(op_eltwise);
}
}
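The arguments_check(op, 2, 1) guard above arrives with its body elided by the diff; presumably it verifies that the node carries exactly the expected number of inputs and outputs before the handler touches them. A sketch under that assumption (name and message are illustrative, not the actual implementation):

#include <memory>
#include <stdexcept>
#include "ngraph/node.hpp"

static void arguments_check_sketch(const std::shared_ptr<ngraph::Node>& op,
                                   size_t input,
                                   size_t output)
{
    // Assumed behavior: reject ops whose arity differs from what the handler expects.
    if (op->get_input_size() != input || op->get_output_size() != output)
    {
        throw std::invalid_argument("operation \"" + op->description() +
                                    "\": unexpected input/output count");
    }
}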
@@ -210,7 +183,8 @@ static void do_cldnn_unary(cldnn::topology& topology,
{
arguments_check(op, 1, 1);
const cldnn::activation cldnn_unary(get_output_name(op), get_input_name(op), mode, param);
const cldnn::activation cldnn_unary(
op->get_output_tensor_name(0), op->get_input_tensor_name(0), mode, param);
topology.add(cldnn_unary);
}
@@ -220,12 +194,12 @@ static void
arguments_check(op, 1, 1);
runtime::intelgpu::do_custom_unary_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
operation);
}
@@ -238,7 +212,7 @@ static void do_universal_unary(cldnn::topology& topology,
{
arguments_check(op, 1, 1);
if (force_custom || (get_input_type(op) != element::f32))
if (force_custom || (op->get_input_element_type(0) != element::f32))
{
do_custom_unary(topology, op, operation);
}
@@ -257,13 +231,18 @@ static void do_pooling_operation(cldnn::topology& topology,
{
arguments_check(op, 1, 1);
const cldnn::tensor output_size = intelgpu_space::create_cldnn_tensor(get_output_shape(op));
const cldnn::tensor output_size = intelgpu_space::create_cldnn_tensor(op->get_output_shape(0));
const cldnn::tensor input_offset = intelgpu_space::create_cldnn_offset(pad_below);
const cldnn::tensor size = intelgpu_space::create_cldnn_tensor(pool_shape);
const cldnn::tensor stride = intelgpu_space::create_cldnn_tensor(pool_strides);
const cldnn::pooling cldnn_pooling(
get_output_name(op), get_input_name(op), mode, size, stride, input_offset, output_size);
const cldnn::pooling cldnn_pooling(op->get_output_tensor_name(0),
op->get_input_tensor_name(0),
mode,
size,
stride,
input_offset,
output_size);
topology.add(cldnn_pooling);
}
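intelgpu_space::create_cldnn_tensor is used above but lives outside this diff; presumably it right-aligns an ngraph Shape into clDNN's fixed rank-4 (batch, feature, y, x) tensor, padding the leading dimensions with 1. A sketch under that assumption (the header path and the exact mapping are assumptions):

#include <vector>
#include <CPP/tensor.hpp> // assumed clDNN header location
#include "ngraph/shape.hpp"

static cldnn::tensor create_cldnn_tensor_sketch(const ngraph::Shape& shape)
{
    std::vector<cldnn::tensor::value_type> dims(4, 1); // pad unused dims with 1
    const size_t offset = 4 - shape.size();            // callers keep rank <= 4
    for (size_t i = 0; i < shape.size(); ++i)
    {
        dims.at(offset + i) = static_cast<cldnn::tensor::value_type>(shape.at(i));
    }
    // clDNN's four-argument constructor takes (batch, feature, x, y).
    return cldnn::tensor(dims.at(0), dims.at(1), dims.at(3), dims.at(2));
}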
@@ -274,14 +253,14 @@ static void do_logical_operation(cldnn::topology& topology,
arguments_check(op, 2, 1);
runtime::intelgpu::do_logic_kernel(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_type(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
operation);
}
@@ -473,7 +452,7 @@ shared_ptr<runtime::Executable>
{
arguments_check(op, 1, 1);
func_output_names.insert(get_input_name(op));
func_output_names.insert(op->get_input_tensor_name(0));
break;
}
case OP_TYPEID::GetOutputElement:
@@ -486,7 +465,8 @@ shared_ptr<runtime::Executable>
const shared_ptr<op::GetOutputElement> elem =
static_pointer_cast<op::GetOutputElement>(op);
do_equal_propagation(topology, get_input_name(op, elem->get_n()), get_output_name(op));
do_equal_propagation(
topology, op->get_input_tensor_name(elem->get_n()), op->get_output_tensor_name(0));
break;
}
case OP_TYPEID::Slice:
@@ -498,19 +478,20 @@ shared_ptr<runtime::Executable>
const Coordinate& upper_bounds = elem->get_upper_bounds();
const Strides& strides = elem->get_strides();
if (get_input_shape(op).empty() || get_output_shape(op).empty() ||
if (op->get_input_shape(0).empty() || op->get_output_shape(0).empty() ||
lower_bounds.empty() || upper_bounds.empty() || strides.empty())
{
do_equal_propagation(topology, get_input_name(op), get_output_name(op));
do_equal_propagation(
topology, op->get_input_tensor_name(0), op->get_output_tensor_name(0));
}
else
{
do_slice_operation(topology,
get_input_name(op),
get_input_shape(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
lower_bounds,
upper_bounds,
strides);
@@ -521,25 +502,25 @@ shared_ptr<runtime::Executable>
{
arguments_check(op, 3, 1);
if (get_output_type(op) != element::f32)
if (op->get_output_element_type(0) != element::f32)
{
do_select_operation(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_input_name(op, 2),
get_input_shape(op, 2),
get_output_name(op),
get_output_shape(op),
get_output_type(op));
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_input_tensor_name(2),
op->get_input_shape(2),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0));
}
else
{
const cldnn::select cldnn_select(get_output_name(op),
get_input_name(op, 1),
get_input_name(op, 2),
get_input_name(op));
const cldnn::select cldnn_select(op->get_output_tensor_name(0),
op->get_input_tensor_name(1),
op->get_input_tensor_name(2),
op->get_input_tensor_name(0));
topology.add(cldnn_select);
}
break;
@@ -553,17 +534,18 @@ shared_ptr<runtime::Executable>
if (reversed_axes.empty())
{
do_equal_propagation(topology, get_input_name(op), get_output_name(op));
do_equal_propagation(
topology, op->get_input_tensor_name(0), op->get_output_tensor_name(0));
}
else
{
do_reverse_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
reversed_axes);
}
break;
@@ -578,15 +560,15 @@ shared_ptr<runtime::Executable>
const size_t seq_axis = revseq_op->get_sequence_axis();
do_reverse_sequence_operation(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_type(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_input_type(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_input_element_type(1),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
seq_axis,
batch_axis);
break;
@@ -595,19 +577,20 @@ shared_ptr<runtime::Executable>
{
arguments_check(op, 1, 1);
if (get_input_type(op) == get_output_type(op))
if (op->get_input_element_type(0) == op->get_output_element_type(0))
{
do_equal_propagation(topology, get_input_name(op), get_output_name(op));
do_equal_propagation(
topology, op->get_input_tensor_name(0), op->get_output_tensor_name(0));
}
else
{
do_convert_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op));
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0));
}
break;
}
@@ -621,8 +604,9 @@ shared_ptr<runtime::Executable>
const shared_ptr<op::Concat> concat_op = static_pointer_cast<op::Concat>(op);
const size_t ngraph_concat_axis = concat_op->get_concatenation_axis();
if (!shape_size(get_output_shape(op)) || (get_input_type(op) != element::f32) ||
get_output_shape(op).size() > 4)
if (!shape_size(op->get_output_shape(0)) ||
(op->get_input_element_type(0) != element::f32) ||
op->get_output_shape(0).size() > 4)
{
vector<string> input_names;
vector<Shape> input_shapes;
@@ -639,16 +623,17 @@ shared_ptr<runtime::Executable>
if (input_names.empty())
{
do_equal_propagation(topology, get_input_name(op), get_output_name(op));
do_equal_propagation(
topology, op->get_input_tensor_name(0), op->get_output_tensor_name(0));
}
else
{
do_concat_operation(topology,
input_names,
input_shapes,
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
ngraph_concat_axis);
}
}
@@ -657,7 +642,7 @@ shared_ptr<runtime::Executable>
// All input shapes must be the same
// if shape is empty (means Shape{}) in this case treat its size as 1
const size_t ngraph_tensor_dims =
get_input_shape(op).empty() ? 1 : get_input_shape(op).size();
op->get_input_shape(0).empty() ? 1 : op->get_input_shape(0).size();
vector<cldnn::primitive_id> inputs;
cldnn::concatenation::concatenation_axis cldnn_axis =
@@ -674,12 +659,13 @@ shared_ptr<runtime::Executable>
if (inputs.empty())
{
do_equal_propagation(topology, get_input_name(op), get_output_name(op));
do_equal_propagation(
topology, op->get_input_tensor_name(0), op->get_output_tensor_name(0));
}
else
{
const cldnn::concatenation cldnn_concat(
get_output_name(op), inputs, cldnn_axis);
op->get_output_tensor_name(0), inputs, cldnn_axis);
topology.add(cldnn_concat);
}
}
@@ -692,20 +678,20 @@ shared_ptr<runtime::Executable>
const shared_ptr<op::Softmax> softmax_op = static_pointer_cast<op::Softmax>(op);
const AxisSet& axes = softmax_op->get_axes();
const size_t axes_size = axes.size();
const size_t shape_dim_count = get_input_shape(op, 0).size();
const size_t shape_dim_count = op->get_input_shape(0).size();
// clDNN has limited support for Softmax operation
// following are the checks to go with custom kernel
if ((shape_dim_count > 3) || ((shape_dim_count == 3) && (axes_size == 2)) ||
(get_input_type(op) != element::f32))
(op->get_input_element_type(0) != element::f32))
{
do_softmax_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axes);
}
else
@@ -726,7 +712,7 @@ shared_ptr<runtime::Executable>
}
const cldnn::softmax cldnn_softmax(
get_output_name(op), get_input_name(op), dimension);
op->get_output_tensor_name(0), op->get_input_tensor_name(0), dimension);
topology.add(cldnn_softmax);
}
break;
@@ -773,12 +759,12 @@ shared_ptr<runtime::Executable>
const shared_ptr<op::Constant> constant_inst = static_pointer_cast<op::Constant>(op);
void* memory_pointer = const_cast<void*>(constant_inst->get_data_ptr());
const cldnn::layout layout =
IntelGPULayout::create_cldnn_layout(get_output_type(op), get_output_shape(op));
const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(
op->get_output_element_type(0), op->get_output_shape(0));
const cldnn::memory mem(
cldnn::memory::attach<void>(layout, memory_pointer, layout.bytes_count()));
const cldnn::data op_const(get_output_name(op), mem);
const cldnn::data op_const(op->get_output_tensor_name(0), mem);
topology.add(op_const);
break;
}
@@ -788,14 +774,16 @@ shared_ptr<runtime::Executable>
const shared_ptr<op::Dot> dot_inst = static_pointer_cast<op::Dot>(op);
const size_t axes_count = dot_inst->get_reduction_axes_count();
const Shape& input0_shape = get_input_shape(op, 0);
const Shape& input1_shape = get_input_shape(op, 1);
const Shape& input0_shape = op->get_input_shape(0);
const Shape& input1_shape = op->get_input_shape(1);
const size_t input0_elem_count = shape_size(input0_shape);
const size_t input1_elem_count = shape_size(input1_shape);
if (get_input_type(op) == element::f32 && get_input_type(op, 1) == element::f32 &&
get_output_type(op) == element::f32 && input0_elem_count && input1_elem_count &&
(axes_count < 2) && (input0_shape.size() < 3) && (input1_shape.size() < 3))
if (op->get_input_element_type(0) == element::f32 &&
op->get_input_element_type(1) == element::f32 &&
op->get_output_element_type(0) == element::f32 && input0_elem_count &&
input1_elem_count && (axes_count < 2) && (input0_shape.size() < 3) &&
(input1_shape.size() < 3))
{
bool transpose0 = false;
bool transpose1 = false;
@@ -814,9 +802,9 @@ shared_ptr<runtime::Executable>
transpose1 = true;
}
const cldnn::gemm dot_op(get_output_name(op),
get_input_name(op, 0),
get_input_name(op, 1),
const cldnn::gemm dot_op(op->get_output_tensor_name(0),
op->get_input_tensor_name(0),
op->get_input_tensor_name(1),
transpose0,
transpose1);
topology.add(dot_op);
@@ -824,13 +812,13 @@ shared_ptr<runtime::Executable>
else
{
do_dot_operation(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axes_count);
}
break;
@@ -841,7 +829,8 @@ shared_ptr<runtime::Executable>
const shared_ptr<op::MaxPool> max_pool = static_pointer_cast<op::MaxPool>(op);
if ((get_input_shape(op).size() > 4) || (get_output_type(op) != element::f32) ||
if ((op->get_input_shape(0).size() > 4) ||
(op->get_output_element_type(0) != element::f32) ||
!max_pool->get_padding_below().empty() || !max_pool->get_padding_above().empty())
{
const shared_ptr<Node> def_val = max_pool->get_default_value();
@@ -850,11 +839,11 @@ shared_ptr<runtime::Executable>
const vector<std::string>& values = def_const->get_value_strings();
do_max_avg_pool_operation(topology,
get_input_name(op),
get_input_shape(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
max_pool->get_window_shape(),
max_pool->get_window_movement_strides(),
max_pool->get_padding_below(),
@@ -888,13 +877,13 @@ shared_ptr<runtime::Executable>
static_pointer_cast<op::MaxPoolBackprop>(op);
do_max_pool_backprop_operation(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
max_pool_b->get_window_shape(),
max_pool_b->get_window_movement_strides(),
max_pool_b->get_padding_below());
@@ -906,7 +895,8 @@ shared_ptr<runtime::Executable>
const shared_ptr<op::AvgPool> avg_pool = static_pointer_cast<op::AvgPool>(op);
if ((get_input_shape(op).size() > 4) || (get_output_type(op) != element::f32) ||
if ((op->get_input_shape(0).size() > 4) ||
(op->get_output_element_type(0) != element::f32) ||
avg_pool->get_include_padding_in_avg_computation() ||
!avg_pool->get_padding_below().empty() || !avg_pool->get_padding_above().empty())
{
@@ -916,11 +906,11 @@ shared_ptr<runtime::Executable>
const vector<std::string>& values = def_const->get_value_strings();
do_max_avg_pool_operation(topology,
get_input_name(op),
get_input_shape(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
avg_pool->get_window_shape(),
avg_pool->get_window_movement_strides(),
avg_pool->get_padding_below(),
@@ -951,11 +941,11 @@ shared_ptr<runtime::Executable>
static_pointer_cast<op::AvgPoolBackprop>(op);
do_avg_pool_backprop_operation(topology,
get_input_name(op),
get_input_shape(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
avg_pool_b->get_window_shape(),
avg_pool_b->get_window_movement_strides(),
avg_pool_b->get_padding_below(),
@@ -971,12 +961,15 @@ shared_ptr<runtime::Executable>
if (axis.empty())
{
do_equal_propagation(topology, get_input_name(op), get_output_name(op));
do_equal_propagation(
topology, op->get_input_tensor_name(0), op->get_output_tensor_name(0));
}
else if ((get_output_shape(op).size() <= 4) && (shape_size(get_output_shape(op)) > 0) &&
((get_input_type(op) == element::f32) || (get_input_type(op) == element::i32)))
else if ((op->get_output_shape(0).size() <= 4) &&
(shape_size(op->get_output_shape(0)) > 0) &&
((op->get_input_element_type(0) == element::f32) ||
(op->get_input_element_type(0) == element::i32)))
{
const size_t shift = 4 - get_output_shape(op).size();
const size_t shift = 4 - op->get_output_shape(0).size();
vector<uint16_t> fixed_b_axes;
for (uint16_t i = 0; i < shift; ++i)
@@ -990,21 +983,23 @@ shared_ptr<runtime::Executable>
}
const cldnn::tensor output_tensor_size =
intelgpu_space::create_cldnn_tensor(get_output_shape(op));
intelgpu_space::create_cldnn_tensor(op->get_output_shape(0));
const cldnn::broadcast cldnn_broadcast(
get_output_name(op), get_input_name(op), output_tensor_size, fixed_b_axes);
const cldnn::broadcast cldnn_broadcast(op->get_output_tensor_name(0),
op->get_input_tensor_name(0),
output_tensor_size,
fixed_b_axes);
topology.add(cldnn_broadcast);
}
else
{
do_bcast_sum_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axis,
true);
}
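A worked example of the axis fix-up above, assuming the elided loop body appends each original broadcast axis shifted by shift: broadcasting Shape{3} to Shape{2,3} with broadcast axes {0}.

// Hypothetical trace under that assumption:
//   output rank 2        ->  shift = 4 - 2 = 2
//   clDNN output tensor  ->  {1, 1, 2, 3}  (rank padded to 4 on the left)
//   fixed_b_axes         ->  {0, 1} from the shift loop, then 0 + shift = 2,
//                            giving {0, 1, 2}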
@@ -1019,17 +1014,18 @@ shared_ptr<runtime::Executable>
if (axis.empty())
{
do_equal_propagation(topology, get_input_name(op), get_output_name(op));
do_equal_propagation(
topology, op->get_input_tensor_name(0), op->get_output_tensor_name(0));
}
else
{
do_bcast_sum_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axis,
false);
}
@@ -1044,16 +1040,17 @@ shared_ptr<runtime::Executable>
if (axis.empty())
{
do_equal_propagation(topology, get_input_name(op), get_output_name(op));
do_equal_propagation(
topology, op->get_input_tensor_name(0), op->get_output_tensor_name(0));
}
else
{
do_product_operation(topology,
get_input_name(op),
get_input_shape(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axis);
}
break;
@@ -1065,16 +1062,16 @@ shared_ptr<runtime::Executable>
const shared_ptr<op::Reshape> op_reshape = static_pointer_cast<op::Reshape>(op);
const AxisVector& reshape_axes = op_reshape->get_input_order();
if ((get_input_type(op) != element::f32) || (get_input_shape(op).size() > 4) ||
(get_output_shape(op).size() > 4))
if ((op->get_input_element_type(0) != element::f32) ||
(op->get_input_shape(0).size() > 4) || (op->get_output_shape(0).size() > 4))
{
do_reshape_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
reshape_axes);
}
else
@@ -1097,15 +1094,15 @@ shared_ptr<runtime::Executable>
}
const cldnn::permute cldnn_permute(
get_output_name(op), get_input_name(op), permute_order);
op->get_output_tensor_name(0), op->get_input_tensor_name(0), permute_order);
topology.add(cldnn_permute);
}
else
{
const cldnn::tensor new_shape =
intelgpu_space::create_cldnn_tensor(get_output_shape(op));
intelgpu_space::create_cldnn_tensor(op->get_output_shape(0));
const cldnn::reshape reshape_op(
get_output_name(op), get_input_name(op), new_shape);
op->get_output_tensor_name(0), op->get_input_tensor_name(0), new_shape);
topology.add(reshape_op);
}
}
@@ -1123,11 +1120,11 @@ shared_ptr<runtime::Executable>
// Empty axis is not a case for do_equal_propagation()
do_all_any_op(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axis,
"lhs && rhs",
values.at(0));
@@ -1145,11 +1142,11 @@ shared_ptr<runtime::Executable>
// Empty axis is not a case for do_equal_propagation()
do_all_any_op(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axis,
"lhs || rhs",
values.at(0));
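All and Any funnel into the same generated-kernel helper; the string argument is the OpenCL reduction expression, and values.at(0) presumably seeds the accumulator with the op's default value (true for All, false for Any). Schematically:

// acc = <initial value taken from the op's default-value constant>
// for each element x over the reduction axes:
//     All: acc = acc && x
//     Any: acc = acc || x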
@@ -1159,25 +1156,27 @@ shared_ptr<runtime::Executable>
{
arguments_check(op, 2, 1);
if (get_input_type(op) != element::f32 || get_input_type(op, 1) != element::f32 ||
get_output_type(op) != element::f32 || get_output_shape(op).size() > 4)
if (op->get_input_element_type(0) != element::f32 ||
op->get_input_element_type(1) != element::f32 ||
op->get_output_element_type(0) != element::f32 ||
op->get_output_shape(0).size() > 4)
{
do_relu_backprop(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_type(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op));
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0));
}
else
{
const cldnn_activation_additional_params& param = {0.f, 0.f};
const cldnn::activation_grad cldnn_activ_grad(get_output_name(op),
get_input_name(op, 1),
get_input_name(op, 0),
const cldnn::activation_grad cldnn_activ_grad(op->get_output_tensor_name(0),
op->get_input_tensor_name(1),
op->get_input_tensor_name(0),
activation_grad_relu,
param);
topology.add(cldnn_activ_grad);
@@ -1248,7 +1247,7 @@ shared_ptr<runtime::Executable>
}
case OP_TYPEID::Relu:
{
const string output_type_name = get_opencl_type_name(get_output_type(op));
const string output_type_name = get_opencl_type_name(op->get_output_element_type(0));
const string convert_to_type = "convert_" + output_type_name;
const string zero_const = convert_to_type + "(0)";
@@ -1260,7 +1259,8 @@ shared_ptr<runtime::Executable>
}
case OP_TYPEID::Sigmoid:
{
const string one_const = "convert_" + get_opencl_type_name(get_output_type(op)) + "(1)";
const string one_const =
"convert_" + get_opencl_type_name(op->get_output_element_type(0)) + "(1)";
do_universal_unary(topology,
op,
one_const + " / (" + one_const + " + exp(-input_var))",
@@ -1297,13 +1297,13 @@ shared_ptr<runtime::Executable>
arguments_check(op, 2, 1);
do_sigmoid_backprop_operation(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op));
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0));
break;
}
case OP_TYPEID::Not:
@@ -1311,11 +1311,11 @@ shared_ptr<runtime::Executable>
arguments_check(op, 1, 1);
do_not_operation(topology,
get_input_name(op),
get_input_shape(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op));
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0));
break;
}
case OP_TYPEID::Greater:
@@ -1366,12 +1366,12 @@ shared_ptr<runtime::Executable>
const CoordinateDiff& pad_below = pad->get_padding_below();
do_pad_operation(topology,
get_input_name(op, 0),
get_input_shape(op),
get_input_name(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_tensor_name(1),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
pad_below);
break;
}
@@ -1384,35 +1384,35 @@ shared_ptr<runtime::Executable>
const double eps = batch_norm->get_eps_value();
do_create_mean(topology,
get_output_name(op, 2), // d_beta
get_output_type(op, 2),
get_input_name(op, 5), // delta
get_input_shape(op, 5),
op->get_output_tensor_name(2), // d_beta
op->get_output_element_type(2),
op->get_input_tensor_name(5), // delta
op->get_input_shape(5),
true);
do_create_variance_back(topology,
get_output_name(op, 1), // d_gamma
get_output_type(op, 1),
op->get_output_tensor_name(1), // d_gamma
op->get_output_element_type(1),
eps,
get_input_name(op, 2), // input
get_input_shape(op, 2),
get_input_name(op, 3), // gamma
get_input_name(op, 4), // beta
get_input_name(op, 5)); // delta
op->get_input_tensor_name(2), // input
op->get_input_shape(2),
op->get_input_tensor_name(3), // gamma
op->get_input_tensor_name(4), // beta
op->get_input_tensor_name(5)); // delta
do_batch_norm_backprop_operation(topology,
get_input_shape(op, 2),
get_input_type(op, 2),
get_input_name(op, 0),
get_input_name(op, 1),
get_input_name(op, 2),
get_input_name(op, 3),
get_input_name(op, 4),
get_input_name(op, 5),
op->get_input_shape(2),
op->get_input_element_type(2),
op->get_input_tensor_name(0),
op->get_input_tensor_name(1),
op->get_input_tensor_name(2),
op->get_input_tensor_name(3),
op->get_input_tensor_name(4),
op->get_input_tensor_name(5),
eps,
get_output_name(op, 0),
get_output_name(op, 1),
get_output_name(op, 2));
op->get_output_tensor_name(0),
op->get_output_tensor_name(1),
op->get_output_tensor_name(2));
break;
}
case OP_TYPEID::BatchNormInference:
......@@ -1427,10 +1427,10 @@ shared_ptr<runtime::Executable>
// Should be removed after fix in clDNN.
// Drop 14.0 of clDNN contains this bug.
bool proceed_with_custom_kernel = false;
const string& gamma = get_input_name(op, 0);
const string& beta = get_input_name(op, 1);
const string& mean = get_input_name(op, 3);
const string& variance = get_input_name(op, 4);
const string& gamma = op->get_input_tensor_name(0);
const string& beta = op->get_input_tensor_name(1);
const string& mean = op->get_input_tensor_name(3);
const string& variance = op->get_input_tensor_name(4);
if ((gamma == beta) || (gamma == mean) || (gamma == variance) || (beta == mean) ||
(beta == variance) || (mean == variance))
@@ -1438,28 +1438,28 @@ shared_ptr<runtime::Executable>
proceed_with_custom_kernel = true;
}
if (proceed_with_custom_kernel || (get_input_shape(op, 2).size() != 4) ||
(get_input_type(op) != ngraph::element::f32))
if (proceed_with_custom_kernel || (op->get_input_shape(2).size() != 4) ||
(op->get_input_element_type(0) != ngraph::element::f32))
{
do_batch_norm_operation(topology,
get_output_name(op),
get_output_type(op),
op->get_output_tensor_name(0),
op->get_output_element_type(0),
eps,
get_input_name(op, 2),
get_input_shape(op, 2),
get_input_name(op, 0),
get_input_name(op, 1),
get_input_name(op, 3),
get_input_name(op, 4));
op->get_input_tensor_name(2),
op->get_input_shape(2),
op->get_input_tensor_name(0),
op->get_input_tensor_name(1),
op->get_input_tensor_name(3),
op->get_input_tensor_name(4));
}
else
{
const cldnn::batch_norm batchnorm(get_output_name(op),
get_input_name(op, 2), // input
get_input_name(op, 3), // mean
get_input_name(op, 4), // variance
get_input_name(op, 0), // gamma
get_input_name(op, 1), // beta
const cldnn::batch_norm batchnorm(op->get_output_tensor_name(0),
op->get_input_tensor_name(2), // input
op->get_input_tensor_name(3), // mean
op->get_input_tensor_name(4), // variance
op->get_input_tensor_name(0), // gamma
op->get_input_tensor_name(1), // beta
eps); // epsilon (float)
topology.add(batchnorm);
}
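For reference, both the custom kernel and the clDNN primitive path compute the standard inference-mode batch normalization, applied per channel:

// out = gamma * (input - mean) / sqrt(variance + eps) + beta
// eps guards the denominator against division by zero.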
@@ -1471,8 +1471,8 @@ shared_ptr<runtime::Executable>
static_pointer_cast<op::BatchNormTraining>(op);
const double eps = bnorm->get_eps_value();
if ((get_input_shape(op, 2).size() != 4) ||
(get_input_type(op) != ngraph::element::f32))
if ((op->get_input_shape(2).size() != 4) ||
(op->get_input_element_type(0) != ngraph::element::f32))
{
string mean_name;
string variance_name;
@@ -1486,21 +1486,21 @@ shared_ptr<runtime::Executable>
{
arguments_check(op, 3, 3);
mean_name = get_output_name(op, 1);
variance_name = get_output_name(op, 2);
mean_name = op->get_output_tensor_name(1);
variance_name = op->get_output_tensor_name(2);
do_create_mean(topology,
mean_name,
get_output_type(op),
get_input_name(op, 2),
get_input_shape(op, 2),
op->get_output_element_type(0),
op->get_input_tensor_name(2),
op->get_input_shape(2),
false);
do_create_variance(topology,
variance_name,
get_output_type(op),
get_input_name(op, 2),
get_input_shape(op, 2),
op->get_output_element_type(0),
op->get_input_tensor_name(2),
op->get_input_shape(2),
mean_name);
}
@@ -1510,18 +1510,18 @@ shared_ptr<runtime::Executable>
{
arguments_check(op, 5, 1);
mean_name = get_input_name(op, 3);
variance_name = get_input_name(op, 4);
mean_name = op->get_input_tensor_name(3);
variance_name = op->get_input_tensor_name(4);
}
do_batch_norm_operation(topology,
get_output_name(op),
get_output_type(op),
op->get_output_tensor_name(0),
op->get_output_element_type(0),
eps,
get_input_name(op, 2),
get_input_shape(op, 2),
get_input_name(op, 0),
get_input_name(op, 1),
op->get_input_tensor_name(2),
op->get_input_shape(2),
op->get_input_tensor_name(0),
op->get_input_tensor_name(1),
mean_name,
variance_name);
}
@@ -1534,23 +1534,23 @@ shared_ptr<runtime::Executable>
{
if (op->get_inputs().size() == 5 && op->get_outputs().size() == 1)
{
const cldnn::batch_norm batchnorm(get_output_name(op),
get_input_name(op, 2), // input
get_input_name(op, 3), // mean
get_input_name(op, 4), // variance
get_input_name(op, 0), // gamma
get_input_name(op, 1), // beta
const cldnn::batch_norm batchnorm(op->get_output_tensor_name(0),
op->get_input_tensor_name(2), // input
op->get_input_tensor_name(3), // mean
op->get_input_tensor_name(4), // variance
op->get_input_tensor_name(0), // gamma
op->get_input_tensor_name(1), // beta
eps); // epsilon (float)
topology.add(batchnorm);
}
else if (op->get_inputs().size() == 3 && op->get_outputs().size() == 3)
{
const string mean_name = get_output_name(op, 1);
const string variance_name = get_output_name(op, 2);
const string mean_name = op->get_output_tensor_name(1);
const string variance_name = op->get_output_tensor_name(2);
// Create a memory for mean as mutable_data to treat it as constant
const cldnn::layout mean_layout = IntelGPULayout::create_cldnn_layout(
get_output_type(op, 1), get_output_shape(op, 1));
op->get_output_element_type(1), op->get_output_shape(1));
const cldnn::memory mean_mem(
cldnn::memory::allocate(*cldnn_engine, mean_layout));
@@ -1559,25 +1559,25 @@ shared_ptr<runtime::Executable>
// Create a memory for variance as mutable_data to treat it as constant
const cldnn::layout variance_layout = IntelGPULayout::create_cldnn_layout(
get_output_type(op, 2), get_output_shape(op, 2));
op->get_output_element_type(2), op->get_output_shape(2));
const cldnn::memory variance_mem(
cldnn::memory::allocate(*cldnn_engine, variance_layout));
const cldnn::mutable_data variance_const(variance_name, variance_mem);
topology.add(variance_const);
const cldnn::batch_norm batchnorm(get_output_name(op),
get_input_name(op, 2), // input
const cldnn::batch_norm batchnorm(op->get_output_tensor_name(0),
op->get_input_tensor_name(2), // input
eps, // epsilon (float)
mean_name,
variance_name,
get_input_name(op, 0), // gamma
get_input_name(op, 1)); // beta
op->get_input_tensor_name(0), // gamma
op->get_input_tensor_name(1)); // beta
topology.add(batchnorm);
// Need to mark this operation as "output" to keep mean and variance
// in cldnn::network
func_output_names.insert(get_output_name(op));
func_output_names.insert(op->get_output_tensor_name(0));
}
else
{
@@ -1602,16 +1602,16 @@ shared_ptr<runtime::Executable>
if ((win_stride.size() > 2) || (pad_below.size() > 2) || (pad_above.size() > 2) ||
(win_dilation.size() > 2) || (data_dilation.size() > 2) ||
(data_dilation.at(0) != 1) || (data_dilation.at(1) != 1) ||
(get_output_type(op) != element::f32))
(op->get_output_element_type(0) != element::f32))
{
do_convolution_operation(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
conv_op->get_padding_below(),
conv_op->get_window_movement_strides(),
conv_op->get_window_dilation_strides(),
@@ -1628,7 +1628,7 @@ shared_ptr<runtime::Executable>
{
cldnn::tensor::value_type input_offset_x = -pad_below.at(1);
cldnn::tensor::value_type input_offset_y = -pad_below.at(0);
std::string op_input_name = get_input_name(op, 0);
std::string op_input_name = op->get_input_tensor_name(0);
if ((pad_below.at(0) != pad_above.at(0)) || (pad_below.at(1) != pad_above.at(1)))
{
@@ -1637,10 +1637,11 @@ shared_ptr<runtime::Executable>
const cldnn::tensor border_pad_below(0, 0, pad_above.at(1), pad_above.at(0), 0);
input_offset_x = 0;
input_offset_y = 0;
op_input_name = op_input_name + "_" + get_output_name(op) + "_bordered";
op_input_name =
op_input_name + "_" + op->get_output_tensor_name(0) + "_bordered";
const cldnn::border cldnn_border(op_input_name,
get_input_name(op, 0),
op->get_input_tensor_name(0),
border_pad_above,
border_pad_below,
cldnn::border_type::zero);
@@ -1651,9 +1652,9 @@ shared_ptr<runtime::Executable>
const cldnn::tensor strides(1, 1, win_stride.at(1), win_stride.at(0));
const cldnn::tensor dilation(1, 1, win_dilation.at(1), win_dilation.at(0));
const cldnn::convolution cldnn_conv(get_output_name(op),
const cldnn::convolution cldnn_conv(op->get_output_tensor_name(0),
op_input_name,
{get_input_name(op, 1)},
{op->get_input_tensor_name(1)},
strides,
input_offset,
dilation);
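The branch above works around clDNN's single symmetric input_offset: when padding below and above differ, a cldnn::border primitive zero-pads the input explicitly and the convolution then runs with a zero offset. Schematically:

// pad_below == pad_above:  convolution with input_offset = -pad_below
// pad_below != pad_above:  input -> cldnn::border (explicit zero padding)
//                                -> cldnn::convolution with input_offset = 0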
@@ -1677,16 +1678,16 @@ shared_ptr<runtime::Executable>
if ((win_stride.size() > 2) || (win_stride.at(0) != 1) || (win_stride.at(1) != 1) ||
(pad_below.size() > 2) || (pad_above.size() > 2) || (data_dilation.size() > 2) ||
(data_dilation.at(0) != 1) || (data_dilation.at(1) != 1) ||
(win_dilation.size() > 2) || (get_output_type(op) != element::f32))
(win_dilation.size() > 2) || (op->get_output_element_type(0) != element::f32))
{
do_convolution_operation(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
conv_op->get_padding_below_forward(),
win_stride,
win_dilation,
@@ -1716,13 +1717,13 @@ shared_ptr<runtime::Executable>
cldnn::tensor::value_type input_offset_x = -pad_above_x;
cldnn::tensor::value_type input_offset_y = -pad_above_y;
std::string op_input_name = get_input_name(op, 0);
std::string op_input_name = op->get_input_tensor_name(0);
string filter_name = get_output_name(op) + "_filter_output";
string filter_name = op->get_output_tensor_name(0) + "_filter_output";
// Create a memory for filter as mutable_data to treat it as constant
const cldnn::layout filter_layout =
IntelGPULayout::create_cldnn_layout(get_output_type(op), get_output_shape(op));
const cldnn::layout filter_layout = IntelGPULayout::create_cldnn_layout(
op->get_output_element_type(0), op->get_output_shape(0));
const cldnn::memory filter_mem(
cldnn::memory::allocate(*cldnn_engine, filter_layout));
@@ -1737,9 +1738,10 @@ shared_ptr<runtime::Executable>
const cldnn::tensor border_pad_below(0, 0, pad_below_x, pad_below_y, 0);
input_offset_x = 0;
input_offset_y = 0;
op_input_name = op_input_name + "_" + get_output_name(op) + "_bordered";
op_input_name =
op_input_name + "_" + op->get_output_tensor_name(0) + "_bordered";
const cldnn::border cldnn_border(op_input_name,
get_input_name(op, 0),
op->get_input_tensor_name(0),
border_pad_above,
border_pad_below,
cldnn::border_type::zero);
@@ -1749,8 +1751,8 @@ shared_ptr<runtime::Executable>
const cldnn::tensor input_offset(0, 0, input_offset_x, input_offset_y, 0);
const cldnn::tensor strides(1, 1, win_dilation.at(1), win_dilation.at(0));
const cldnn::convolution_grad_weights conv_back_flt(get_output_name(op),
get_input_name(op, 1),
const cldnn::convolution_grad_weights conv_back_flt(op->get_output_tensor_name(0),
op->get_input_tensor_name(1),
op_input_name,
{filter_name},
strides,
@@ -1777,17 +1779,17 @@ shared_ptr<runtime::Executable>
(pad_below.size() > 2) || (pad_above.size() > 2) || (data_dilation.size() > 2) ||
(data_dilation.at(0) != 1) || (data_dilation.at(1) != 1) ||
(win_dilation.size() > 2) || (win_dilation.at(0) != 1) ||
(win_dilation.at(1) != 1) || (get_output_type(op) != element::f32) ||
(win_dilation.at(1) != 1) || (op->get_output_element_type(0) != element::f32) ||
((pad_below.at(0) == pad_above.at(0)) && (pad_below.at(1) == pad_above.at(1))))
{
do_convolution_operation(topology,
get_input_name(op, 1),
get_input_shape(op, 1),
get_input_name(op, 0),
get_input_shape(op, 0),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
pad_below,
win_stride,
win_dilation,
@@ -1803,7 +1805,7 @@ shared_ptr<runtime::Executable>
else
{
cldnn::tensor::value_type input_offset_xy = -1;
std::string op_input_name = get_input_name(op, 1);
std::string op_input_name = op->get_input_tensor_name(1);
if ((pad_below.at(0) == pad_above.at(0)) && (pad_below.at(1) == pad_above.at(1)))
{
@@ -1816,10 +1818,11 @@ shared_ptr<runtime::Executable>
// Different input padding for operation workarounded by adding aux layer
const cldnn::tensor crop_pad_below(0, 0, -pad_below.at(1), -pad_below.at(0), 0);
const cldnn::tensor crop_pad_above(0, 0, -pad_above.at(1), -pad_above.at(0), 0);
op_input_name = op_input_name + "_" + get_output_name(op) + "_cropped";
op_input_name =
op_input_name + "_" + op->get_output_tensor_name(0) + "_cropped";
const cldnn::crop cldnn_crop(op_input_name,
get_input_name(op, 1),
op->get_input_tensor_name(1),
crop_pad_below,
crop_pad_above,
cldnn::crop_borders_t());
@@ -1829,9 +1832,10 @@ shared_ptr<runtime::Executable>
const cldnn::tensor input_offset(0, 0, input_offset_xy, input_offset_xy, 0);
const cldnn::tensor strides(1, 1, win_stride.at(1), win_stride.at(0));
const cldnn::convolution_grad_input cldnn_conv_back_data(get_output_name(op),
const cldnn::convolution_grad_input cldnn_conv_back_data(
op->get_output_tensor_name(0),
op_input_name,
{get_input_name(op, 0)},
{op->get_input_tensor_name(0)},
strides,
input_offset);
topology.add(cldnn_conv_back_data);
@@ -1846,11 +1850,11 @@ shared_ptr<runtime::Executable>
const AxisSet& axis = min_op->get_reduction_axes();
do_max_min_operation(topology,
get_input_name(op),
get_input_shape(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axis,
true);
break;
@@ -1863,11 +1867,11 @@ shared_ptr<runtime::Executable>
const AxisSet& axis = max_op->get_reduction_axes();
do_max_min_operation(topology,
get_input_name(op),
get_input_shape(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axis,
false);
break;
@@ -1880,12 +1884,12 @@ shared_ptr<runtime::Executable>
const size_t one_hot_axis = one_hot_op->get_one_hot_axis();
do_one_hot_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
one_hot_axis);
break;
}
@@ -1900,12 +1904,12 @@ shared_ptr<runtime::Executable>
if (index_elem_type == element::i64 || index_elem_type == element::i32)
{
do_arg_max_min_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
reduction_axis,
true);
}
@@ -1913,8 +1917,11 @@ shared_ptr<runtime::Executable>
{
cldnn::arg_max_min::axis_name axis =
reduction_axis == 0 ? cldnn::arg_max_min::y : cldnn::arg_max_min::x;
const cldnn::arg_max_min arg_max_min(
get_output_name(op), get_input_name(op), cldnn::arg_max_min::max, 1, axis);
const cldnn::arg_max_min arg_max_min(op->get_output_tensor_name(0),
op->get_input_tensor_name(0),
cldnn::arg_max_min::max,
1,
axis);
topology.add(arg_max_min);
}
break;
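The cldnn::arg_max_min fallback above (and its ArgMin counterpart below) relies on clDNN's (batch, feature, y, x) layout: for the 2-D tensors handled here, reduction axis 0 runs down the rows and maps to the y spatial, while axis 1 maps to x.

// reduction_axis == 0  ->  cldnn::arg_max_min::y  (reduce across rows)
// reduction_axis != 0  ->  cldnn::arg_max_min::x  (reduce across columns)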
@@ -1930,12 +1937,12 @@ shared_ptr<runtime::Executable>
if (index_elem_type == element::i64 || index_elem_type == element::i32)
{
do_arg_max_min_operation(topology,
get_input_name(op),
get_input_shape(op),
get_input_type(op),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
reduction_axis,
false);
}
@@ -1943,8 +1950,11 @@ shared_ptr<runtime::Executable>
{
cldnn::arg_max_min::axis_name axis =
reduction_axis == 0 ? cldnn::arg_max_min::y : cldnn::arg_max_min::x;
const cldnn::arg_max_min arg_max_min(
get_output_name(op), get_input_name(op), cldnn::arg_max_min::min, 1, axis);
const cldnn::arg_max_min arg_max_min(op->get_output_tensor_name(0),
op->get_input_tensor_name(0),
cldnn::arg_max_min::min,
1,
axis);
topology.add(arg_max_min);
}
break;
@@ -1958,16 +1968,16 @@ shared_ptr<runtime::Executable>
const op::Quantize::RoundMode mode = quant_op->get_round_mode();
do_quantize_operation(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_type(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_input_name(op, 2),
get_input_shape(op, 2),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_input_tensor_name(2),
op->get_input_shape(2),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axes,
mode);
break;
@@ -1980,18 +1990,18 @@ shared_ptr<runtime::Executable>
const AxisSet& axes = dequ_op->get_axes();
do_dequantize_operation(topology,
get_input_name(op, 0),
get_input_shape(op, 0),
get_input_type(op, 0),
get_input_name(op, 1),
get_input_shape(op, 1),
get_input_type(op, 1),
get_input_name(op, 2),
get_input_shape(op, 2),
get_input_type(op, 2),
get_output_name(op),
get_output_shape(op),
get_output_type(op),
op->get_input_tensor_name(0),
op->get_input_shape(0),
op->get_input_element_type(0),
op->get_input_tensor_name(1),
op->get_input_shape(1),
op->get_input_element_type(1),
op->get_input_tensor_name(2),
op->get_input_shape(2),
op->get_input_element_type(2),
op->get_output_tensor_name(0),
op->get_output_shape(0),
op->get_output_element_type(0),
axes);
break;
}
@@ -2001,8 +2011,8 @@ shared_ptr<runtime::Executable>
const shared_ptr<op::LRN> lrn_op = static_pointer_cast<op::LRN>(op);
const cldnn::lrn lrn(get_output_name(op),
get_input_name(op),
const cldnn::lrn lrn(op->get_output_tensor_name(0),
op->get_input_tensor_name(0),
lrn_op->get_nsize(),
lrn_op->get_bias(),
lrn_op->get_alpha(),
......