Unverified Commit 0c813cf2 authored by Scott Cyphers's avatar Scott Cyphers Committed by GitHub

Switch some get_inputs uses to use the newer inputs (#2968)

* Switch some get_inputs uses to use the newer inputs

* Review comments
parent 513f8de6
......@@ -84,7 +84,7 @@ namespace ngraph
output_zero_point->get_element_type(),
axes,
op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN);
return q_dot;
return move(q_dot);
}
}
......
......@@ -99,11 +99,11 @@ Shape op::GroupConvolution::get_weights_dimensions() const
const size_t OC = 0;
const size_t OC_IN_OUTPUT = 1;
const size_t IC = 1;
Shape weights_shape_groups{get_inputs().at(1).get_shape()};
Shape weights_shape_groups{get_input_shape(1)};
// adjust output and channel given a number of groups
weights_shape_groups.at(OC) = get_shape().at(OC_IN_OUTPUT) / get_groups();
weights_shape_groups.at(IC) = get_inputs().at(0).get_shape().at(IC) / get_groups();
weights_shape_groups.at(IC) = get_input_shape(0).at(IC) / get_groups();
// push_front the number of groups
weights_shape_groups.insert(weights_shape_groups.begin(), get_groups());
return weights_shape_groups;
......
......@@ -87,7 +87,7 @@ namespace ngraph
for (auto entry : m_pattern_map)
{
// leaf label
if (entry.first->get_inputs().empty())
if (entry.first->get_input_size() == 0)
{
label_exclusions.push_back(entry.second);
}
......
......@@ -255,7 +255,7 @@ namespace ngraph
auto arg2_buffer_index = external_function->get_buffer_index(args[2].get_name());
auto arg3_buffer_index = external_function->get_buffer_index(args[3].get_name());
auto out_buffer_index = external_function->get_buffer_index(out[0].get_name());
size_t arg3_size = node->get_inputs()[3].get_tensor().size();
size_t arg3_size = node->input(3).get_tensor().size();
if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
{
......@@ -324,7 +324,7 @@ namespace ngraph
auto arg1_buffer_index = external_function->get_buffer_index(args[1].get_name());
auto arg2_buffer_index = external_function->get_buffer_index(args[2].get_name());
auto out_buffer_index = external_function->get_buffer_index(out[0].get_name());
size_t arg2_size = node->get_inputs()[2].get_tensor().size();
size_t arg2_size = node->input(2).get_tensor().size();
if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
{
......
......@@ -327,7 +327,7 @@ namespace ngraph
auto arg5_buffer_index =
external_function->get_buffer_index(args[5].get_name());
auto out0_buffer_index = external_function->get_buffer_index(out[0].get_name());
size_t arg3_size = node->get_inputs()[3].get_tensor().size();
size_t arg3_size = node->input(3).get_tensor().size();
auto scales_size = shape_size(args[4].get_shape());
auto sum_scales_size = shape_size(args[5].get_shape());
......@@ -447,7 +447,7 @@ namespace ngraph
auto arg5_buffer_index =
external_function->get_buffer_index(args[5].get_name());
auto out0_buffer_index = external_function->get_buffer_index(out[0].get_name());
size_t arg3_size = node->get_inputs()[3].get_tensor().size();
size_t arg3_size = node->input(3).get_tensor().size();
auto scales_size = shape_size(args[4].get_shape());
auto sum_scales_size = shape_size(args[5].get_shape());
......
......@@ -31,7 +31,7 @@ static void visualize_layout_format(const Node& node, ostream& ss)
{
try
{
auto input_desc = node.get_inputs().at(0).get_tensor().get_tensor_layout();
auto input_desc = node.input(0).get_tensor().get_tensor_layout();
auto result_desc = node.get_output_tensor_ptr()->get_tensor_layout();
auto in_tvl = static_pointer_cast<runtime::cpu::LayoutDescriptor>(input_desc);
......
......@@ -94,8 +94,8 @@ void op::BatchMatMulTranspose::generate_adjoints(autodiff::Adjoints& adjoints,
{
auto delta = deltas.at(0); // NxIxK
auto arg0 = get_inputs().at(0).get_output().get_node(); // NxIxJ (maybe transposed)
auto arg1 = get_inputs().at(1).get_output().get_node(); // NxJxK (maybe transposed)
auto arg0 = input(0).get_source_output().get_node_shared_ptr(); // NxIxJ (maybe transposed)
auto arg1 = input(1).get_source_output().get_node_shared_ptr(); // NxJxK (maybe transposed)
// If arg1 is already transposed, it does not need to be transposed again
auto delta_dot_arg1 =
......
......@@ -85,10 +85,10 @@ Shape op::GroupConvolutionBias::get_weights_dimensions()
const size_t OC_IN_OUTPUT = 1;
const size_t IC = 1;
Shape weights_shape_groups{get_inputs().at(1).get_shape()};
Shape weights_shape_groups{get_input_shape(1)};
weights_shape_groups.at(OC) = get_shape().at(OC_IN_OUTPUT) / get_groups();
weights_shape_groups.at(IC) = get_inputs().at(0).get_shape().at(IC) / get_groups();
weights_shape_groups.at(IC) = get_input_shape(0).at(IC) / get_groups();
// push_front the number of groups
weights_shape_groups.insert(weights_shape_groups.begin(), get_groups());
......
......@@ -534,8 +534,12 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_batch_norm_relu()
<< m.get_match_root()->get_name();
auto pattern_map = m.get_pattern_map();
auto m_bn = std::static_pointer_cast<ngraph::op::BatchNormTraining>(
m.get_match_root()->get_argument(0)->get_inputs().at(0).get_output().get_node());
auto m_bn =
std::static_pointer_cast<ngraph::op::BatchNormTraining>(m.get_match_root()
->get_argument(0)
->input(0)
.get_source_output()
.get_node_shared_ptr());
if (!mkldnn_utils::can_use_mkldnn_batchnorm_fprop(m_bn.get()))
{
......@@ -603,7 +607,7 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_batch_norm_relu_global_sta
auto pattern_map = m.get_pattern_map();
auto bn_match = m.get_match_root()->get_inputs().at(0).get_output().get_node();
auto bn_match = m.get_match_root()->input(0).get_source_output().get_node_shared_ptr();
if (bn_match->get_users().size() > 1)
{
NGRAPH_DEBUG << "Relu isn't the only user of BatchNorm's output";
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment