Commit b466027e authored by Diego Caballero, committed by Scott Cyphers

[CPU] Fix ambiguous 'op' namespace. (#2683)

parent 105f03bc
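For context, a minimal sketch of the kind of ambiguity this commit resolves; the class names and using-directives below are illustrative assumptions, not the exact nGraph sources. When two namespaces named 'op' are visible in the same scope (for example ngraph::op and a backend-local op namespace), an unqualified op:: reference no longer has a unique meaning, and fully qualifying it as ngraph::op:: resolves the lookup.

// Illustrative only: names and using-directives are assumptions, not the exact nGraph layout.
#include <iostream>

namespace ngraph
{
    namespace op
    {
        struct Relu { };                   // core op namespace
    }
    namespace runtime
    {
        namespace cpu
        {
            namespace op
            {
                struct ConvertLayout { };  // backend-local op namespace
            }
        }
    }
}

using namespace ngraph;                    // both directives bring a namespace named 'op' into scope
using namespace ngraph::runtime::cpu;

int main()
{
    // op::Relu r;                         // error: reference to 'op' is ambiguous
    ngraph::op::Relu r;                    // fully qualified, as done throughout this commit
    (void)r;
    std::cout << "ok" << std::endl;
    return 0;
}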
@@ -38,7 +38,7 @@ namespace ngraph
     auto& out_tensor = external_function->get_tensor_data(out[0].get_name());
     size_t count = out[0].get_size();
-    auto alpha = static_cast<const op::BoundedRelu*>(node)->get_alpha();
+    auto alpha = static_cast<const ngraph::op::BoundedRelu*>(node)->get_alpha();
     if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
     {
...
@@ -38,7 +38,7 @@ namespace ngraph
     auto& out_tensor = external_function->get_tensor_data(out[0].get_name());
     size_t count = out[0].get_size();
-    auto alpha = static_cast<const op::LeakyRelu*>(node)->get_alpha();
+    auto alpha = static_cast<const ngraph::op::LeakyRelu*>(node)->get_alpha();
     if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
     {
...
@@ -313,7 +313,7 @@ namespace ngraph
     auto arg0_shape = args[0].get_shape();
     auto arg1_shape = args[1].get_shape();
     auto daxes = quantize->get_axes();
-    op::Quantize::RoundMode round_mode = quantize->get_round_mode();
+    ngraph::op::Quantize::RoundMode round_mode = quantize->get_round_mode();
     if (args[0].get_element_type() == element::f32)
     {
...
@@ -705,7 +705,7 @@ bool runtime::cpu::mkldnn_utils::use_mkldnn_kernel(const ngraph::Node* node)
 void runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(Node* node)
 {
-    auto ngraph_op = static_cast<op::Op*>(node);
+    auto ngraph_op = static_cast<ngraph::op::Op*>(node);
     auto op_annotations = std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
     op_annotations->set_mkldnn_op(true);
     ngraph_op->set_op_annotations(op_annotations);
...
This diff is collapsed.
@@ -64,7 +64,7 @@ bool runtime::cpu::pass::CPUMemoryOptimization::run_on_function(std::shared_ptr<
 {
     if (n->description() == "Concat")
     {
-        auto concat = std::static_pointer_cast<op::Concat>(n);
+        auto concat = std::static_pointer_cast<ngraph::op::Concat>(n);
         auto shape = concat->get_input_shape(0);
         auto axis = concat->get_concatenation_axis();
         auto product = 1;
@@ -134,7 +134,7 @@ bool runtime::cpu::pass::CPUMemoryOptimization::run_on_function(std::shared_ptr<
 {
     if (arg->is_op())
     {
-        auto op = std::static_pointer_cast<op::Op>(arg);
+        auto op = std::static_pointer_cast<ngraph::op::Op>(arg);
         auto annotation = op->get_op_annotations();
         if (annotation && annotation->get_in_place_oi_pairs().size() > 0)
@@ -177,7 +177,7 @@ bool runtime::cpu::pass::CPUMemoryOptimization::run_on_function(std::shared_ptr<
 {
     if (user->is_op())
     {
-        auto op = std::static_pointer_cast<op::Op>(user);
+        auto op = std::static_pointer_cast<ngraph::op::Op>(user);
         if (auto op_annotations = op->get_op_annotations())
         {
             if (op_annotations->get_in_place_oi_pairs().size() > 0)
@@ -227,7 +227,7 @@ bool runtime::cpu::pass::CPUMemoryOptimization::run_on_function(std::shared_ptr<
 {
     if (n->description() == "Slice")
     {
-        auto slice = std::static_pointer_cast<op::Slice>(n);
+        auto slice = std::static_pointer_cast<ngraph::op::Slice>(n);
         auto in_shape = slice->get_input_shape(0);
         auto out_shape = slice->get_output_shape(0);
         auto strides = slice->get_strides();
...