Commit 7d250919 authored by Adam Procter, committed by Scott Cyphers

Re-enable -Wswitch and -Wswitch-enum (#3207)

* Re-enable -Wswitch and -Wswitch-enum

* Collapse identical switch cases

* Fix (I think) compilation errors

* Partially fix missing cases in GPU xformer, so clang will show me the rest of them...

* One last enum value
parent 824d6144
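
For context, -Wswitch warns about enumerators missing from a switch only when there is no default: label, while -Wswitch-enum warns about any enumerator that is not listed explicitly, even if a default: would catch it at runtime. The toy sketch below is illustrative only, not code from this repository (the Fruit enum and describe() are made-up names); it shows the pattern the diffs in this commit follow: identical cases collapsed onto one shared body, and sentinel or unsupported enumerators listed in front of default: so both warnings stay quiet while the runtime error path is preserved.

// Toy sketch, assuming a hypothetical enum, of the switch style this commit adopts
// so that -Wswitch and -Wswitch-enum both stay clean.
#include <stdexcept>
#include <string>

enum class Fruit { Apple, Pear, Plum, Count };

std::string describe(Fruit f)
{
    switch (f)
    {
    // Identical cases collapsed onto a single body, as in op::Max / op::Min.
    case Fruit::Apple:
    case Fruit::Pear: return "pome";
    case Fruit::Plum: return "drupe";
    // Sentinel/unsupported values are listed explicitly before default:, so
    // -Wswitch-enum sees every enumerator handled; default: still keeps the
    // error path for out-of-range values.
    case Fruit::Count:
    default: throw std::runtime_error("describe: unsupported Fruit");
    }
}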
@@ -28,8 +28,6 @@ add_compile_options(-Wno-global-constructors)
add_compile_options(-Wno-exit-time-destructors)
add_compile_options(-Wno-missing-prototypes)
add_compile_options(-Wno-missing-noreturn)
add_compile_options(-Wno-switch)
add_compile_options(-Wno-switch-enum)
add_compile_options(-Wno-covered-switch-default)
add_compile_options(-Wno-undef)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
@@ -51,9 +51,8 @@ shared_ptr<Node> op::Max::get_default_value() const
case element::Type_t::boolean:
return make_constant_from_string("0", get_element_type(), get_shape());
case element::Type_t::bf16:
return make_constant_from_string("-INFINITY", get_element_type(), get_shape());
case element::Type_t::f16:
case element::Type_t::f32:
return make_constant_from_string("-INFINITY", get_element_type(), get_shape());
case element::Type_t::f64:
return make_constant_from_string("-INFINITY", get_element_type(), get_shape());
case element::Type_t::i8:
@@ -80,6 +79,8 @@ shared_ptr<Node> op::Max::get_default_value() const
case element::Type_t::u64:
return make_constant_from_string(
to_string(numeric_limits<uint64_t>::min()), get_element_type(), get_shape());
case element::Type_t::undefined:
case element::Type_t::dynamic:
default: throw runtime_error("Max default value not defined for type");
}
}
@@ -51,9 +51,8 @@ shared_ptr<Node> op::Min::get_default_value() const
case element::Type_t::boolean:
return make_constant_from_string("1", get_element_type(), get_shape());
case element::Type_t::bf16:
return make_constant_from_string("INFINITY", get_element_type(), get_shape());
case element::Type_t::f16:
case element::Type_t::f32:
return make_constant_from_string("INFINITY", get_element_type(), get_shape());
case element::Type_t::f64:
return make_constant_from_string("INFINITY", get_element_type(), get_shape());
case element::Type_t::i8:
@@ -80,6 +79,8 @@ shared_ptr<Node> op::Min::get_default_value() const
case element::Type_t::u64:
return make_constant_from_string(
to_string(numeric_limits<uint64_t>::max()), get_element_type(), get_shape());
case element::Type_t::undefined:
case element::Type_t::dynamic:
default: throw runtime_error("Min default value not defined for type");
}
}
@@ -3027,6 +3027,7 @@ namespace ngraph
case ngraph::op::PadMode::REFLECT:
pad_mode_string = "ngraph::op::PadMode::REFLECT";
break;
case ngraph::op::PadMode::SYMMETRIC: throw ngraph_error("Unsupported PadMode");
}
writer << "reference::pad<" << out[0].get_type() << ">(" << args[0].get_name()
<< ",\n";
@@ -3470,6 +3471,7 @@ namespace ngraph
func_block += "d_" + out_denom + " = 1;\n";
}
break;
case ngraph::op::SigmoidMultiply::FunctionType::NumTypes:
default:
throw ngraph_error(
"generate_sigmoid_mul_func input function type not supported");
@@ -1290,6 +1290,18 @@ static void dump_one_kernel_with_type(runtime::cpu::CPU_DebugTracer& debug_tracer,
t_attrs.m_t_shape,
in_out);
break;
case element::Type_t::undefined:
case element::Type_t::dynamic:
case element::Type_t::boolean:
case element::Type_t::bf16:
case element::Type_t::f16:
case element::Type_t::f64:
case element::Type_t::i16:
case element::Type_t::i64:
case element::Type_t::u16:
case element::Type_t::u32:
case element::Type_t::u64:
default: break;
}
}
@@ -1613,8 +1625,9 @@ void runtime::cpu::CPU_ExternalFunction::build(ngraph::pass::PassConfig& pass_config)
case TensorRole::INTERMEDIATE: return string("TensorRole::INTERMEDIATE");
case TensorRole::CONSTANT: return string("TensorRole::CONSTANT");
case TensorRole::OUTPUT: return string("TensorRole::OUTPUT");
case TensorRole::UNKNOWN:
default: throw runtime_error("unhandled CPU tensor role");
}
throw runtime_error("unhandled CPU tensor role");
};
//dump the tensor roles to debug manifest
@@ -578,8 +578,9 @@ void runtime::cpu::pass::CPUMemoryAssignment::liveness_analysis(
case TensorRole::INTERMEDIATE: return string("TensorRole::INTERMEDIATE");
case TensorRole::CONSTANT: return string("TensorRole::CONSTANT");
case TensorRole::OUTPUT: return string("TensorRole::OUTPUT");
case TensorRole::UNKNOWN:
default: throw runtime_error("unhandled CPU tensor role");
}
throw runtime_error("unhandled CPU tensor role");
};
//liveness analysis
@@ -58,6 +58,10 @@ namespace ngraph
case CUDNN_DATA_INT32:
r = m_host_parameters->cache(static_cast<int32_t>(value));
break;
case CUDNN_DATA_HALF:
case CUDNN_DATA_INT8x4:
case CUDNN_DATA_UINT8:
case CUDNN_DATA_UINT8x4:
default:
throw std::runtime_error(
"Encountered unhandled cudnnDataType_t during compilation.");
@@ -122,6 +122,10 @@ static void random_init(shared_ptr<runtime::Tensor> tv)
case element::Type_t::u16: init_int_tv<uint16_t>(tv, 0, 1); break;
case element::Type_t::u32: init_int_tv<uint32_t>(tv, 0, 1); break;
case element::Type_t::u64: init_int_tv<uint64_t>(tv, 0, 1); break;
case element::Type_t::undefined:
case element::Type_t::dynamic:
case element::Type_t::bf16:
case element::Type_t::f16:
default: throw runtime_error("unsupported type");
}
}