Unverified Commit 8855c723 authored by Robert Kimball, committed by GitHub

fix compiler warnings (#1891)

parent b3b5b9fd
......@@ -43,3 +43,6 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-padded")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-sign-compare")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-conversion")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-double-promotion")
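For readers unfamiliar with the flag being silenced here, -Wdouble-promotion fires when a float operand is implicitly widened to double; a minimal illustration of the pattern (not code from this repository):

```cpp
// Minimal illustration of what -Wdouble-promotion flags: the double literal
// 2.0 forces x to be promoted to double before the multiply. The hunk above
// adds the suppression next to the other -Wno-* flags instead of changing
// the code that triggers it.
float scale(float x)
{
    return x * 2.0; // x promoted to double; the result narrows back to float
}
```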
......@@ -72,7 +72,6 @@ void ngraph::op::BatchNormInference::validate_and_infer_types()
auto bn_input_shape = get_input_shape(INPUT);
BatchNormBase::validate_and_infer_types();
auto in_size = get_input_size();
auto& et = get_input_element_type(INPUT);
set_output_size(1);
set_output_type(0, et, bn_input_shape);
......@@ -87,7 +86,6 @@ void ngraph::op::BatchNormTraining::validate_and_infer_types()
auto bn_input_shape = get_input_shape(INPUT);
BatchNormBase::validate_and_infer_types();
auto in_size = get_input_size();
auto& et = get_input_element_type(INPUT);
Shape channel_shape{bn_input_shape[1]};
set_output_size(3);
......
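The two BatchNorm hunks above each shrink by one line, consistent with dropping a local (likely in_size) that is assigned but never read; a hedged sketch of the warning such a deletion silences (names are illustrative, not from the commit):

```cpp
#include <vector>

// Sketch of an -Wunused-variable / -Wunused-but-set-variable case: the local
// below is computed and then never read, so the fix is simply to delete it.
void validate_sketch(const std::vector<int>& inputs)
{
    // auto in_size = inputs.size(); // never used afterwards -> removed
    (void)inputs;
}
```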
......@@ -182,17 +182,17 @@ runtime::cpu::CPU_ExternalFunction::CPU_ExternalFunction(
const shared_ptr<ngraph::Function>& function, bool release_function)
: m_function(function)
, m_release_function(release_function)
, m_use_tbb(std::getenv("NGRAPH_CPU_USE_TBB") != nullptr)
, m_compiled_function(nullptr)
, m_emit_timing(false)
, m_function_name(function->get_name())
, m_is_built(false)
, m_use_tbb(std::getenv("NGRAPH_CPU_USE_TBB") != nullptr)
#if !defined(NGRAPH_DEX_ONLY)
, m_is_compiled(false)
, m_direct_execution(!std::getenv("NGRAPH_CODEGEN"))
#else
, m_direct_execution(true)
#endif
, m_compiled_function(nullptr)
, m_function_name(function->get_name())
, m_is_built(false)
{
}
......
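The constructor hunk above reorders the member-initializer list: members are always constructed in the order they are declared in the class, and compilers emit -Wreorder when the list disagrees with that order. A small illustration with hypothetical names:

```cpp
// Members are initialized in declaration order regardless of how the
// mem-initializer list is written, so keeping the list in declaration order
// avoids -Wreorder and makes dependencies between members obvious.
struct ReorderSketch
{
    ReorderSketch()
        : m_first(1)            // listed in the same order as declared below
        , m_second(m_first + 1) // safe: m_first is already initialized
    {
    }

    int m_first;
    int m_second;
};
```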
......@@ -212,7 +212,6 @@ namespace ngraph
std::string emit_op_as_function(const Node&, const std::string& function_name);
std::string strip_comments(const std::string&);
bool m_is_compiled;
std::unique_ptr<codegen::Compiler> m_compiler;
std::unique_ptr<codegen::ExecutionEngine> m_execution_engine;
......@@ -229,7 +228,10 @@ namespace ngraph
bool m_emit_timing;
bool m_use_tbb;
#if !defined(NGRAPH_DEX_ONLY)
bool m_is_compiled;
bool m_direct_execution;
#endif
EntryPoint m_compiled_function;
std::unordered_map<std::string, std::string> m_variable_name_map;
......@@ -260,7 +262,6 @@ namespace ngraph
std::list<std::pair<std::reference_wrapper<void*>, size_t>> function_output_index;
std::unordered_map<std::string, std::shared_ptr<CPU_ExternalFunction>> callees;
bool m_is_built;
bool m_direct_execution;
std::vector<runtime::PerformanceCounter> m_perf_counters;
#if defined(NGRAPH_HALIDE)
......
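The header hunks above relocate m_is_compiled (and m_direct_execution) next to the #if !defined(NGRAPH_DEX_ONLY) region; keeping a conditionally used field's declaration under the same guard as the code that uses it, and in an order that matches the constructor, is what keeps the reorder and unused-member warnings quiet. A hedged sketch of that pattern with an illustrative class, not the real header:

```cpp
// Illustrative sketch only: a field that is only meaningful in codegen builds
// is declared under the same guard as the code that reads it, so DEX-only
// builds never see an unused member.
class GuardedMemberSketch
{
public:
#if !defined(NGRAPH_DEX_ONLY)
    bool is_compiled() const { return m_is_compiled; }
#endif

private:
#if !defined(NGRAPH_DEX_ONLY)
    bool m_is_compiled = false; // only used when codegen is available
#endif
};
```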
......@@ -21,7 +21,7 @@ using namespace ngraph;
shared_ptr<Node> runtime::cpu::op::HalideOp::copy_with_new_args(const NodeVector& new_args) const
{
return make_shared<HalideOp>(new_args, ops, output_type, output_shape);
return make_shared<HalideOp>(new_args, m_ops, m_output_type, m_output_shape);
}
runtime::cpu::op::HalideOp::HalideOp(const NodeVector& args,
......@@ -29,14 +29,14 @@ runtime::cpu::op::HalideOp::HalideOp(const NodeVector& args,
const element::Type& out_type,
const Shape& out_shape)
: Op("HalideOp", check_single_output_args(args))
, ops(ops)
, output_type(out_type)
, output_shape(out_shape)
, m_ops(ops)
, m_output_type(out_type)
, m_output_shape(out_shape)
{
constructor_validate_and_infer_types();
}
void runtime::cpu::op::HalideOp::validate_and_infer_types()
{
set_output_type(0, output_type, output_shape);
set_output_type(0, m_output_type, m_output_shape);
}
......@@ -42,11 +42,11 @@ namespace ngraph
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
const std::list<std::shared_ptr<Node>>& get_ops() const { return ops; }
const std::list<std::shared_ptr<Node>>& get_ops() const { return m_ops; }
private:
std::list<std::shared_ptr<Node>> ops;
element::Type output_type;
Shape output_shape;
std::list<std::shared_ptr<Node>> m_ops;
element::Type m_output_type;
Shape m_output_shape;
};
}
}
......
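The HalideOp hunks above rename the data members with an m_ prefix; with the old names, constructor parameters such as ops shadowed the members of the same name, which is the kind of thing -Wshadow-style warnings report. A small illustration with a hypothetical class:

```cpp
#include <list>
#include <utility>

// Hypothetical illustration of the shadowing the rename removes: with a
// member also named ops, the parameter would hide it inside the constructor;
// the m_ prefix keeps the two names distinct.
class ShadowSketch
{
public:
    explicit ShadowSketch(std::list<int> ops)
        : m_ops(std::move(ops)) // parameter no longer shadows the member
    {
    }

private:
    std::list<int> m_ops;
};
```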
......@@ -102,7 +102,10 @@ size_t bfloat16::size() const
bool bfloat16::operator==(const bfloat16& other) const
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
return (static_cast<float>(*this) == static_cast<float>(other));
#pragma clang diagnostic pop
}
bool bfloat16::operator<(const bfloat16& other) const
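The operator== hunk above wraps an intentional exact float comparison in clang diagnostic pragmas so that -Wfloat-equal stays enabled everywhere else; the same pattern in isolation (sketch only):

```cpp
// Exact equality is intended here, so -Wfloat-equal is suppressed only around
// this one comparison. The pragmas are clang-specific; other compilers treat
// them as unknown pragmas.
bool exactly_equal(float a, float b)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
    return a == b;
#pragma clang diagnostic pop
}
```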
......@@ -127,12 +130,17 @@ bool bfloat16::operator>=(const bfloat16& other) const
bfloat16::operator float() const
{
float result = 0;
uint16_t* u16_ptr = reinterpret_cast<uint16_t*>(&result);
// float result = 0;
// uint16_t* u16_ptr = reinterpret_cast<uint16_t*>(&result);
// Treat the system as little endian (Intel x86 family)
u16_ptr[1] = m_value;
return result;
// // Treat the system as little endian (Intel x86 family)
// u16_ptr[1] = m_value;
return static_cast<float>(static_cast<uint32_t>(m_value) << 16);
}
bfloat16::operator double() const
{
return static_cast<float>(m_value);
}
std::ostream& operator<<(std::ostream& out, const bfloat16& obj)
......
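For context on the operator float() hunk above: bfloat16 stores the upper 16 bits of an IEEE-754 binary32 value, so the conventional bit-level widening places those bits in the high half of a 32-bit word and reinterprets the bytes. A hedged sketch of that conversion, not code from this commit:

```cpp
#include <cstdint>
#include <cstring>

// Conventional bfloat16 -> float widening: shift the stored 16 bits into the
// top half of a 32-bit word and reinterpret the bytes as a float. memcpy is
// used instead of pointer casts to avoid strict-aliasing problems.
float bfloat16_bits_to_float(uint16_t bits)
{
    uint32_t widened = static_cast<uint32_t>(bits) << 16;
    float result;
    std::memcpy(&result, &widened, sizeof(result));
    return result;
}
```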
......@@ -32,6 +32,7 @@ namespace ngraph
public:
bfloat16() {}
bfloat16(float value, bool rounding = false);
bfloat16(const bfloat16&) = default;
bfloat16& operator=(const bfloat16&) = default;
virtual ~bfloat16() {}
std::string to_string() const;
......@@ -43,6 +44,7 @@ namespace ngraph
bool operator>(const bfloat16& other) const;
bool operator>=(const bfloat16& other) const;
operator float() const;
operator double() const;
static std::vector<float> to_float_vector(const std::vector<bfloat16>&);
static std::vector<bfloat16> from_float_vector(const std::vector<float>&);
......
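A short usage sketch of the interface declared above (hypothetical call sites; assumes the bfloat16 header from this repository is included):

```cpp
// Hypothetical usage of the declarations above: the defaulted copy
// constructor and the new operator double() make the conversions explicit.
double widen_example()
{
    ngraph::bfloat16 a(1.5f);            // float constructor, default rounding
    ngraph::bfloat16 b(a);               // defaulted copy constructor
    float f = static_cast<float>(b);     // existing operator float()
    double d = static_cast<double>(b);   // new operator double()
    return d + f;
}
```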