Unverified commit bbb00715 authored by aslepko, committed by GitHub

Merge branch 'master' into aslepko/ci

parents b9429dee f490903b
@@ -28,8 +28,6 @@ add_compile_options(-Wno-global-constructors)
 add_compile_options(-Wno-exit-time-destructors)
 add_compile_options(-Wno-missing-prototypes)
 add_compile_options(-Wno-missing-noreturn)
-add_compile_options(-Wno-switch)
-add_compile_options(-Wno-switch-enum)
 add_compile_options(-Wno-covered-switch-default)
 add_compile_options(-Wno-undef)
 if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
......
@@ -43,14 +43,26 @@ namespace ngraph
                 ASSERT_VALID_ARGUMENT(node, !(lambd < 0.0f))
                     << " The provided 'lambd' value:" << lambd << " must not be negative.";
-                const auto negative_lambd = ngraph::op::Constant::create(
-                    input->get_element_type(), input->get_shape(), {-lambd});
+                std::shared_ptr<ngraph::op::Constant> negative_lambd;
+                const auto input_element_type = input->get_element_type();
+                if (input_element_type.is_signed())
+                {
+                    negative_lambd = ngraph::op::Constant::create(
+                        input_element_type, input->get_shape(), {-lambd});
+                }
+                else
+                {
+                    // Passing -lambd to unsigned type constant will cause an overflow.
+                    // For unsigned types the lowest possible value is 0.
+                    negative_lambd = ngraph::op::Constant::create(
+                        input_element_type, input->get_shape(), {0});
+                }
                 const auto positive_lambd = ngraph::op::Constant::create(
-                    input->get_element_type(), input->get_shape(), {lambd});
+                    input_element_type, input->get_shape(), {lambd});
                 const auto bias_tensor = ngraph::op::Constant::create(
-                    input->get_element_type(), input->get_shape(), {bias});
+                    input_element_type, input->get_shape(), {bias});
                 // Create a mask indicating locations of values that need to be adjusted
                 // by adding and subtracting bias
@@ -63,9 +75,9 @@ namespace ngraph
                 // Convert from bool to the input type to be able to multiply adjusted inputs
                 // by the created masks
                 values_below_neg_lambd = std::make_shared<ngraph::op::Convert>(
-                    values_below_neg_lambd, input->get_element_type());
+                    values_below_neg_lambd, input_element_type);
                 values_above_pos_lambd = std::make_shared<ngraph::op::Convert>(
-                    values_above_pos_lambd, input->get_element_type());
+                    values_above_pos_lambd, input_element_type);
                 std::shared_ptr<ngraph::Node> input_minus_bias = input - bias_tensor;
                 std::shared_ptr<ngraph::Node> input_plus_bias = input + bias_tensor;
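The comment in the new branch is the heart of this fix: negating `lambd` and storing it in a constant of an unsigned element type would wrap around to a huge positive value rather than a small negative one, so the lower Shrink threshold is clamped to 0 instead. A standalone sketch of the failure mode and the clamp (illustrative only, not the importer's API):

```cpp
#include <cstdint>
#include <iostream>

int main()
{
    float lambd = 1.5f;

    // Converting a negative integer to an unsigned type wraps modulo 2^N
    // (and converting a negative float directly to unsigned is undefined
    // behavior), so -lambd must never reach an unsigned constant.
    uint8_t wrapped = static_cast<uint8_t>(-static_cast<int32_t>(lambd));
    std::cout << static_cast<int>(wrapped) << '\n'; // prints 255, not -1

    // The clamp applied above: for unsigned element types the lower Shrink
    // threshold becomes 0, the lowest representable value.
    bool is_signed = false; // stand-in for input_element_type.is_signed()
    float lower = is_signed ? -lambd : 0.0f;
    std::cout << lower << '\n'; // prints 0
}
```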
......
@@ -102,28 +102,28 @@ void ngraph::traverse_nodes(const NodeVector& subgraph_results,
     }
 }

-NodeVector ngraph::find_common_args(std::shared_ptr<Node> target, std::shared_ptr<Node> replacement)
+NodeVector ngraph::find_common_args(std::shared_ptr<Node> node1, std::shared_ptr<Node> node2)
 {
-    std::unordered_set<std::shared_ptr<Node>> target_args;
+    std::unordered_set<std::shared_ptr<Node>> node1_args;

-    auto compute_target_args = [&target_args](const std::shared_ptr<Node> node) {
-        target_args.insert(node);
+    auto compute_node1_args = [&node1_args](const std::shared_ptr<Node> node) {
+        node1_args.insert(node);
     };

-    traverse_nodes({target}, compute_target_args, false, NodeVector{});
+    traverse_nodes({node1}, compute_node1_args, false, NodeVector{});

-    std::unordered_set<std::shared_ptr<Node>> replacement_args;
+    std::unordered_set<std::shared_ptr<Node>> node2_args;

-    auto compute_replacement_args = [&replacement_args](const std::shared_ptr<Node> node) {
-        replacement_args.insert(node);
+    auto compute_node2_args = [&node2_args](const std::shared_ptr<Node> node) {
+        node2_args.insert(node);
     };

-    traverse_nodes({replacement}, compute_replacement_args, false, NodeVector{});
+    traverse_nodes({node2}, compute_node2_args, false, NodeVector{});

     NodeVector common_args;
-    for (auto e : target_args)
+    for (auto e : node1_args)
     {
-        if (replacement_args.count(e) > 0)
+        if (node2_args.count(e) > 0)
         {
             common_args.push_back(e);
         }
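Renaming `target`/`replacement` to `node1`/`node2` reflects that this helper is symmetric: it is a plain set intersection over two graph traversals, independent of which node replaces which. The same shape in miniature (generic sketch, `traverse` standing in for `ngraph::traverse_nodes`):

```cpp
#include <memory>
#include <unordered_set>
#include <vector>

// Gather everything reachable from each root, then intersect the two sets.
template <typename Node, typename Traverse>
std::vector<std::shared_ptr<Node>> common_args(std::shared_ptr<Node> node1,
                                               std::shared_ptr<Node> node2,
                                               Traverse traverse)
{
    std::unordered_set<std::shared_ptr<Node>> node1_args;
    traverse(node1, [&](const std::shared_ptr<Node>& n) { node1_args.insert(n); });

    std::unordered_set<std::shared_ptr<Node>> node2_args;
    traverse(node2, [&](const std::shared_ptr<Node>& n) { node2_args.insert(n); });

    std::vector<std::shared_ptr<Node>> common;
    for (const auto& n : node1_args)
    {
        if (node2_args.count(n) > 0)
        {
            common.push_back(n);
        }
    }
    return common;
}
```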
@@ -149,12 +149,19 @@ void ngraph::replace_node(std::shared_ptr<Node> target, std::shared_ptr<Node> re
     if (ngraph::get_provenance_enabled())
     {
+        auto common_args = ngraph::find_common_args(target, replacement);
+
         auto set_replacement_prov = [replacement](std::shared_ptr<Node> node) {
             replacement->merge_provenance_tags_from(node);
         };
-        traverse_nodes(
-            {target}, set_replacement_prov, false, ngraph::find_common_args(target, replacement));
+        traverse_nodes({target}, set_replacement_prov, false, common_args);
+
+        auto set_prov_new_nodes = [replacement](std::shared_ptr<Node> node) {
+            node->merge_provenance_tags_from(replacement);
+        };
+        traverse_nodes({replacement}, set_prov_new_nodes, false, common_args);
     }

     // For each of target's output O with replacement output O_rep:
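This change makes provenance propagation bidirectional. Previously only the replacement root absorbed the tags of the subtree it replaces; now every node introduced by the replacement (reachable from `replacement` down to the common arguments) also inherits the root's merged tags. The provenance tests at the bottom of this diff exercise exactly this. A toy model of the two passes, with plain sets standing in for provenance tags (hypothetical names):

```cpp
#include <iostream>
#include <set>
#include <string>

using ProvSet = std::set<std::string>;

int main()
{
    // Pass 1 (pre-existing): the replacement root absorbs the tags of the
    // subtree it replaces, up to the common arguments.
    ProvSet replaced_subtree{"tag_c"}; // C's tags
    ProvSet replacement_root{"tag_d"}; // D's own tag
    replacement_root.insert(replaced_subtree.begin(), replaced_subtree.end());

    // Pass 2 (new): each node introduced by the replacement inherits the
    // root's merged tags.
    ProvSet new_node_e; // E carried no tags of its own
    new_node_e.insert(replacement_root.begin(), replacement_root.end());

    for (const auto& t : new_node_e)
    {
        std::cout << t << ' '; // tag_c tag_d, matching the first test below
    }
    std::cout << '\n';
}
```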
......
@@ -51,9 +51,8 @@ shared_ptr<Node> op::Max::get_default_value() const
     case element::Type_t::boolean:
         return make_constant_from_string("0", get_element_type(), get_shape());
     case element::Type_t::bf16:
-        return make_constant_from_string("-INFINITY", get_element_type(), get_shape());
+    case element::Type_t::f16:
     case element::Type_t::f32:
-        return make_constant_from_string("-INFINITY", get_element_type(), get_shape());
     case element::Type_t::f64:
         return make_constant_from_string("-INFINITY", get_element_type(), get_shape());
     case element::Type_t::i8:
@@ -80,6 +79,8 @@ shared_ptr<Node> op::Max::get_default_value() const
     case element::Type_t::u64:
         return make_constant_from_string(
             to_string(numeric_limits<uint64_t>::min()), get_element_type(), get_shape());
+    case element::Type_t::undefined:
+    case element::Type_t::dynamic:
     default: throw runtime_error("Max default value not defined for type");
     }
 }
@@ -51,9 +51,8 @@ shared_ptr<Node> op::Min::get_default_value() const
     case element::Type_t::boolean:
         return make_constant_from_string("1", get_element_type(), get_shape());
     case element::Type_t::bf16:
-        return make_constant_from_string("INFINITY", get_element_type(), get_shape());
+    case element::Type_t::f16:
     case element::Type_t::f32:
-        return make_constant_from_string("INFINITY", get_element_type(), get_shape());
     case element::Type_t::f64:
         return make_constant_from_string("INFINITY", get_element_type(), get_shape());
     case element::Type_t::i8:
@@ -80,6 +79,8 @@ shared_ptr<Node> op::Min::get_default_value() const
     case element::Type_t::u64:
         return make_constant_from_string(
             to_string(numeric_limits<uint64_t>::max()), get_element_type(), get_shape());
+    case element::Type_t::undefined:
+    case element::Type_t::dynamic:
     default: throw runtime_error("Min default value not defined for type");
     }
 }
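Both switches compute the identity element of their reduction: for `Max` the default is the smallest representable value (`-INFINITY` for float types, `numeric_limits<T>::min()` for unsigned integers), and for `Min` the largest, symmetrically. The restructuring only collapses the float cases into one fall-through group and enumerates `undefined`/`dynamic` so the switch is exhaustive under the newly enabled `-Wswitch-enum`. The underlying rule, as a sketch:

```cpp
#include <limits>

// Identity of a max-reduction: a value no element compares below.
template <typename T>
constexpr T max_identity()
{
    return std::numeric_limits<T>::has_infinity
               ? -std::numeric_limits<T>::infinity()
               : std::numeric_limits<T>::lowest();
}

// Identity of a min-reduction, symmetrically.
template <typename T>
constexpr T min_identity()
{
    return std::numeric_limits<T>::has_infinity
               ? std::numeric_limits<T>::infinity()
               : std::numeric_limits<T>::max();
}

static_assert(max_identity<int>() == std::numeric_limits<int>::lowest(), "int");
static_assert(min_identity<unsigned>() == std::numeric_limits<unsigned>::max(), "uint");
```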
@@ -3027,6 +3027,7 @@ namespace ngraph
                 case ngraph::op::PadMode::REFLECT:
                     pad_mode_string = "ngraph::op::PadMode::REFLECT";
                     break;
+                case ngraph::op::PadMode::SYMMETRIC: throw ngraph_error("Unsupported PadMode");
                 }
                 writer << "reference::pad<" << out[0].get_type() << ">(" << args[0].get_name()
                        << ",\n";
@@ -3470,6 +3471,7 @@ namespace ngraph
                         func_block += "d_" + out_denom + " = 1;\n";
                     }
                     break;
+                case ngraph::op::SigmoidMultiply::FunctionType::NumTypes:
                 default:
                     throw ngraph_error(
                         "generate_sigmoid_mul_func input function type not supported");
......
@@ -1290,6 +1290,18 @@ static void dump_one_kernel_with_type(runtime::cpu::CPU_DebugTracer& debug_trace
                                t_attrs.m_t_shape,
                                in_out);
         break;
+    case element::Type_t::undefined:
+    case element::Type_t::dynamic:
+    case element::Type_t::boolean:
+    case element::Type_t::bf16:
+    case element::Type_t::f16:
+    case element::Type_t::f64:
+    case element::Type_t::i16:
+    case element::Type_t::i64:
+    case element::Type_t::u16:
+    case element::Type_t::u32:
+    case element::Type_t::u64:
+    default: break;
     }
 }
@@ -1613,8 +1625,9 @@ void runtime::cpu::CPU_ExternalFunction::build(ngraph::pass::PassConfig& pass_co
         case TensorRole::INTERMEDIATE: return string("TensorRole::INTERMEDIATE");
         case TensorRole::CONSTANT: return string("TensorRole::CONSTANT");
         case TensorRole::OUTPUT: return string("TensorRole::OUTPUT");
+        case TensorRole::UNKNOWN:
+        default: throw runtime_error("unhandled CPU tensor role");
         }
-        throw runtime_error("unhandled CPU tensor role");
     };

     //dump the tensor roles to debug manifest
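Moving the `throw` from after the `switch` into a `case TensorRole::UNKNOWN:`/`default:` label keeps the lambda free of an unreachable trailing statement while satisfying both the compiler's exhaustiveness check and its return-path analysis. The same change is applied to `CPUMemoryAssignment::liveness_analysis` in the next hunk. The shape of the pattern, with the enumerator set assumed from the cases visible in this diff:

```cpp
#include <stdexcept>
#include <string>

enum class TensorRole { INPUT, INTERMEDIATE, CONSTANT, OUTPUT, UNKNOWN };

std::string tensor_role_name(TensorRole role)
{
    switch (role)
    {
    case TensorRole::INPUT: return "TensorRole::INPUT";
    case TensorRole::INTERMEDIATE: return "TensorRole::INTERMEDIATE";
    case TensorRole::CONSTANT: return "TensorRole::CONSTANT";
    case TensorRole::OUTPUT: return "TensorRole::OUTPUT";
    case TensorRole::UNKNOWN:
    default: throw std::runtime_error("unhandled CPU tensor role");
    }
    // Nothing after the switch: every path returns or throws, so neither
    // -Wswitch-enum nor a missing-return warning can fire.
}
```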
......
@@ -578,8 +578,9 @@ void runtime::cpu::pass::CPUMemoryAssignment::liveness_analysis(
         case TensorRole::INTERMEDIATE: return string("TensorRole::INTERMEDIATE");
         case TensorRole::CONSTANT: return string("TensorRole::CONSTANT");
         case TensorRole::OUTPUT: return string("TensorRole::OUTPUT");
+        case TensorRole::UNKNOWN:
+        default: throw runtime_error("unhandled CPU tensor role");
         }
-        throw runtime_error("unhandled CPU tensor role");
     };

     //liveness analysis
......
@@ -31,29 +31,29 @@ struct CPURuntimeContextCG
     std::vector<mkldnn::primitive*> mkldnn_primitives;
     std::vector<char*> mkldnn_workspaces;
     std::vector<mkldnn::memory::desc*> mkldnn_descriptors;

     mkldnn::engine global_cpu_engine = mkldnn::engine(mkldnn::engine::cpu, 0);

     void set_memory_ptr(size_t primitive_index,
                         void* ptr)
     {
         auto primitive = static_cast<mkldnn::memory*>(mkldnn_primitives[primitive_index]);
         primitive->set_data_handle(ptr);
     }

     void mkldnn_invoke_primitive(size_t primitive_index)
     {
         mkldnn::stream s(mkldnn::stream::kind::eager);
         try
         {
             s.submit({*mkldnn_primitives[primitive_index]}).wait();
         }
         catch (const mkldnn::error& e)
         {
             throw std::runtime_error("Could not run mkldnn primitive " + e.message);
         }
     }

 private:
@@ -89,32 +89,32 @@ private:
     void init_mkldnn_primitives();

     inline void cleanup_mkldnn_primitives()
     {
         for (auto p : mkldnn_primitives)
         {
             delete p;
         }
 #ifndef _WIN32
         //To avoid memory leak in mkldnn, release any buffers that are not free'd yet.
         //https://software.intel.com/en-us/mkl-linux-developer-guide-avoiding-memory-leaks-in-intel-mkl
         //mkl_free_buffers() is not exposed at this point, hence using mkl_serv_free_buffers()
         ngraph::runtime::cpu::mkldnn_utils::mkl_serv_free_buffers();
 #endif
         for (auto w : mkldnn_workspaces)
         {
             free(w);
         }
     }

     inline void cleanup_mkldnn_descriptors()
     {
         for (auto d : mkldnn_descriptors)
         {
             free(d);
         }
     }
 };

 extern "C" CPURuntimeContextCG* init_cg_ctx()
@@ -128,23 +128,23 @@ extern "C" void destroy_cg_ctx(CPURuntimeContextCG* cg_ctx)
 }

 static void
     deserialize_memory_descs_and_build_memory_primitives(std::ifstream& desc_file,
                                                          CPURuntimeContextCG* cg_ctx,
                                                          size_t descs_count)
 {
     cg_ctx->mkldnn_descriptors = std::vector<mkldnn::memory::desc*>(descs_count);
     for (auto i = 0; i < descs_count; i++)
     {
         size_t primitive_index;
         desc_file >> primitive_index;

         auto desc = (mkldnn::memory::desc*)malloc(sizeof(mkldnn::memory::desc));
         if (!desc)
         {
             throw std::bad_alloc();
         }
         desc_file.read(reinterpret_cast<char*>(desc), sizeof(mkldnn::memory::desc));

         cg_ctx->mkldnn_descriptors[i] = desc;
         cg_ctx->mkldnn_primitives[primitive_index] = new mkldnn::memory({*cg_ctx->mkldnn_descriptors[i], cg_ctx->global_cpu_engine}, nullptr);
     }
 };
 )"
@@ -58,6 +58,10 @@ namespace ngraph
                 case CUDNN_DATA_INT32:
                     r = m_host_parameters->cache(static_cast<int32_t>(value));
                     break;
+                case CUDNN_DATA_HALF:
+                case CUDNN_DATA_INT8x4:
+                case CUDNN_DATA_UINT8:
+                case CUDNN_DATA_UINT8x4:
                 default:
                     throw std::runtime_error(
                         "Encountered unhandled cudnnDataType_t during compilation.");
......
@@ -122,6 +122,10 @@ static void random_init(shared_ptr<runtime::Tensor> tv)
     case element::Type_t::u16: init_int_tv<uint16_t>(tv, 0, 1); break;
     case element::Type_t::u32: init_int_tv<uint32_t>(tv, 0, 1); break;
     case element::Type_t::u64: init_int_tv<uint64_t>(tv, 0, 1); break;
+    case element::Type_t::undefined:
+    case element::Type_t::dynamic:
+    case element::Type_t::bf16:
+    case element::Type_t::f16:
     default: throw runtime_error("unsupported type");
     }
 }
......
@@ -221,19 +221,70 @@ TEST(provenance, provenance)
     // Replacement:
     //
     //          A{tag_a}  B{tag_b}
     //           |        |
-    //          E{tag_e}  |
+    //          E{}       |
     //           |        |
     //          C -> D{tag_d}
     //
     //
     // After:
     //
+    //          A{tag_a}  B{tag_b}
+    //           |        |
+    //          E{tag_c, tag_d}  |
+    //           |        |
+    //          D{tag_c, tag_d}
+    //
+    // Comment:
+    // * D is the replacement root replacing C and creating a new argument node E
+    //
+    {
+        auto x = make_shared<op::Parameter>(element::i32, PartialShape{2, 3, 4});
+        auto y = make_shared<op::Parameter>(element::i32, PartialShape{2, 3, 4});
+
+        auto a = make_shared<op::Add>(x, y);
+        a->add_provenance_tag("tag_a");
+        auto b = make_shared<op::Multiply>(y, x);
+        b->add_provenance_tag("tag_b");
+        auto c = make_shared<op::Subtract>(a, b);
+        c->add_provenance_tag("tag_c");
+
+        auto f = make_shared<Function>(c, ParameterVector{x, y});
+
+        auto e = make_shared<op::Subtract>(a, x);
+        auto d = make_shared<op::Subtract>(e, b);
+        d->add_provenance_tag("tag_d");
+
+        replace_node(c, d);
+
+        EXPECT_EQ(d->get_provenance_tags(), (ProvSet{"tag_c", "tag_d"}));
+        EXPECT_EQ(e->get_provenance_tags(), (ProvSet{"tag_c", "tag_d"}));
+    }
+
+    //
+    // Before:
+    //
+    //          A{tag_a}  B{tag_b}
+    //           |        |
+    //          C{tag_c}
+    //
+    //
+    // Replacement:
+    //
     //          A{tag_a}  B{tag_b}
     //           |        |
     //          E{tag_e}  |
     //           |        |
-    //          D{tag_c, tag_d}
+    //          C -> D{tag_d}
+    //
+    //
+    // After:
+    //
+    //          A{tag_a}      B{tag_b}
+    //            \           /
+    //       E{tag_c, tag_d, tag_e}  /
+    //              \         /
+    //          D{tag_c, tag_d}
     //
     // Comment:
     // * D is the replacement root replacing C and creating a new argument node E
@@ -252,11 +303,13 @@ TEST(provenance, provenance)
         auto f = make_shared<Function>(c, ParameterVector{x, y});

         auto e = make_shared<op::Subtract>(a, x);
+        e->add_provenance_tag("tag_e");
         auto d = make_shared<op::Subtract>(e, b);
         d->add_provenance_tag("tag_d");

         replace_node(c, d);

         EXPECT_EQ(d->get_provenance_tags(), (ProvSet{"tag_c", "tag_d"}));
+        EXPECT_EQ(e->get_provenance_tags(), (ProvSet{"tag_c", "tag_d", "tag_e"}));
     }
 }