Unverified commit 21525b8e authored by Robert Kimball, committed by GitHub

Merge branch 'master' into master

parents e165a460 5713b34d
@@ -186,7 +186,9 @@ namespace ngraph
auto& arg0_tensor = external_function->get_tensor_data(args[0].get_name());
auto& arg1_tensor = external_function->get_tensor_data(args[1].get_name());
auto& arg2_tensor = external_function->get_tensor_data(args[2].get_name());
auto& arg3_tensor = external_function->get_tensor_data(args[3].get_name());
auto& out_tensor = external_function->get_tensor_data(out[0].get_name());
size_t arg3_size = args[3].get_size();
if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
{
@@ -196,8 +198,14 @@ namespace ngraph
node, args, out);
auto& deps = mkldnn_emitter->get_primitive_deps(conv_index);
auto functor = [&, conv_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
auto functor = [&, conv_index, arg3_size](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
if (out_tensor != arg3_tensor)
{
memcpy(static_cast<char*>(out_tensor),
static_cast<char*>(arg3_tensor),
arg3_size);
}
cpu::mkldnn_utils::set_memory_ptr(ctx, deps[0], arg0_tensor);
cpu::mkldnn_utils::set_memory_ptr(ctx, deps[1], arg1_tensor);
cpu::mkldnn_utils::set_memory_ptr(ctx, deps[2], arg2_tensor);
@@ -219,7 +227,9 @@ namespace ngraph
auto& arg0_tensor = external_function->get_tensor_data(args[0].get_name());
auto& arg1_tensor = external_function->get_tensor_data(args[1].get_name());
auto& arg2_tensor = external_function->get_tensor_data(args[2].get_name());
auto& out_tensor = external_function->get_tensor_data(out[0].get_name());
size_t arg2_size = args[2].get_size();
if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
{
@@ -228,8 +238,14 @@ namespace ngraph
node, args, out);
auto& deps = mkldnn_emitter->get_primitive_deps(conv_index);
auto functor = [&, conv_index](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
auto functor = [&, conv_index, arg2_size](CPURuntimeContext* ctx,
CPUExecutionContext* ectx) {
if (out_tensor != arg2_tensor)
{
memcpy(static_cast<char*>(out_tensor),
static_cast<char*>(arg2_tensor),
arg2_size);
}
cpu::mkldnn_utils::set_memory_ptr(ctx, deps[0], arg0_tensor);
cpu::mkldnn_utils::set_memory_ptr(ctx, deps[1], arg1_tensor);
cpu::mkldnn_utils::set_memory_ptr(ctx, deps[2], out_tensor);
......
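Both builder hunks above follow the same pattern: MKL-DNN computes the fused convolution-plus-sum in place, so the summand input (args[3] in the first hunk, args[2] in the second) must already occupy the output buffer before the primitive runs, and the functor copies it over whenever the two tensors are distinct. A minimal standalone sketch of that guard, with all names (copy_summand_if_needed, dst, summand) illustrative rather than taken from the repository:

// Sketch only: the "copy the summand into the destination unless it already
// aliases it" guard used by the functors above. Names are illustrative.
#include <cstddef>
#include <cstring>

inline void copy_summand_if_needed(void* dst, const void* summand, std::size_t byte_count)
{
    if (dst != summand)
    {
        std::memcpy(dst, summand, byte_count);
    }
}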
@@ -2410,6 +2410,11 @@ namespace ngraph
node, args, out);
auto& deps = mkldnn_emitter->get_primitive_deps(qconv_index);
writer << "if (" << out[0].get_name() << " != " << args[3].get_name() << ")\n";
writer.block_begin();
writer << "memcpy(" << out[0].get_name() << ", " << args[3].get_name() << ", "
<< args[3].get_size() * args[3].get_element_type().size() << ");\n";
writer.block_end();
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[0])
<< ", " << args[0].get_name() << ");\n";
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[1])
@@ -2441,6 +2446,11 @@ namespace ngraph
node, args, out);
auto& deps = mkldnn_emitter->get_primitive_deps(qconv_index);
writer << "if (" << out[0].get_name() << " != " << args[3].get_name() << ")\n";
writer.block_begin();
writer << "memcpy(" << out[0].get_name() << ", " << args[3].get_name() << ", "
<< args[3].get_size() * args[3].get_element_type().size() << ");\n";
writer.block_end();
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[0])
<< ", " << args[0].get_name() << ");\n";
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[1])
@@ -2500,6 +2510,11 @@ namespace ngraph
node, args, out);
auto& deps = mkldnn_emitter->get_primitive_deps(conv_index);
writer << "if (" << out[0].get_name() << " != " << args[3].get_name() << ")\n";
writer.block_begin();
writer << "memcpy(" << out[0].get_name() << ", " << args[3].get_name() << ", "
<< args[3].get_size() * args[3].get_element_type().size() << ");\n";
writer.block_end();
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[0])
<< ", " << args[0].get_name() << ");\n";
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[1])
@@ -2527,6 +2542,11 @@ namespace ngraph
node, args, out);
auto& deps = mkldnn_emitter->get_primitive_deps(conv_index);
writer << "if (" << out[0].get_name() << " != " << args[2].get_name() << ")\n";
writer.block_begin();
writer << "memcpy(" << out[0].get_name() << ", " << args[2].get_name() << ", "
<< args[2].get_size() * args[2].get_element_type().size() << ");\n";
writer.block_end();
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[0])
<< ", " << args[0].get_name() << ");\n";
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[1])
......
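The four codegen hunks above all emit the same guard into the generated C++, sizing the memcpy as args[N].get_size() * args[N].get_element_type().size() so the copy is expressed in bytes. The sketch below reproduces the string-building with a plain std::ostringstream in place of nGraph's CodeWriter; emit_inplace_copy and the buffer names out0/arg3 are illustrative, not repository identifiers:

// Sketch: builds the same guard the emitter writes, using std::ostringstream
// in place of nGraph's CodeWriter (block_begin()/block_end() become braces here).
#include <cstddef>
#include <iostream>
#include <sstream>
#include <string>

std::string emit_inplace_copy(const std::string& out_name,
                              const std::string& summand_name,
                              std::size_t element_count,
                              std::size_t element_size)
{
    std::ostringstream writer;
    writer << "if (" << out_name << " != " << summand_name << ")\n";
    writer << "{\n";
    writer << "    memcpy(" << out_name << ", " << summand_name << ", "
           << element_count * element_size << ");\n";
    writer << "}\n";
    return writer.str();
}

int main()
{
    // Prints the guard for a hypothetical 12-element, one-byte summand tensor.
    std::cout << emit_inplace_copy("out0", "arg3", 12, 1);
}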
@@ -772,10 +772,18 @@ size_t MKLDNNEmitter::build_reorder(const mkldnn::memory::desc& input_desc,
size_t input_index = build_memory_primitive(input_desc);
size_t result_index = build_memory_primitive(result_desc);
size_t primitive_index = insert_primitive(
new mkldnn::reorder(*m_mkldnn_primitives[input_index], *m_mkldnn_primitives[result_index]));
size_t primitive_index = 0;
try
{
primitive_index = insert_primitive(new mkldnn::reorder(*m_mkldnn_primitives[input_index],
*m_mkldnn_primitives[result_index]));
m_primitive_deps[primitive_index] = {input_index, result_index};
}
catch (const mkldnn::error& e)
{
throw ngraph_error("Could not create mkldnn primitive " + e.message);
}
m_primitive_deps[primitive_index] = {input_index, result_index};
return primitive_index;
}
......
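The build_reorder change wraps construction of the mkldnn::reorder primitive so that a failed creation surfaces as an ngraph_error carrying the message string from mkldnn::error, instead of letting the MKL-DNN exception escape the emitter. A minimal sketch of that wrap-and-rethrow shape, independent of both libraries (build_checked and the use of std::exception/std::runtime_error are stand-ins; the real code above catches mkldnn::error specifically and throws ngraph_error):

// Sketch of the wrap-and-rethrow pattern added above, kept independent of
// MKL-DNN and nGraph. All names here are illustrative.
#include <functional>
#include <stdexcept>
#include <string>

size_t build_checked(const std::function<size_t()>& build_primitive,
                     const std::string& description)
{
    try
    {
        // The callback stands in for insert_primitive(new mkldnn::reorder(...)).
        return build_primitive();
    }
    catch (const std::exception& e)
    {
        // Mirrors converting the backend error into one engine-level exception type.
        throw std::runtime_error("Could not create " + description + ": " + e.what());
    }
}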
@@ -679,6 +679,68 @@ TEST(builder, scaled_QC_with_bias_signed_add_and_relu)
read_vector<uint8_t>(result));
}
TEST(builder, scaled_QC_with_bias_signed_add_and_relu_nhwc)
{
Shape shape_a{1, 3, 4, 1}; // input shape
Shape shape_b{1, 3, 3, 1}; // filter shape
Shape shape_r{1, 1, 3, 4}; // output shape
vector<uint8_t> a_data = {1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4};
vector<int8_t> b_data = {1, 2, 3, 4, 5, 0, 0, 1, 2};
vector<int32_t> c_data = {5};
vector<int8_t> conv_2_data = {-1, -2, -3, -4, -5, -6, -10, 0, 1, 2, 3, 4};
auto A = make_shared<op::Parameter>(element::u8, shape_a);
auto A_reshape = make_shared<op::Reshape>(A, AxisVector{0, 3, 1, 2}, Shape{1, 1, 3, 4});
auto B = make_shared<op::Parameter>(element::i8, shape_b);
auto B_reshape = make_shared<op::Reshape>(B, AxisVector{0, 3, 1, 2}, Shape{1, 1, 3, 3});
auto Add = make_shared<op::Parameter>(element::i8, shape_a);
auto Add_reshape = make_shared<op::Reshape>(Add, AxisVector{0, 3, 1, 2}, Shape{1, 1, 3, 4});
auto Bias = make_shared<op::Parameter>(element::i32, Shape{1});
auto C = op::Constant::create(element::f32, Shape{}, {0.0f});
auto D = op::Constant::create(element::f32, Shape{}, {255.0f});
auto E = op::Constant::create(element::f32, Shape{}, {-127.0f});
auto F = op::Constant::create(element::f32, Shape{}, {127.0f});
auto G = op::Constant::create(element::f32, Shape{}, {22.0f});
auto H = op::Constant::create(element::f32, Shape{}, {90.0f});
auto I = op::Constant::create(element::f32, Shape{}, {22.0f});
auto J = op::Constant::create(element::f32, Shape{}, {90.0f});
auto CV =
ngraph::builder::ScaledQuantizedConvolutionBiasSignedAdd(A_reshape,
B_reshape,
Bias,
Add_reshape,
Strides{1, 1}, // move_strides
Strides{1, 1}, // filter_dilation
CoordinateDiff{1, 1}, // below_pads
CoordinateDiff{1, 1}, // above_pads
Strides{1, 1}, // data_dilation
C,
D,
E,
F,
G,
H,
I,
J,
true);
auto f = make_shared<Function>(NodeVector{CV}, ParameterVector{A, B, Bias, Add});
constant_fold(f);
auto backend = runtime::Backend::create("CPU");
// Create some tensors for input/output
auto a = backend->create_tensor(element::u8, shape_a);
copy_data(a, a_data);
auto b = backend->create_tensor(element::i8, shape_b);
copy_data(b, b_data);
auto c = backend->create_tensor(element::i32, Shape{1});
copy_data(c, c_data);
auto d = backend->create_tensor(element::i8, shape_a);
copy_data(d, conv_2_data);
auto result = backend->create_tensor(element::u8, shape_r);
auto handle = backend->compile(f);
backend->call_with_validate(handle, {result}, {a, b, c, d});
EXPECT_EQ((vector<uint8_t>{74, 106, 93, 97, 112, 127, 127, 127, 110, 127, 127, 127}),
read_vector<uint8_t>(result));
}
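The new NHWC test builds its parameters in NHWC layout and permutes them to NCHW with op::Reshape before constructing the quantized convolution; AxisVector{0, 3, 1, 2} lists the input axes in the order they are read, so Shape{1, 3, 4, 1} (N, H, W, C) becomes Shape{1, 1, 3, 4} (N, C, H, W). A tiny standalone sketch of that index permutation (plain C++, not the nGraph API):

// Sketch: applies the same axis permutation {0, 3, 1, 2} to a shape,
// turning an NHWC extent list into NCHW. Purely illustrative.
#include <array>
#include <cstddef>
#include <iostream>

int main()
{
    std::array<std::size_t, 4> nhwc = {1, 3, 4, 1};  // N, H, W, C (shape_a above)
    std::array<std::size_t, 4> order = {0, 3, 1, 2}; // read the input axes in this order
    std::array<std::size_t, 4> nchw;
    for (std::size_t i = 0; i < 4; ++i)
    {
        nchw[i] = nhwc[order[i]];
    }
    // Prints 1 1 3 4, matching the Shape{1, 1, 3, 4} passed to op::Reshape.
    for (std::size_t d : nchw)
    {
        std::cout << d << ' ';
    }
    std::cout << '\n';
}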
TEST(builder, dynamic_scaled_QC_with_bias_signed_add_and_relu)
{
Shape shape_a{1, 1, 3, 4}; // input shape
......