Unverified Commit 7ab47c2e authored by Louis Feng, committed by GitHub

Merge pull request #540 from NervanaSystems/louisfeng/NGMX-296-conv_bias

NGMX-296 Convolution + Bias with MKLDNN
parents 0acaea58 a5476da6
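A minimal sketch of how the fused op is built, mirroring the new n1c1h3w3 test fixture added in this PR (shapes are illustrative):

#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/cpu/ops/conv_bias.hpp"

using namespace ngraph;

// Build a small graph that ends in the fused ConvolutionBias op added here.
std::shared_ptr<Function> make_conv_bias_graph()
{
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 1, 3, 3});
    auto weights = std::make_shared<op::Parameter>(element::f32, Shape{1, 1, 3, 3});
    auto bias = std::make_shared<op::Parameter>(element::f32, Shape{1});
    auto conv = std::make_shared<op::Convolution>(data, weights);
    // ConvolutionBias wraps an existing Convolution node and a bias node.
    auto conv_bias = std::make_shared<op::ConvolutionBias>(conv, bias);
    return std::make_shared<Function>(conv_bias,
                                      op::ParameterVector{data, weights, bias});
}

The CPUFusion pass in this PR also rewrites the graph pattern Convolution + Broadcast(bias) into ConvolutionBias automatically.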
@@ -186,6 +186,7 @@ if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND
    runtime/cpu/mkldnn_emitter.cpp
    runtime/cpu/mkldnn_invoke.cpp
    runtime/cpu/mkldnn_utils.cpp
runtime/cpu/ops/conv_bias.cpp
    runtime/cpu/ops/convert_layout.cpp
    runtime/cpu/ops/sigmoid.cpp
    runtime/cpu/ops/matmul_bias.cpp
...
@@ -90,6 +90,7 @@
#include "ngraph/runtime/cpu/cpu_kernel_emitters.hpp" #include "ngraph/runtime/cpu/cpu_kernel_emitters.hpp"
#include "ngraph/runtime/cpu/cpu_op_annotations.hpp" #include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
#include "ngraph/runtime/cpu/mkldnn_utils.hpp" #include "ngraph/runtime/cpu/mkldnn_utils.hpp"
#include "ngraph/runtime/cpu/ops/conv_bias.hpp"
#include "ngraph/runtime/cpu/ops/convert_layout.hpp" #include "ngraph/runtime/cpu/ops/convert_layout.hpp"
#include "ngraph/runtime/cpu/ops/matmul_bias.hpp" #include "ngraph/runtime/cpu/ops/matmul_bias.hpp"
#include "ngraph/runtime/cpu/ops/sigmoid.hpp" #include "ngraph/runtime/cpu/ops/sigmoid.hpp"
...@@ -2487,6 +2488,134 @@ namespace ngraph ...@@ -2487,6 +2488,134 @@ namespace ngraph
} }
} }
template <>
void CPU_Emitter::EMITTER_DECL(ngraph::op::ConvolutionBias)
{
auto convolution = static_cast<const ngraph::op::ConvolutionBias*>(node);
const TensorViewWrapper& data = args[0];
const TensorViewWrapper& weights = args[1];
const TensorViewWrapper& bias = args[2];
const TensorViewWrapper& result = out[0];
using namespace runtime::cpu::mkldnn_utils;
if (mkldnn_utils::use_mkldnn_kernel(node))
{
auto data_format = mkldnn_utils::get_input_mkldnn_format(node, 0);
auto weights_format = mkldnn_utils::get_input_mkldnn_format(node, 1);
auto bias_format = mkldnn_utils::get_input_mkldnn_format(node, 2);
auto result_format = mkldnn_utils::get_output_mkldnn_format(node, 0);
auto& mkldnn_emitter = external_function->get_mkldnn_emitter();
auto data_desc = mkldnn_emitter->build_memory_descriptor(data, data_format);
auto weights_desc =
mkldnn_emitter->build_memory_descriptor(weights, weights_format);
auto bias_desc = mkldnn_emitter->build_memory_descriptor(bias, bias_format);
auto result_desc =
mkldnn_emitter->build_memory_descriptor(result, result_format);
                    // For dilation, MKLDNN wants to know how many elements to insert between
                    // points, not how far apart to space them as nGraph does, so subtract 1
                    // from each dilation stride.
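                    // Example: an nGraph window dilation stride of 2 becomes an MKLDNN
                    // dilation of 1.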
Strides window_dilation_strides_adjusted;
for (size_t s : convolution->get_window_dilation_strides())
{
window_dilation_strides_adjusted.push_back(s - 1);
}
size_t conv_index = mkldnn_emitter->build_convolution_forward(
data_desc,
weights_desc,
bias_desc,
result_desc,
convolution->get_window_movement_strides(),
window_dilation_strides_adjusted,
convolution->get_padding_below(),
convolution->get_padding_above());
auto& deps = mkldnn_emitter->get_primitive_deps(conv_index);
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[0])
<< ", " << data.get_name() << ");\n";
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[1])
<< ", " << weights.get_name() << ");\n";
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[2])
<< ", " << bias.get_name() << ");\n";
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[3])
<< ", " << result.get_name() << ");\n";
writer << "cpu::mkldnn_utils::mkldnn_invoke_primitive(ctx, "
<< to_string(conv_index) << ");\n";
}
else
{
throw ngraph_error("ConvolutionBias is only supported with MKLDNN kernel.");
}
}
template <>
void CPU_Emitter::EMITTER_DECL(ngraph::op::ConvolutionBiasBackpropFiltersBias)
{
auto convolution =
static_cast<const ngraph::op::ConvolutionBiasBackpropFiltersBias*>(node);
const TensorViewWrapper& data = args[0];
const TensorViewWrapper& delta = args[1];
const TensorViewWrapper& weights_delta = out[0];
const TensorViewWrapper& bias_delta = out[1];
using namespace runtime::cpu::mkldnn_utils;
if (mkldnn_utils::use_mkldnn_kernel(node))
{
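                    // As in the forward emitter, MKLDNN counts elements inserted between
                    // points, so each nGraph dilation stride is reduced by 1.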
Strides window_dilation_strides_adjusted;
for (size_t s : convolution->get_window_dilation_strides_forward())
{
window_dilation_strides_adjusted.push_back(s - 1);
}
auto data_format = mkldnn_utils::get_input_mkldnn_format(node, 0);
auto delta_format = mkldnn_utils::get_input_mkldnn_format(node, 1);
auto weights_delta_format = mkldnn_utils::get_output_mkldnn_format(node, 0);
auto bias_delta_format = mkldnn_utils::get_output_mkldnn_format(node, 1);
auto& mkldnn_emitter = external_function->get_mkldnn_emitter();
auto data_desc = mkldnn_emitter->build_memory_descriptor(data, data_format);
auto delta_desc = mkldnn_emitter->build_memory_descriptor(delta, delta_format);
auto weights_delta_desc = mkldnn_emitter->build_memory_descriptor(
weights_delta, weights_delta_format);
auto bias_delta_desc =
mkldnn_emitter->build_memory_descriptor(bias_delta, bias_delta_format);
size_t conv_index = mkldnn_emitter->build_convolution_backward_weights_bias(
data_desc,
delta_desc,
weights_delta_desc,
bias_delta_desc,
convolution->get_window_movement_strides_forward(),
window_dilation_strides_adjusted,
convolution->get_padding_below_forward(),
convolution->get_padding_above_forward());
auto& deps = mkldnn_emitter->get_primitive_deps(conv_index);
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[0])
<< ", " << data.get_name() << ");\n";
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[1])
<< ", " << delta.get_name() << ");\n";
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[2])
<< ", " << weights_delta.get_name() << ");\n";
writer << "cpu::mkldnn_utils::set_memory_ptr(ctx, " << to_string(deps[3])
<< ", " << bias_delta.get_name() << ");\n";
writer << "cpu::mkldnn_utils::mkldnn_invoke_primitive(ctx, "
<< to_string(conv_index) << ");\n";
}
else
{
throw ngraph_error(
"ConvolutionBiasBackpropFiltersBias is only supported with MKLDNN kernel.");
}
}
            template <>
            void CPU_Emitter::EMITTER_DECL(ngraph::op::Not)
            {
...
@@ -109,6 +109,7 @@
#include "ngraph/runtime/cpu/cpu_tensor_view.hpp"
#include "ngraph/runtime/cpu/cpu_tracing.hpp"
#include "ngraph/runtime/cpu/mkldnn_utils.hpp"
#include "ngraph/runtime/cpu/ops/conv_bias.hpp"
#include "ngraph/runtime/cpu/ops/convert_layout.hpp" #include "ngraph/runtime/cpu/ops/convert_layout.hpp"
#include "ngraph/runtime/cpu/ops/matmul_bias.hpp" #include "ngraph/runtime/cpu/ops/matmul_bias.hpp"
#include "ngraph/runtime/cpu/ops/sigmoid.hpp" #include "ngraph/runtime/cpu/ops/sigmoid.hpp"
...@@ -226,6 +227,10 @@ static const runtime::cpu::OpMap dispatcher{ ...@@ -226,6 +227,10 @@ static const runtime::cpu::OpMap dispatcher{
&runtime::cpu::CPU_Emitter::emit<op::ConvolutionBackpropFilters>}, &runtime::cpu::CPU_Emitter::emit<op::ConvolutionBackpropFilters>},
{TI(ngraph::op::ConvolutionBackpropData), {TI(ngraph::op::ConvolutionBackpropData),
&runtime::cpu::CPU_Emitter::emit<op::ConvolutionBackpropData>}, &runtime::cpu::CPU_Emitter::emit<op::ConvolutionBackpropData>},
{TI(ngraph::op::ConvolutionBias), &runtime::cpu::CPU_Emitter::emit<op::ConvolutionBias>},
    // ConvolutionBias backprop for data shares its implementation with ConvolutionBackpropData
{TI(ngraph::op::ConvolutionBiasBackpropFiltersBias),
&runtime::cpu::CPU_Emitter::emit<op::ConvolutionBiasBackpropFiltersBias>},
    {TI(ngraph::runtime::cpu::op::ConvertLayout),
     &runtime::cpu::CPU_Emitter::emit<runtime::cpu::op::ConvertLayout>},
    {TI(ngraph::op::Not), &runtime::cpu::CPU_Emitter::emit<op::Not>},
...
@@ -83,6 +83,7 @@ size_t MKLDNNEmitter::build_convolution_forward(const mkldnn::memory::desc& input_data_desc,
                                                const mkldnn::memory::desc& weights_desc,
                                                const mkldnn::memory::desc& result_desc,
                                                const ngraph::Strides& strides,
const ngraph::Strides& dilation_strides,
                                                const ngraph::CoordinateDiff& padding_below,
                                                const ngraph::CoordinateDiff& padding_above)
{
@@ -97,6 +98,7 @@ size_t MKLDNNEmitter::build_convolution_forward(const mkldnn::memory::desc& input_data_desc,
          weights_desc,
          result_desc,
          mkldnn::memory::dims(strides.begin(), strides.end()),
mkldnn::memory::dims(dilation_strides.begin(), dilation_strides.end()),
          mkldnn::memory::dims(padding_below.begin(), padding_below.end()),
          mkldnn::memory::dims(padding_above.begin(), padding_above.end()),
          mkldnn::padding_kind::zero},
@@ -111,21 +113,24 @@ size_t MKLDNNEmitter::build_convolution_forward(const mkldnn::memory::desc& input_data_desc,
size_t MKLDNNEmitter::build_convolution_forward(const mkldnn::memory::desc& input_data_desc,
                                                const mkldnn::memory::desc& weights_desc,
const mkldnn::memory::desc& bias_desc,
                                                const mkldnn::memory::desc& result_desc,
                                                const ngraph::Strides& strides,
                                                const ngraph::Strides& dilation_strides,
                                                const ngraph::CoordinateDiff& padding_below,
                                                const ngraph::CoordinateDiff& padding_above)
{
    const size_t input_data_index = build_memory_primitive(input_data_desc);
    const size_t weights_index = build_memory_primitive(weights_desc);
    const size_t bias_index = build_memory_primitive(bias_desc);
    const size_t result_index = build_memory_primitive(result_desc);
    const size_t conv_index = insert_primitive(new mkldnn::convolution_forward(
        {{mkldnn::prop_kind::forward,
          mkldnn::algorithm::convolution_direct,
          input_data_desc,
          weights_desc,
bias_desc,
          result_desc,
          mkldnn::memory::dims(strides.begin(), strides.end()),
          mkldnn::memory::dims(dilation_strides.begin(), dilation_strides.end()),
@@ -135,9 +140,68 @@ size_t MKLDNNEmitter::build_convolution_forward(const mkldnn::memory::desc& input_data_desc,
         mkldnn_utils::global_cpu_engine},
        *m_mkldnn_primitives[input_data_index],
        *m_mkldnn_primitives[weights_index],
*m_mkldnn_primitives[bias_index],
        *m_mkldnn_primitives[result_index]));
    m_primitive_deps[conv_index] = {input_data_index, weights_index, bias_index, result_index};
return conv_index;
}
size_t MKLDNNEmitter::build_convolution_backward_weights_bias(
const mkldnn::memory::desc& in_data_desc,
const mkldnn::memory::desc& in_delta_desc,
const mkldnn::memory::desc& out_weights_delta_desc,
const mkldnn::memory::desc& out_bias_delta_desc,
const ngraph::Strides& ng_strides,
const ngraph::Strides& ng_dilation_strides,
const ngraph::CoordinateDiff& ng_padding_below,
const ngraph::CoordinateDiff& ng_padding_above)
{
const size_t in_data_index = build_memory_primitive(in_data_desc);
const size_t in_delta_index = build_memory_primitive(in_delta_desc);
const size_t out_weights_delta_index = build_memory_primitive(out_weights_delta_desc);
const size_t out_bias_delta_index = build_memory_primitive(out_bias_delta_desc);
mkldnn::memory::dims strides(ng_strides.begin(), ng_strides.end());
mkldnn::memory::dims dilation(ng_dilation_strides.begin(), ng_dilation_strides.end());
mkldnn::memory::dims padding_l(ng_padding_below.begin(), ng_padding_below.end());
mkldnn::memory::dims padding_r(ng_padding_above.begin(), ng_padding_above.end());
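    // MKLDNN derives a backward-weights primitive descriptor from a forward primitive
    // descriptor supplied as a hint, so the forward descriptor is built first.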
mkldnn::convolution_forward::primitive_desc fwd_pd{{mkldnn::prop_kind::forward,
mkldnn::algorithm::convolution_direct,
in_data_desc,
out_weights_delta_desc,
out_bias_delta_desc,
in_delta_desc,
strides,
dilation,
padding_l,
padding_r,
mkldnn::padding_kind::zero},
mkldnn_utils::global_cpu_engine};
mkldnn::convolution_backward_weights::primitive_desc bwd_pd{
{mkldnn::algorithm::convolution_direct,
in_data_desc,
out_weights_delta_desc,
out_bias_delta_desc,
in_delta_desc,
strides,
dilation,
padding_l,
padding_r,
mkldnn::padding_kind::zero},
mkldnn_utils::global_cpu_engine,
fwd_pd};
const size_t conv_index = insert_primitive(
new mkldnn::convolution_backward_weights(bwd_pd,
*m_mkldnn_primitives[in_data_index],
*m_mkldnn_primitives[in_delta_index],
*m_mkldnn_primitives[out_weights_delta_index],
*m_mkldnn_primitives[out_bias_delta_index]));
m_primitive_deps[conv_index] = {
in_data_index, in_delta_index, out_weights_delta_index, out_bias_delta_index};
    return conv_index;
}
...
@@ -57,11 +57,16 @@ namespace ngraph
                const mkldnn::memory::desc& weights_desc,
                const mkldnn::memory::desc& result_desc,
                const ngraph::Strides& strides,
                const ngraph::Strides& dilation_strides,
                const ngraph::CoordinateDiff& padding_below,
                const ngraph::CoordinateDiff& padding_above);
/**
* Convolution + bias forward
*/
                size_t build_convolution_forward(const mkldnn::memory::desc& input_data_desc,
                                                 const mkldnn::memory::desc& weights_desc,
const mkldnn::memory::desc& bias_desc,
                                                 const mkldnn::memory::desc& result_desc,
                                                 const ngraph::Strides& strides,
                                                 const ngraph::Strides& dilation_strides,
@@ -84,7 +89,18 @@ namespace ngraph
                    const ngraph::Strides& dilation_strides,
                    const ngraph::CoordinateDiff& padding_below,
                    const ngraph::CoordinateDiff& padding_above);
/**
* Convolution + bias backprop for weights and bias
*/
size_t build_convolution_backward_weights_bias(
const mkldnn::memory::desc& in_data_desc,
const mkldnn::memory::desc& in_delta_desc,
const mkldnn::memory::desc& out_weights_delta_desc,
const mkldnn::memory::desc& out_bias_delta_desc,
const ngraph::Strides& ng_strides,
const ngraph::Strides& ng_dilation_strides,
const ngraph::CoordinateDiff& ng_padding_below,
const ngraph::CoordinateDiff& ng_padding_above);
                size_t build_pooling_forward(mkldnn::algorithm pooling_algorithm,
                                             const mkldnn::memory::desc& input_desc,
                                             const mkldnn::memory::desc& result_desc,
...
@@ -28,6 +28,7 @@
#include "ngraph/ops/relu.hpp"
#include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
#include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
#include "ngraph/runtime/cpu/ops/conv_bias.hpp"
#include "ngraph/types/element_type.hpp"
#include "mkldnn_utils.hpp"
@@ -47,6 +48,8 @@ static const std::unordered_set<std::type_index> s_op_registry{
    TI(ngraph::op::Convolution),
    TI(ngraph::op::ConvolutionBackpropData),
    TI(ngraph::op::ConvolutionBackpropFilters),
TI(ngraph::op::ConvolutionBias),
TI(ngraph::op::ConvolutionBiasBackpropFiltersBias),
    TI(ngraph::op::MaxPool),
    TI(ngraph::op::MaxPoolBackprop),
    TI(ngraph::op::Relu),
...
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <numeric>
#include "ngraph/ops/convolution.hpp"
#include "ngraph/ops/get_output_element.hpp"
#include "ngraph/runtime/cpu/ops/conv_bias.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
op::ConvolutionBias::ConvolutionBias(const std::shared_ptr<op::Convolution>& conv,
const std::shared_ptr<Node>& bias)
: RequiresTensorViewArgs("ConvolutionBias",
{conv->get_input_op(0), conv->get_input_op(1), bias})
, m_window_movement_strides(conv->get_window_movement_strides())
, m_window_dilation_strides(conv->get_window_dilation_strides())
, m_padding_below(conv->get_padding_below())
, m_padding_above(conv->get_padding_above())
, m_data_dilation_strides(conv->get_data_dilation_strides())
{
if (conv->get_element_type() != bias->get_element_type())
{
throw ngraph_error("Convolution's element type isn't equal to bias!");
}
set_value_type_checked(conv->get_element_type(), conv->get_shape());
}
op::ConvolutionBias::ConvolutionBias(const std::shared_ptr<Node>& data_batch,
const std::shared_ptr<Node>& filters,
const std::shared_ptr<Node>& bias,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides)
: RequiresTensorViewArgs("ConvolutionBias", {data_batch, filters, bias})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_data_dilation_strides(data_dilation_strides)
{
}
std::shared_ptr<Node> op::ConvolutionBias::copy_with_new_args(const NodeVector& new_args) const
{
    if (new_args.size() != 3)
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::shared_ptr<Node>(new ConvolutionBias(new_args.at(0),
new_args.at(1),
new_args.at(2),
get_window_movement_strides(),
get_window_dilation_strides(),
get_padding_below(),
get_padding_above(),
get_data_dilation_strides()));
}
void op::ConvolutionBias::generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta)
{
auto data = get_input_op(0);
const auto data_shape = data->get_shape();
auto filter = get_input_op(1);
const auto filter_shape = filter->get_shape();
auto bias = get_input_op(2);
const auto bias_shape = bias->get_shape();
    // The data delta uses the regular convolution backprop-for-data op; only the
    // filters and bias need the fused backprop op.
adjoints.add_delta(data,
std::make_shared<op::ConvolutionBackpropData>(data_shape,
filter,
delta,
m_window_movement_strides,
m_window_dilation_strides,
m_padding_below,
m_padding_above,
m_data_dilation_strides));
auto filter_bias_backprop =
std::make_shared<op::ConvolutionBiasBackpropFiltersBias>(data,
filter_shape,
bias_shape,
delta,
m_window_movement_strides,
m_window_dilation_strides,
m_padding_below,
m_padding_above,
m_data_dilation_strides);
auto filter_delta = std::make_shared<op::GetOutputElement>(filter_bias_backprop, 0);
auto bias_delta = std::make_shared<op::GetOutputElement>(filter_bias_backprop, 1);
adjoints.add_delta(filter, filter_delta);
adjoints.add_delta(bias, bias_delta);
}
op::ConvolutionBiasBackpropFiltersBias::ConvolutionBiasBackpropFiltersBias(
const std::shared_ptr<Node>& data_batch,
const Shape& filters_shape,
const Shape& bias_shape,
const std::shared_ptr<Node>& output_delta,
const Strides& window_movement_strides_forward,
const Strides& window_dilation_strides_forward,
const CoordinateDiff& padding_below_forward,
const CoordinateDiff& padding_above_forward,
const Strides& data_dilation_strides_forward)
: RequiresTensorViewArgs("ConvolutionBiasBackpropFiltersBias", {data_batch, output_delta})
, m_filters_shape(filters_shape)
, m_bias_shape(bias_shape)
, m_window_movement_strides_forward(window_movement_strides_forward)
, m_window_dilation_strides_forward(window_dilation_strides_forward)
, m_padding_below_forward(padding_below_forward)
, m_padding_above_forward(padding_above_forward)
, m_data_dilation_strides_forward(data_dilation_strides_forward)
{
auto& data_batch_shape = get_input_shape(0);
auto& data_batch_et = get_input_element_type(0);
auto& output_delta_et = get_input_element_type(1);
//
// Make sure data batch and output delta element types match.
//
if (data_batch_et != output_delta_et)
{
throw ngraph_error(
"ConvolutionBiasBackpropFilterBias data batch and output delta element types do not "
"match");
}
// Forward Backward
// Window movement strides q p_f
// Window dilation strides p_f q
// Padding below a_x a_x
// Padding above b_x b_x - (a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q
// Data dilation strides p_x p_x
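    // Illustrative example: with S_x = 5, S_f = 3, q = 2, p_f = p_x = 1, a_x = 0, b_x = 1,
    // the backward padding above is 1 - (0 + 4 + 1 - 2) % 2 = 1 - 1 = 0.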
for (size_t i = 0; i < filters_shape.size() - 2; i++)
{
m_window_movement_strides_backward.push_back(window_dilation_strides_forward[i]);
m_window_dilation_strides_backward.push_back(window_movement_strides_forward[i]);
m_padding_below_backward.push_back(padding_below_forward[i]);
m_padding_above_backward.push_back(
padding_above_forward[i] -
(padding_below_forward[i] +
(data_batch_shape[i + 2] - 1) * data_dilation_strides_forward[i] +
padding_above_forward[i] -
(filters_shape[i + 2] - 1) * window_dilation_strides_forward[i]) %
window_movement_strides_forward[i]);
m_data_dilation_strides_backward.push_back(data_dilation_strides_forward[i]);
}
add_output(data_batch_et, filters_shape);
add_output(data_batch_et, bias_shape);
}
std::shared_ptr<Node>
op::ConvolutionBiasBackpropFiltersBias::copy_with_new_args(const NodeVector& new_args) const
{
if (new_args.size() != 2)
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<ConvolutionBiasBackpropFiltersBias>(new_args.at(0),
m_filters_shape,
m_bias_shape,
new_args.at(1),
m_window_movement_strides_forward,
m_window_dilation_strides_forward,
m_padding_below_forward,
m_padding_above_forward,
m_data_dilation_strides_forward);
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/ops/convolution.hpp"
#include "ngraph/ops/util/requires_tensor_view_args.hpp"
namespace ngraph
{
namespace op
{
/// \brief Convolution + bias forward prop for batched convolution operation.
class ConvolutionBias : public util::RequiresTensorViewArgs
{
public:
ConvolutionBias(const std::shared_ptr<op::Convolution>& conv,
const std::shared_ptr<Node>& bias);
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
std::shared_ptr<Node> get_bias() { return get_input_op(2); }
std::shared_ptr<Node> get_filters() { return get_input_op(1); }
std::shared_ptr<Node> get_data_batch() { return get_input_op(0); }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Strides m_data_dilation_strides;
private:
ConvolutionBias(const std::shared_ptr<Node>& data_batch,
const std::shared_ptr<Node>& filters,
const std::shared_ptr<Node>& bias,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides);
};
/// \brief Filters and bias backprop for batched convolution operation. Data backprop is
/// the same as regular convolution backprop for data.
class ConvolutionBiasBackpropFiltersBias : public util::RequiresTensorViewArgs
{
public:
ConvolutionBiasBackpropFiltersBias(const std::shared_ptr<Node>& data_batch,
const Shape& filters_shape,
const Shape& bias_shape,
const std::shared_ptr<Node>& output_delta,
const Strides& window_movement_strides_forward,
const Strides& window_dilation_strides_forward,
const CoordinateDiff& padding_below_forward,
const CoordinateDiff& padding_above_forward,
const Strides& data_dilation_strides_forward);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
/// \return The filters tensor shape.
const Shape& get_filters_shape() const { return m_filters_shape; }
/// \return The bias tensor shape.
const Shape& get_bias_shape() const { return m_bias_shape; }
/// \return The window movement strides from the forward prop.
const Strides& get_window_movement_strides_forward() const
{
return m_window_movement_strides_forward;
}
/// \return The window dilation strides from the forward prop.
const Strides& get_window_dilation_strides_forward() const
{
return m_window_dilation_strides_forward;
}
/// \return The padding-below sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_padding_below_forward() const
{
return m_padding_below_forward;
}
/// \return The padding-above sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_padding_above_forward() const
{
return m_padding_above_forward;
}
/// \return The data dilation strides from the forward prop.
const Strides& get_data_dilation_strides_forward() const
{
return m_data_dilation_strides_forward;
}
/// \return The window movement strides for the backward prop.
const Strides& get_window_movement_strides_backward() const
{
return m_window_movement_strides_backward;
}
/// \return The window dilation strides for the backward prop.
const Strides& get_window_dilation_strides_backward() const
{
return m_window_dilation_strides_backward;
}
/// \return The padding-below sizes (possibly negative) for the backward prop.
const CoordinateDiff& get_padding_below_backward() const
{
return m_padding_below_backward;
}
/// \return The padding-above sizes (possibly negative) for the backward prop.
const CoordinateDiff& get_padding_above_backward() const
{
return m_padding_above_backward;
}
/// \return The data dilation strides for the backward prop.
const Strides& get_data_dilation_strides_backward() const
{
return m_data_dilation_strides_backward;
}
protected:
Shape m_filters_shape;
Shape m_bias_shape;
Strides m_window_movement_strides_forward;
Strides m_window_dilation_strides_forward;
CoordinateDiff m_padding_below_forward;
CoordinateDiff m_padding_above_forward;
Strides m_data_dilation_strides_forward;
Strides m_window_movement_strides_backward;
Strides m_window_dilation_strides_backward;
CoordinateDiff m_padding_below_backward;
CoordinateDiff m_padding_above_backward;
Strides m_data_dilation_strides_backward;
};
}
}
@@ -32,6 +32,7 @@
#include "ngraph/ops/relu.hpp"
#include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
#include "ngraph/runtime/cpu/mkldnn_utils.hpp"
#include "ngraph/runtime/cpu/ops/conv_bias.hpp"
#include "ngraph/runtime/cpu/ops/sigmoid.hpp"
using namespace std;
@@ -153,6 +154,59 @@ namespace ngraph
                }
            }
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBias)
{
auto convolution = static_cast<op::ConvolutionBias*>(node);
auto data_shape = node->get_input_shape(0);
auto weights_shape = node->get_input_shape(1);
auto result_shape = node->get_output_shape(0);
auto data_rank = data_shape.size();
auto weights_rank = weights_shape.size();
bool data_dilated = false;
for (size_t s : convolution->get_data_dilation_strides())
{
data_dilated = data_dilated || (s != 1);
}
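                    // MKLDNN is assigned only for non-dilated 4-D float32 data; the
                    // ConvolutionBias emitter has no non-MKLDNN fallback and throws otherwise.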
if (!data_dilated && data_rank == 4 && weights_rank == 4 &&
node->get_input_element_type(0) == element::f32)
{
auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
op_annotations->set_mkldnn_op(true);
convolution->set_op_annotations(op_annotations);
}
}
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBiasBackpropFiltersBias)
{
auto convolution = static_cast<op::ConvolutionBiasBackpropFiltersBias*>(node);
auto data_shape = node->get_input_shape(0);
auto delta_shape = node->get_input_shape(1);
auto data_rank = data_shape.size();
auto delta_rank = delta_shape.size();
bool data_dilated = false;
for (size_t s : convolution->get_data_dilation_strides_forward())
{
data_dilated = data_dilated || (s != 1);
}
if (!data_dilated && data_rank == 4 && delta_rank == 4 &&
node->get_input_element_type(0) == element::f32)
{
auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
op_annotations->set_mkldnn_op(true);
convolution->set_op_annotations(op_annotations);
}
}
                template <>
                void CPUAssignment::ASSIGN_DECL(ngraph::op::AvgPool)
                {
@@ -266,6 +320,10 @@ static const runtime::cpu::pass::AssignOpMap s_dispatcher{
     &runtime::cpu::pass::CPUAssignment::assign<ngraph::op::ConvolutionBackpropData>},
    {TI(ngraph::op::ConvolutionBackpropFilters),
     &runtime::cpu::pass::CPUAssignment::assign<ngraph::op::ConvolutionBackpropFilters>},
{TI(ngraph::op::ConvolutionBias),
&runtime::cpu::pass::CPUAssignment::assign<ngraph::op::ConvolutionBias>},
{TI(ngraph::op::ConvolutionBiasBackpropFiltersBias),
&runtime::cpu::pass::CPUAssignment::assign<ngraph::op::ConvolutionBiasBackpropFiltersBias>},
    {TI(ngraph::op::AvgPool), &runtime::cpu::pass::CPUAssignment::assign<ngraph::op::AvgPool>},
    {TI(ngraph::op::AvgPoolBackprop),
     &runtime::cpu::pass::CPUAssignment::assign<ngraph::op::AvgPoolBackprop>},
...
@@ -43,6 +43,7 @@
#include "ngraph/pattern/matcher.hpp"
#include "ngraph/pattern/op/any.hpp"
#include "ngraph/pattern/op/label.hpp"
#include "ngraph/runtime/cpu/ops/conv_bias.hpp"
#include "ngraph/runtime/cpu/ops/matmul_bias.hpp" #include "ngraph/runtime/cpu/ops/matmul_bias.hpp"
#include "ngraph/runtime/cpu/ops/sigmoid.hpp" #include "ngraph/runtime/cpu/ops/sigmoid.hpp"
...@@ -566,3 +567,37 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_sigmoid() ...@@ -566,3 +567,37 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_sigmoid()
auto m = std::make_shared<ngraph::pattern::Matcher>(divide_1_over_exp, callback); auto m = std::make_shared<ngraph::pattern::Matcher>(divide_1_over_exp, callback);
this->add_matcher(m); this->add_matcher(m);
} }
void ngraph::runtime::cpu::pass::CPUFusion::construct_conv_bias()
{
Shape shape{2, 2, 1, 1};
auto data_batch = std::make_shared<pattern::op::Label>(element::f32, shape);
auto filters = std::make_shared<pattern::op::Label>(element::f32, shape);
auto pbias = std::make_shared<pattern::op::Label>(element::f32, Shape{});
auto pbroadcast = std::make_shared<op::Broadcast>(pbias, shape, AxisSet{0, 1, 2, 3});
auto pconv1 = std::make_shared<op::Convolution>(data_batch,
filters,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
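    // Pattern root: the element-wise Add produced by Broadcast(bias) + Convolution.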
auto p_conv_bias = pbroadcast + pconv1;
ngraph::pattern::gr_callback_fn callback = [](pattern::Matcher& m) {
NGRAPH_DEBUG << "In callback for construct_conv_bias against node = "
<< m.match_root()->get_name();
auto pattern_map = m.get_pattern_map();
auto conv = std::dynamic_pointer_cast<op::Convolution>(m.match_root()->get_input_op(0));
auto bias = m.match_root()->get_input_op(1)->get_input_op(0);
auto conv_bias = std::shared_ptr<Node>(new op::ConvolutionBias(conv, bias));
return conv_bias;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(p_conv_bias, callback);
this->add_matcher(m);
}
@@ -44,11 +44,13 @@ public:
        construct_zero_padded_reshaped_conv();
        construct_zero_padded_conv();
        construct_sigmoid();
construct_conv_bias();
    }
private:
    void construct_matmul_pattern();
    void construct_matmulbias_pattern();
void construct_conv_bias();
    void construct_fprop_bn();
    void construct_sigmoid();
    void construct_zero_padded_reshaped_conv();
...
@@ -21,14 +21,18 @@
#include <memory>
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/graph_util.hpp" #include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp" #include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp" #include "ngraph/ngraph.hpp"
#include "ngraph/ops/batch_norm.hpp" #include "ngraph/ops/batch_norm.hpp"
#include "ngraph/ops/get_output_element.hpp" #include "ngraph/ops/get_output_element.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/sum.hpp" #include "ngraph/ops/sum.hpp"
#include "ngraph/pass/graph_rewrite.hpp" #include "ngraph/pass/graph_rewrite.hpp"
#include "ngraph/pass/manager.hpp" #include "ngraph/pass/manager.hpp"
#include "ngraph/pass/reshape_elimination.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/pattern/matcher.hpp" #include "ngraph/pattern/matcher.hpp"
#include "ngraph/pattern/op/any.hpp" #include "ngraph/pattern/op/any.hpp"
#include "ngraph/pattern/op/label.hpp" #include "ngraph/pattern/op/label.hpp"
...@@ -36,6 +40,7 @@ ...@@ -36,6 +40,7 @@
#include "ngraph/file_util.hpp" #include "ngraph/file_util.hpp"
#include "ngraph/pass/reshape_elimination.hpp" #include "ngraph/pass/reshape_elimination.hpp"
#include "ngraph/pass/visualize_tree.hpp" #include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/runtime/cpu/ops/conv_bias.hpp"
#include "ngraph/runtime/cpu/ops/matmul_bias.hpp" #include "ngraph/runtime/cpu/ops/matmul_bias.hpp"
#include "ngraph/runtime/cpu/ops/sigmoid.hpp" #include "ngraph/runtime/cpu/ops/sigmoid.hpp"
#include "ngraph/runtime/cpu/pass/cpu_fusion.hpp" #include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
...@@ -672,6 +677,190 @@ TEST(cpu_fusion, non_zero_padded_conv) ...@@ -672,6 +677,190 @@ TEST(cpu_fusion, non_zero_padded_conv)
ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1); ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
} }
TEST(cpu_fusion, fuse_conv_bias)
{
pass::Manager pass_manager;
pass_manager.register_pass<ngraph::pass::ReshapeElimination>();
pass_manager.register_pass<runtime::cpu::pass::CPUFusion>();
const string json_path = file_util::path_join(SERIALIZED_ZOO, "conv_bias.json");
const string json_string = file_util::read_file_to_string(json_path);
stringstream ss(json_string);
shared_ptr<Function> func = ngraph::deserialize(ss);
pass_manager.run_passes(func);
size_t cb = count_ops_of_type<op::ConvolutionBias>(func);
ASSERT_GT(cb, 0);
}
struct ConvolutionBiasTestData
{
size_t n{0};
size_t c{0};
size_t filter{0};
size_t kernel_size{0};
size_t w{0};
size_t h{0};
shared_ptr<runtime::TensorView> data_val;
shared_ptr<runtime::TensorView> weights_val;
shared_ptr<runtime::TensorView> bias_val;
shared_ptr<runtime::TensorView> result_val;
shared_ptr<runtime::TensorView> delta_val;
shared_ptr<runtime::TensorView> d_data_val;
shared_ptr<runtime::TensorView> d_weights_val;
shared_ptr<runtime::TensorView> d_bias_val;
vector<float> expected_result_val;
vector<float> expected_d_data_val;
vector<float> expected_d_weights_val;
vector<float> expected_d_bias_val;
Shape data_shape;
Shape weights_shape;
Shape bias_shape;
Shape result_shape;
shared_ptr<op::Parameter> data;
shared_ptr<op::Parameter> weights;
shared_ptr<op::Parameter> bias;
shared_ptr<op::Parameter> delta;
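    // n1c1h3w3: one 1x1x3x3 input, a single 3x3 filter, and a one-element bias giving a
    // 1x1 output, together with reference values for the forward result and all three
    // input deltas.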
void n1c1h3w3(shared_ptr<runtime::Backend> backend)
{
n = 1;
c = 1;
filter = 1;
kernel_size = 3;
w = 3;
h = w;
data_shape = Shape{n, c, h, w};
data = make_shared<op::Parameter>(element::f32, data_shape);
weights_shape = Shape{filter, c, kernel_size, kernel_size};
weights = make_shared<op::Parameter>(element::f32, weights_shape);
bias_shape = Shape{filter};
bias = make_shared<op::Parameter>(element::f32, bias_shape);
result_shape = Shape{n, filter, 1, 1};
data_val = backend->make_primary_tensor_view(element::f32, data_shape);
copy_data(data_val,
vector<float>{-0.67765152f,
0.10073948f,
0.57595438f,
-0.3469252f,
-0.22134334f,
-1.80471897f,
-0.80642909f,
1.22033095f,
2.23235631f});
weights_val = backend->make_primary_tensor_view(element::f32, weights_shape);
copy_data(weights_val,
vector<float>{0.20070229f,
-0.54968649f,
-0.19819015f,
-0.38577855f,
1.37109005f,
-0.23789984f,
0.14867957f,
-0.49851316f,
-0.84815776f});
bias_val = backend->make_primary_tensor_view(element::f32, bias_shape);
copy_data(bias_val, vector<float>{0.07811152f});
result_val = backend->make_primary_tensor_view(element::f32, result_shape);
copy_data(result_val, vector<float>{0});
delta = make_shared<op::Parameter>(element::f32, result_shape);
delta_val = backend->make_primary_tensor_view(element::f32, result_shape);
copy_data(delta_val, vector<float>{-2.58936238f});
d_data_val = backend->make_primary_tensor_view(element::f32, data_shape);
copy_data(d_data_val, vector<float>{0, 0, 0, 0, 0, 0, 0, 0, 0});
d_weights_val = backend->make_primary_tensor_view(element::f32, weights_shape);
copy_data(d_weights_val, vector<float>{0, 0, 0, 0, 0, 0, 0, 0, 0});
d_bias_val = backend->make_primary_tensor_view(element::f32, bias_shape);
copy_data(d_bias_val, vector<float>{0});
expected_result_val = vector<float>{-2.58936238f};
expected_d_data_val = vector<float>{-0.51969099f,
1.42333758f,
0.5131861f,
0.99892044f,
-3.5502491f,
0.61600888f,
-0.3849853f,
1.29083121f,
2.19618773f};
expected_d_weights_val = vector<float>{1.7546854f,
-0.26085103f,
-1.49135458f,
0.89831507f,
0.57313812f,
4.67307138f,
2.08813715f,
-3.15987897f,
-5.7803793f};
expected_d_bias_val = vector<float>{-2.58936238f};
}
};
TEST(cpu_fusion, conv_bias_fprop_n1c1h3w3)
{
auto manager = runtime::Manager::get("CPU");
auto backend = manager->allocate_backend();
ConvolutionBiasTestData conv_test;
conv_test.n1c1h3w3(backend);
auto convolution = make_shared<op::Convolution>(conv_test.data, conv_test.weights);
auto convolution_bias = make_shared<op::ConvolutionBias>(convolution, conv_test.bias);
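    // The fused op computes convolution(data, weights) + bias, with one bias value per
    // output channel (bias_shape = {filter}).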
auto f = make_shared<Function>(
convolution_bias, op::ParameterVector{conv_test.data, conv_test.weights, conv_test.bias});
auto external = manager->compile(f);
auto cf = backend->make_call_frame(external);
cf->call({conv_test.data_val, conv_test.weights_val, conv_test.bias_val},
{conv_test.result_val});
    EXPECT_TRUE(
        test::all_close(conv_test.expected_result_val, read_vector<float>(conv_test.result_val)));
}
TEST(cpu_fusion, conv_bias_bprop_n1c1h3w3)
{
auto manager = runtime::Manager::get("CPU");
auto backend = manager->allocate_backend();
ConvolutionBiasTestData conv_test;
conv_test.n1c1h3w3(backend);
auto convolution = make_shared<op::Convolution>(conv_test.data, conv_test.weights);
auto convolution_bias = make_shared<op::ConvolutionBias>(convolution, conv_test.bias);
auto f = make_shared<Function>(
convolution_bias, op::ParameterVector{conv_test.data, conv_test.weights, conv_test.bias});
auto d_data = convolution_bias->backprop_node(conv_test.data, conv_test.delta);
auto d_weights = convolution_bias->backprop_node(conv_test.weights, conv_test.delta);
auto d_bias = convolution_bias->backprop_node(conv_test.bias, conv_test.delta);
auto df = make_shared<Function>(
NodeVector{d_data, d_weights, d_bias},
op::ParameterVector{conv_test.data, conv_test.weights, conv_test.bias, conv_test.delta});
auto external = manager->compile(df);
auto cf = backend->make_call_frame(external);
cf->call({conv_test.data_val, conv_test.weights_val, conv_test.bias_val, conv_test.delta_val},
{conv_test.d_data_val, conv_test.d_weights_val, conv_test.d_bias_val});
EXPECT_TRUE(
test::all_close(conv_test.expected_d_data_val, read_vector<float>(conv_test.d_data_val)));
EXPECT_TRUE(test::all_close(conv_test.expected_d_weights_val,
read_vector<float>(conv_test.d_weights_val)));
EXPECT_TRUE(
test::all_close(conv_test.expected_d_bias_val, read_vector<float>(conv_test.d_bias_val)));
}
TEST(cpu_fusion, sigmoid_fprop_fusion)
{
...