Commit 51bcb92d authored by Jayaram Bobba's avatar Jayaram Bobba Committed by Robert Kimball

Move to MKLDNN v0.16. Git hash picks up some bug fixes post v0.16 (#1599)

* Move to MKLDNN v0.16. Git hash picks up some bug fixes post v0.16

* Temporary workaround for an MKLDNN bug
parent dc45b5ac
......@@ -43,19 +43,19 @@ endif()
# This section sets up MKL as an external project to be used later by MKLDNN
# (merged post-commit state: v0.16 MKLML packages; stale v0.14 lines removed)
set(MKLURLROOT "https://github.com/intel/mkl-dnn/releases/download/v0.16/")
set(MKLVERSION "2019.0.20180710")
if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
    set(MKLPACKAGE "mklml_lnx_${MKLVERSION}.tgz")
    set(MKL_SHA1_HASH e7c34105d486908b87b4b8c667c3a089f079ca51)
    set(MKL_LIBS libiomp5.so libmklml_intel.so)
elseif (APPLE)
    set(MKLPACKAGE "mklml_mac_${MKLVERSION}.tgz")
    set(MKL_SHA1_HASH c873d2bd36a0100344d1aac1b1e56c8c3a43a845)
    set(MKL_LIBS libmklml.dylib libiomp5.dylib)
elseif (WIN32)
    set(MKLPACKAGE "mklml_win_${MKLVERSION}.zip")
    set(MKL_SHA1_HASH 3767d9a1ad679d647b8c6edf1f97c767881d0c5c)
    set(MKL_LIBS mklml.dll libiomp5md.dll)
endif()
set(MKLURL ${MKLURLROOT}${MKLPACKAGE})
......@@ -82,7 +82,7 @@ foreach(LIB ${MKL_LIBS})
endforeach()
set(MKLDNN_GIT_REPO_URL https://github.com/intel/mkl-dnn)
# Pinned past v0.16 to pick up post-release bug fixes (stale tag 0e7ca73 removed)
set(MKLDNN_GIT_TAG "b9558fd")
# The 'BUILD_BYPRODUCTS' argument was introduced in CMake 3.2.
if(${CMAKE_VERSION} VERSION_LESS 3.2)
......
......@@ -118,26 +118,47 @@ namespace ngraph
}
}
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Convolution)
template <typename T>
bool can_use_mkldnn_conv(ngraph::Node* node)
{
auto convolution = static_cast<op::Convolution*>(node);
auto arg0_shape = node->get_input_shape(0);
auto arg1_shape = node->get_input_shape(1);
auto result_shape = node->get_output_shape(0);
auto arg0_rank = arg0_shape.size();
auto arg1_rank = arg1_shape.size();
auto convolution = static_cast<const T*>(node);
auto arg0_rank = node->get_input_shape(0).size();
bool data_dilated = false;
for (size_t s : convolution->get_data_dilation_strides())
{
data_dilated = data_dilated || (s != 1);
if (s != 1)
return false;
}
if (arg0_rank != 4 && arg0_rank != 5)
{
return false;
}
if (node->get_input_element_type(0) != element::f32)
{
return false;
}
// Temporarily disable MKLDNN for large paddings due to
// a bug in v0.16 - MKFDNN-982
for (auto s : convolution->get_padding_below())
{
if (s >= 7)
return false;
}
for (auto s : convolution->get_padding_above())
{
if (s >= 7)
return false;
}
if (!data_dilated && ((arg0_rank == 4 && arg1_rank == 4) ||
(arg0_rank == 5 && arg1_rank == 5)) &&
node->get_input_element_type(0) == element::f32)
return true;
}
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::Convolution)
{
auto convolution = static_cast<op::Convolution*>(node);
if (can_use_mkldnn_conv<ngraph::op::Convolution>(node))
{
auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
......@@ -151,20 +172,7 @@ namespace ngraph
{
auto convolution = static_cast<op::GroupConvolution*>(node);
auto arg0_shape = node->get_input_shape(0);
auto arg1_shape = node->get_input_shape(1);
auto result_shape = node->get_output_shape(0);
auto arg0_rank = arg0_shape.size();
auto arg1_rank = arg1_shape.size();
bool data_dilated = false;
for (size_t s : convolution->get_data_dilation_strides())
{
data_dilated = data_dilated || (s != 1);
}
if (!data_dilated && arg0_rank == 4 && arg1_rank == 4 &&
node->get_input_element_type(0) == element::f32)
if (can_use_mkldnn_conv<ngraph::op::GroupConvolution>(node))
{
auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
......@@ -178,17 +186,7 @@ namespace ngraph
{
auto convolution = static_cast<op::ConvolutionRelu*>(node);
auto arg0_rank = node->get_input_shape(0).size();
auto arg1_rank = node->get_input_shape(1).size();
bool data_dilated = false;
for (size_t s : convolution->get_data_dilation_strides())
{
data_dilated = data_dilated || (s != 1);
}
if (!data_dilated && arg0_rank == 4 && arg1_rank == 4 &&
node->get_input_element_type(0) == element::f32)
if (can_use_mkldnn_conv<ngraph::op::ConvolutionRelu>(node))
{
auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
......@@ -202,17 +200,7 @@ namespace ngraph
{
auto convolution = static_cast<op::ConvolutionBiasAdd*>(node);
auto arg0_rank = node->get_input_shape(0).size();
auto arg1_rank = node->get_input_shape(1).size();
bool data_dilated = false;
for (size_t s : convolution->get_data_dilation_strides())
{
data_dilated = data_dilated || (s != 1);
}
if (!data_dilated && arg0_rank == 4 && arg1_rank == 4 &&
node->get_input_element_type(0) == element::f32)
if (can_use_mkldnn_conv<ngraph::op::ConvolutionBiasAdd>(node))
{
auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
......@@ -229,17 +217,7 @@ namespace ngraph
{
auto convolution = static_cast<op::ConvolutionAdd*>(node);
auto arg0_rank = node->get_input_shape(0).size();
auto arg1_rank = node->get_input_shape(1).size();
bool data_dilated = false;
for (size_t s : convolution->get_data_dilation_strides())
{
data_dilated = data_dilated || (s != 1);
}
if (!data_dilated && arg0_rank == 4 && arg1_rank == 4 &&
node->get_input_element_type(0) == element::f32)
if (can_use_mkldnn_conv<ngraph::op::ConvolutionAdd>(node))
{
auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment