Commit 5cc8690b authored by Amy Zhuang's avatar Amy Zhuang Committed by Scott Cyphers

Update MKLDNN to v1.0.4. (#3951)

* Update MKLDNN to v1.0.4.

Build MKLDNN-v1 by default.

* Add bf16 support check.

* Modify visibility.
parent d2603f4d
......@@ -164,7 +164,7 @@ option(NGRAPH_UNIT_TEST_ENABLE "Control the building of unit tests" TRUE)
option(NGRAPH_DOC_BUILD_ENABLE "Control the building of documentation" FALSE)
option(NGRAPH_TOOLS_ENABLE "Control the building of tool" TRUE)
option(NGRAPH_CPU_ENABLE "Control the building of the CPU backend" TRUE)
option(NGRAPH_USE_LEGACY_MKLDNN "Use legacy MKLDNN" TRUE)
option(NGRAPH_USE_LEGACY_MKLDNN "Use legacy MKLDNN" FALSE)
option(NGRAPH_MLIR_ENABLE "Control the building of MLIR backend" FALSE)
option(NGRAPH_INTERPRETER_ENABLE "Control the building of the INTERPRETER backend" TRUE)
option(NGRAPH_NOP_ENABLE "Control the building of the NOP backend" TRUE)
......
......@@ -18,10 +18,12 @@ include(ExternalProject)
# Includes blas 3.8.0 in mkldnn
set(NGRAPH_MKLDNN_SHORT_VERSION 1)
set(NGRAPH_MKLDNN_FULL_VERSION 1.0.0.0)
set(NGRAPH_MKLDNN_VERSION "v1.0")
set(NGRAPH_MKLDNN_SUB_VERSION "2019.0.5.20190502")
set(NGRAPH_MKLDNN_GIT_TAG "553c23f")
set(NGRAPH_MKLDNN_FULL_VERSION 1.0.4.0)
set(NGRAPH_MKLDNN_MKLML_ASSET_VERSION "v0.21")
set(NGRAPH_MKLDNN_VERSION "v1.0.4")
set(NGRAPH_MKLDNN_MKLML_VERSION "2019.0.5.20190502")
set(NGRAPH_MKLDNN_MKLML_WIN32_VERSION "2020.0.20190813")
set(NGRAPH_MKLDNN_GIT_TAG "v1.0.4")
#------------------------------------------------------------------------------
# Fetch and install MKL-DNN
......@@ -88,8 +90,9 @@ endif()
# This section sets up MKL as an external project to be used later by MKLDNN
set(MKLURLROOT "https://github.com/intel/mkl-dnn/releases/download/v0.19-rc/")
set(MKLVERSION ${NGRAPH_MKLDNN_SUB_VERSION})
set(MKLURLROOT "https://github.com/intel/mkl-dnn/releases/download/${NGRAPH_MKLDNN_MKLML_ASSET_VERSION}/")
set(MKLVERSION ${NGRAPH_MKLDNN_MKLML_VERSION})
set(MKLWIN32VERSION ${NGRAPH_MKLDNN_MKLML_WIN32_VERSION})
if (LINUX)
set(MKLPACKAGE "mklml_lnx_${MKLVERSION}.tgz")
set(MKL_SHA1_HASH 6ab490f0b358124338d04ee9383c3cbc536969d8)
......@@ -97,8 +100,8 @@ elseif (APPLE)
set(MKLPACKAGE "mklml_mac_${MKLVERSION}.tgz")
set(MKL_SHA1_HASH a1c42af04f990b0e515a1c31946424b2e68fccc9)
elseif (WIN32)
set(MKLPACKAGE "mklml_win_${MKLVERSION}.zip")
set(MKL_SHA1_HASH 9d6ff4d5a486689338158093e96c43ee442b65f0)
set(MKLPACKAGE "mklml_win_${MKLWIN32VERSION}.zip")
set(MKL_SHA1_HASH cc117093e658d50a8e4e3d1cf192c300b6bac0fc)
endif()
set(MKL_LIBS ${MKLML_LIB} ${OMP_LIB})
set(MKLURL ${MKLURLROOT}${MKLPACKAGE})
......
......@@ -63,18 +63,18 @@ index 99970659..ef88a0a7 100644
# Compilation happens with OpenMP to enable `#pragma omp simd`
# but during linkage OpenMP dependency should be avoided
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 60bb0c94..cc3fc9d6 100644
index f99ec31ce..b3c1d9bb8 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -73,8 +73,10 @@ endif()
add_library(${LIB_NAME}
${MKLDNN_LIBRARY_TYPE} ${HEADERS} ${${LIB_NAME}_SUB_OBJS})
-set_property(TARGET ${LIB_NAME} PROPERTY VERSION "${PROJECT_VERSION}.0")
-set_property(TARGET ${LIB_NAME} PROPERTY SOVERSION "0")
-set_property(TARGET ${LIB_NAME} PROPERTY VERSION "${MKLDNN_VERSION_MAJOR}.${MKLDNN_VERSION_MINOR}")
-set_property(TARGET ${LIB_NAME} PROPERTY SOVERSION "${MKLDNN_VERSION_MAJOR}")
+if(MKLDNN_LIB_VERSIONING_ENABLE)
+ set_property(TARGET ${LIB_NAME} PROPERTY VERSION "${PROJECT_VERSION}.0")
+ set_property(TARGET ${LIB_NAME} PROPERTY SOVERSION "0")
+ set_property(TARGET ${LIB_NAME} PROPERTY VERSION "${MKLDNN_VERSION_MAJOR}.${MKLDNN_VERSION_MINOR}")
+ set_property(TARGET ${LIB_NAME} PROPERTY SOVERSION "${MKLDNN_VERSION_MAJOR}")
+endif()
set_property(TARGET ${LIB_NAME} PROPERTY PUBLIC_HEADER ${HEADERS})
......
......@@ -271,6 +271,7 @@ std::map<element::Type, const mkldnn::memory::data_type>&
// Mapping from POD types to MKLDNN data types
static std::map<element::Type, const mkldnn::memory::data_type> s_mkldnn_data_type_map = {
{element::boolean, mkldnn::memory::data_type::s8},
{element::bf16, mkldnn::memory::data_type::bf16},
{element::f32, mkldnn::memory::data_type::f32},
{element::f64, mkldnn::memory::data_type::data_undef},
{element::i8, mkldnn::memory::data_type::s8},
......@@ -290,6 +291,7 @@ std::map<element::Type, const std::string>&
{
static std::map<element::Type, const std::string> s_mkldnn_data_type_string_map{
{element::boolean, "mkldnn::memory::data_type::s8"},
{element::bf16, "mkldnn::memory::data_type::bf16"},
{element::f32, "mkldnn::memory::data_type::f32"},
{element::f64, "mkldnn::memory::data_type::data_undef"},
{element::i8, "mkldnn::memory::data_type::s8"},
......@@ -778,6 +780,26 @@ mkldnn::memory::desc runtime::cpu::mkldnn_utils::create_blocked_mkldnn_md_helper
return memory::desc(md);
}
bool runtime::cpu::mkldnn_utils::is_bf16_supported()
{
try
{
mkldnn::memory::dims dims{2, 3, 4, 5};
auto input_desc =
mkldnn::memory::desc(dims, mkldnn::memory::data_type::f32, memory::format::nchw);
auto result_desc =
mkldnn::memory::desc(dims, mkldnn::memory::data_type::bf16, memory::format::nchw);
auto reorder_prim_desc = mkldnn::reorder::primitive_desc(
{input_desc, executor::global_cpu_engine}, {result_desc, executor::global_cpu_engine});
}
catch (const mkldnn::error& e)
{
return false;
}
return true;
}
#else
std::map<element::Type, const mkldnn::memory::data_type>&
runtime::cpu::mkldnn_utils::get_mkldnn_data_type_map()
......@@ -1719,4 +1741,22 @@ bool runtime::cpu::mkldnn_utils::is_mkldnn_desc_blocked_data_format(
#endif
return blk.inner_nblks != 0;
}
bool runtime::cpu::mkldnn_utils::is_bf16_supported()
{
try
{
mkldnn::memory::dims dims{2, 3, 4, 5};
mkldnn::memory::dims strides{60, 20, 5, 1};
auto input_desc = mkldnn::memory::desc(dims, mkldnn::memory::data_type::f32, strides);
auto result_desc = mkldnn::memory::desc(dims, mkldnn::memory::data_type::bf16, strides);
auto reorder_prim_desc = mkldnn::reorder::primitive_desc(
executor::global_cpu_engine, input_desc, executor::global_cpu_engine, result_desc);
}
catch (const mkldnn::error& e)
{
return false;
}
return true;
}
#endif
......@@ -20,6 +20,7 @@
#include "ngraph/axis_vector.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
#include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
#include "ngraph/runtime/cpu/op/batch_norm_relu.hpp"
#include "ngraph/type/element_type.hpp"
......@@ -149,6 +150,8 @@ namespace ngraph
bool can_use_mkldnn_batchnorm_fprop(const ngraph::Node* node);
bool can_use_mkldnn_batchnorm_bprop(const ngraph::Node* node);
bool CPU_BACKEND_API is_bf16_supported();
//
// Intel(R) MKL-DNN supports the Winograd algorithm for convolutions with the
// following sizes:
......@@ -226,6 +229,15 @@ namespace ngraph
{
return false;
}
// Check if bf16 is supported on the platform
if (!is_bf16_supported() && (node->get_input_element_type(0) == element::bf16 ||
node->get_input_element_type(1) == element::bf16 ||
node->get_output_element_type(0) == element::bf16))
{
return false;
}
return true;
}
......
......@@ -411,7 +411,8 @@ namespace ngraph
(node->get_input_element_type(0) == element::f32 ||
node->get_input_element_type(0) == element::u8 ||
node->get_input_element_type(0) == element::i8 ||
node->get_input_element_type(0) == element::bf16))
(node->get_input_element_type(0) == element::bf16 &&
runtime::cpu::mkldnn_utils::is_bf16_supported())))
{
runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node);
}
......
......@@ -2155,9 +2155,15 @@ TEST(cpu_test, tensor_copy_from_different_layout)
EXPECT_EQ((vector<uint8_t>{1, 4, 2, 5, 3, 6}), read_vector<uint8_t>(b));
}
#if MKLDNN_VERSION_MAJOR >= 1
TEST(cpu_test, max_pool_bf16)
{
if (!runtime::cpu::mkldnn_utils::is_bf16_supported())
{
// TODO change to skip when there is a new release of gtest
NGRAPH_WARN << "This test is skipped for platform without bf16 support.";
return;
}
Shape shape_a{1, 1, 3, 5};
Shape window_shape{2, 3};
auto window_movement_strides = Strides{1, 1};
......@@ -2186,6 +2192,13 @@ TEST(cpu_test, max_pool_bf16)
TEST(cpu_test, convolution_simple_bf16)
{
if (!runtime::cpu::mkldnn_utils::is_bf16_supported())
{
// TODO change to skip when there is a new release of gtest
NGRAPH_WARN << "This test is skipped for platform without bf16 support.";
return;
}
Shape shape_a{1, 2, 2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_b{2, 2, 1, 1};
......@@ -2221,7 +2234,6 @@ TEST(cpu_test, convolution_simple_bf16)
EXPECT_EQ((vector<bfloat16>{18.0, 24.0, 30.0, 36.0, 18.0, 24.0, 30.0, 36.0}),
read_vector<bfloat16>(result));
}
#endif
// This tests a backend's implementation of the three parameter version of create_tensor
// Testing using this tensor as a Function input
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment