Commit 13c281ae authored by Robert Kimball's avatar Robert Kimball Committed by GitHub

Merge pull request #90 from NervanaSystems/jmenon/ninja

CMake: Ninja fixes and MKLDNN integration
parents 8fbbb3af a9751f84
......@@ -21,14 +21,24 @@ set(EXTERNAL_INSTALL_LOCATION ${CMAKE_BINARY_DIR}/external)
#----------------------------------------------------------------------------------------------------------
# The 'BUILD_BYPRODUCTS' argument was introduced in CMake 3.2.
# NOTE(review): this is the pre-merge (removed) side of the diff, kept here by
# the diff view; the CMake-version-guarded replacement appears below.
ExternalProject_Add(
eigen
URL http://bitbucket.org/eigen/eigen/get/3.3.3.zip
# PREFIX ${CMAKE_CURRENT_BINARY_DIR}/eigen
# 'make install' assumes a Makefile generator (breaks under Ninja); the
# replacement below drops this line and uses the default install step.
INSTALL_COMMAND make install
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${EXTERNAL_INSTALL_LOCATION}
)
# 'BUILD_BYPRODUCTS' was introduced in CMake 3.2 (Ninja needs it to track the
# external project's outputs).  Collect the optional arguments in a variable so
# the ExternalProject_Add call is written once instead of duplicated per branch.
if(${CMAKE_VERSION} VERSION_LESS 3.2)
    set(EIGEN_EXTRA_EP_ARGS)
else()
    set(EIGEN_EXTRA_EP_ARGS
        BUILD_BYPRODUCTS "${EXTERNAL_INSTALL_LOCATION}/include/eigen3")
endif()

# Download Eigen 3.3.3 and install it into the shared external prefix.
ExternalProject_Add(
    eigen
    URL http://bitbucket.org/eigen/eigen/get/3.3.3.zip
    UPDATE_COMMAND ""
    CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${EXTERNAL_INSTALL_LOCATION}
    ${EIGEN_EXTRA_EP_ARGS}
)
#----------------------------------------------------------------------------------------------------------
......
# Copyright 2017 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include(ExternalProject)
#----------------------------------------------------------------------------------------------------------
# Fetch and install MKL-DNN
#----------------------------------------------------------------------------------------------------------
# MKL-DNN is not built on macOS; skip the whole external project there.
# (APPLE is the dedicated platform variable; it avoids the unquoted
# ${CMAKE_SYSTEM_NAME} dereference the previous MATCHES test relied on.)
if(NOT APPLE)
    set(MKLDNN_GIT_REPO_URL https://github.com/01org/mkl-dnn)
    set(EXTERNAL_INSTALL_LOCATION ${CMAKE_BINARY_DIR}/external)

    # 'BUILD_BYPRODUCTS' was introduced in CMake 3.2 (Ninja needs it to track
    # the external project's outputs).  Collect the optional arguments once so
    # the ExternalProject_Add call is not duplicated per CMake version.
    if(${CMAKE_VERSION} VERSION_LESS 3.2)
        set(MKLDNN_EXTRA_EP_ARGS)
    else()
        set(MKLDNN_EXTRA_EP_ARGS
            BUILD_BYPRODUCTS "${EXTERNAL_INSTALL_LOCATION}/include/mkldnn.hpp")
    endif()

    # Clone MKL-DNN, apply the local OpenMP-flag-detection patch, and install
    # it into the shared external prefix.
    ExternalProject_Add(
        ext_mkldnn
        GIT_REPOSITORY ${MKLDNN_GIT_REPO_URL}
        UPDATE_COMMAND ""
        PATCH_COMMAND git am ${CMAKE_SOURCE_DIR}/third-party/patches/mkldnn-cmake-openmp.patch
        CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${EXTERNAL_INSTALL_LOCATION}
        ${MKLDNN_EXTRA_EP_ARGS}
    )

    # MKL-DNN's bundled script fetches Intel MKL; run it between the
    # 'download' and 'configure' steps of the external project.
    ExternalProject_Get_Property(ext_mkldnn source_dir binary_dir)
    ExternalProject_Add_Step(
        ext_mkldnn
        PrepareMKL
        COMMAND ${source_dir}/scripts/prepare_mkl.sh
        DEPENDEES download
        DEPENDERS configure
    )

    # Export the install locations so consumers can set up include/link paths
    # and depend on ext_mkldnn.
    # NOTE(review): this file is pulled in via include(), which does not open a
    # new variable scope -- confirm PARENT_SCOPE delivers these where the
    # consumers (e.g. the test CMakeLists) expect them.
    set(MKLDNN_INCLUDE_DIR "${EXTERNAL_INSTALL_LOCATION}/include" PARENT_SCOPE)
    set(MKLDNN_LIB_DIR "${EXTERNAL_INSTALL_LOCATION}/lib" PARENT_SCOPE)
endif()
......@@ -41,11 +41,22 @@ set (SRC
uuid.cpp
)
# Compile and link the MKL-DNN smoke test only when the superbuild exported
# MKLDNN_INCLUDE_DIR (i.e. MKL-DNN is being built for this platform).
if(MKLDNN_INCLUDE_DIR)
# NOTE(review): directory-scoped include_directories/link_directories leak to
# every target defined below; target_include_directories would be tighter, but
# is left as-is pending confirmation of the project's minimum CMake version.
include_directories(${MKLDNN_INCLUDE_DIR})
link_directories(${MKLDNN_LIB_DIR})
set(SRC ${SRC} mkldnn.cpp)
endif()
# Request C++11 and define CURDIR to the quoted test source directory
# (presumably so tests can locate files relative to the sources -- verify
# against the test code).
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCURDIR=\\\"${CMAKE_CURRENT_SOURCE_DIR}\\\"")
add_executable(unit-test ${SRC})
if(MKLDNN_INCLUDE_DIR)
# Link the installed libmkldnn and ensure the external project builds first;
# add_dependencies only orders the build, the line above does the linking.
# All target_link_libraries calls on this target use the plain (keyword-less)
# signature -- keyword and plain forms must not be mixed on one target.
target_link_libraries(unit-test mkldnn)
add_dependencies(unit-test ext_mkldnn)
endif()
target_link_libraries(unit-test ngraph libgtest pthread)
target_link_libraries(unit-test ${CMAKE_DL_LIBS})
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include <iostream>
#include <vector>
#include <mkldnn.hpp>
#include "gtest/gtest.h"
// Number of elements described by the dims vector `t`: the product of all
// dimension extents (1 when `t` is empty).
static int tensor_volume(const mkldnn::memory::dims &t)
{
    int volume = 1;
    for (mkldnn::memory::dims::const_iterator it = t.begin(); it != t.end(); ++it)
        volume *= *it;
    return volume;
}
// Smoke test: construct and execute one grouped forward convolution through
// the MKL-DNN C++ API on the CPU engine; the test passes if no step throws.
TEST(mkldnn, engine)
{
using namespace mkldnn;
// EXPECT_NO_THROW wraps a GNU statement expression; suppress the pedantic
// warning clang emits for that extension.
#pragma GCC diagnostic ignored "-Wgnu-statement-expression"
EXPECT_NO_THROW(({
auto cpu_engine = engine(engine::cpu, 0);
// Problem shapes: minibatch 2, 256 input channels at 13x13, 384 output
// channels split over 2 groups, 3x3 kernel, unit stride, no padding.
const int mb = 2;
const int groups = 2;
memory::dims input_tz = {mb, 256, 13, 13};
memory::dims weights_tz = {groups, 384/groups, 256/groups, 3, 3};
memory::dims bias_tz = {384};
memory::dims strides = {1, 1};
memory::dims padding = {0, 0};
// Output spatial size via the usual formula: (in + 2*pad - kernel)/stride + 1.
memory::dims output_tz = {mb, 384,
(input_tz[2] + 2*padding[0] - weights_tz[3])/strides[0] + 1,
(input_tz[3] + 2*padding[1] - weights_tz[4])/strides[1] + 1,
};
// Zero-filled host buffers sized to each tensor's element count.
std::vector<float> input(tensor_volume(input_tz), .0f);
std::vector<float> weights(tensor_volume(weights_tz), .0f);
std::vector<float> bias(tensor_volume(bias_tz), .0f);
std::vector<float> output(tensor_volume(output_tz), .0f);
// Memory descriptors: f32 data, nchw activations, grouped (goihw) weights,
// and a flat (x) bias.
auto c3_src_desc = memory::desc({input_tz}, memory::data_type::f32, memory::format::nchw);
auto c3_weights_desc = memory::desc({weights_tz}, memory::data_type::f32, memory::format::goihw);
auto c3_bias_desc = memory::desc({bias_tz}, memory::data_type::f32, memory::format::x);
auto c3_dst_desc = memory::desc({output_tz}, memory::data_type::f32, memory::format::nchw);
// Bind each descriptor to the CPU engine and its host buffer.
auto c3_src = memory({c3_src_desc, cpu_engine}, input.data());
auto c3_weights = memory({c3_weights_desc, cpu_engine}, weights.data());
auto c3_bias = memory({c3_bias_desc, cpu_engine}, bias.data());
auto c3_dst = memory({c3_dst_desc, cpu_engine}, output.data());
// Build the direct forward-convolution primitive from its descriptor chain
// (op desc -> primitive desc -> primitive bound to the four memories)...
auto c3 = convolution_forward(convolution_forward::primitive_desc(convolution_forward::desc(prop_kind::forward,
algorithm::convolution_direct,
c3_src_desc, c3_weights_desc, c3_bias_desc, c3_dst_desc,
strides, padding, padding, padding_kind::zero),
cpu_engine), c3_src, c3_weights, c3_bias, c3_dst);
// ...then submit it on an eager stream and block until it completes.
stream(stream::kind::eager).submit({c3}).wait();
}));
}
......@@ -13,3 +13,4 @@
include( ../cmake/external_gtest.cmake )
include( ../cmake/external_eigen.cmake )
include( ../cmake/external_mkldnn.cmake )
From 6ca95a7c6802eef486ef1d47ad76094eb1164a7c Mon Sep 17 00:00:00 2001
From: Jaikrishnan Menon <jaikrishnan.menon@intel.com>
Date: Fri, 8 Sep 2017 15:01:59 -0700
Subject: [PATCH] CMake: Detect the right OpenMP flags on the host platform
instead of hardcoding '-fopenmp'
---
CMakeLists.txt | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1e19215..3cb71bd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -81,8 +81,18 @@ if(WIN32)
add_definitions(/Qpar)
endif()
elseif(UNIX OR APPLE)
- set(OPENMP_FLAGS "-fopenmp")
- set(CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS} ${OPENMP_FLAGS} ${CCXX_WARN_FLAGS} -DMKLDNN_DLL -DMKLDNN_DLL_EXPORTS -fvisibility=internal")
+ # Discover the right OpenMP flags
+ set(OPENMP_FLAGS "")
+ find_package(OpenMP)
+ if(OpenMP_C_FOUND)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
+ endif()
+ if(OpenMP_CXX_FOUND)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
+ set(OPENMP_FLAGS "${OpenMP_CXX_FLAGS}")
+ endif()
+
+ set(CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS} ${CCXX_WARN_FLAGS} -DMKLDNN_DLL -DMKLDNN_DLL_EXPORTS -fvisibility=internal")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CCXX_FLAGS} -std=c99")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CCXX_FLAGS} -std=c++11 -fvisibility-inlines-hidden")
set(CCXX_WARN_FLAGS "-Wall -Werror -Wno-unknown-pragmas")
--
1.9.1
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment