Unverified commit 45a0fb47, authored by Robert Kimball, committed by GitHub

Windows support. (#2394)

* fix windows build

* wip

* mkldnn seems to build

* address various errors building cpu backend with MSVC

* wip

* wip

* Windows support.

    * Remove the dependency on LLVM when building with MSVC.

* Define EIGEN_HAS_CONSTEXPR when using MSVS.

* Fix MSVC build errors.

    * Incorrect argument to 'decltype'. It is a VC bug; work around the
    error by renaming the function.

    * MINMAX issue in matmul_bias.cpp.

    * Correct TBB_LINK_LIBS on Windows.

* Fix MSVC link errors.

    1. Symbol redefinition problems in cpu_builder.obj and
    convert_layout.obj, caused by cpu_builder.hpp containing an implicit
    instantiation of runtime::cpu::Builder::build for
    cpu::op::ConvertLayout. The fix is to delete the registration entry
    in cpu_builder.cpp and use REGISTER_CPU_OP_BUILDER in
    convert_layout.cpp.

    2. Fix the dependent library paths on Windows: link against the
    *.lib import libraries, not the *.dll files.

* Set visibility for CPU backend to fix the MSVC linker error.

    MSVC complains that the .def file exceeds the size limitation
    when using CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS. All the functions
    marked CPU_BACKEND_API are used by the unit tests or nbench.

* Fix unit test build errors on Windows.

    * backend_unary_elementwise.in.cpp: Use all_close_f in the BACKEND
    sqrt test case.

    * cpu_fusion.cpp: Fix the error "'NUM_STEPS' cannot be implicitly
    captured because no default capture mode has been specified".

    * cpu_test.cpp: Use portable setenv and unsetenv from misc.hpp.

    * tools.cpp: Use the portable popen wrapper from misc.hpp.

    * misc.hpp/misc.cpp: Add new files to host misc functions for which
    Linux and Windows need different implementations.

* Make Debug mode work with MSVC.

* style

* fix line ending
parent 1efd0bfd
@@ -332,7 +332,7 @@ include(cmake/external_cldnn.cmake)
 if (NGRAPH_USE_PREBUILT_LLVM OR DEFINED LLVM_TARBALL_URL)
     include(cmake/external_llvm_prebuilt.cmake)
-else()
+elseif (NOT NGRAPH_DEX_ONLY OR NOT MSVS)
     include(cmake/external_llvm.cmake)
 endif()
...
@@ -72,9 +72,7 @@ ExternalProject_Get_Property(ext_gtest SOURCE_DIR BINARY_DIR)
 add_library(libgtest INTERFACE)
 add_dependencies(libgtest ext_gtest)
 target_include_directories(libgtest SYSTEM INTERFACE ${SOURCE_DIR}/googletest/include)
-if(CMAKE_BUILD_TYPE STREQUAL "Debug")
-    set(GTEST_LIB_NAME gtestd)
-else()
-    set(GTEST_LIB_NAME gtest)
-endif()
-target_link_libraries(libgtest INTERFACE ${GTEST_OUTPUT_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${GTEST_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
+target_link_libraries(libgtest INTERFACE
+    debug ${GTEST_OUTPUT_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}gtestd${CMAKE_STATIC_LIBRARY_SUFFIX}
+    optimized ${GTEST_OUTPUT_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}gtest${CMAKE_STATIC_LIBRARY_SUFFIX})
@@ -62,7 +62,7 @@ elseif (APPLE)
 elseif (WIN32)
     set(MKLPACKAGE "mklml_win_${MKLVERSION}.zip")
     set(MKL_SHA1_HASH 97f01ab854d8ee88cc0429f301df84844d7cce6b)
-    set(MKL_LIBS mklml.dll libiomp5md.dll)
+    set(MKL_LIBS mklml.lib libiomp5md.lib)
 endif()
 set(MKLURL ${MKLURLROOT}${MKLPACKAGE})
@@ -84,7 +84,11 @@ set(MKL_SOURCE_DIR ${source_dir})
 add_library(libmkl INTERFACE)
 add_dependencies(libmkl ext_mkl)
 foreach(LIB ${MKL_LIBS})
-    list(APPEND TMP_PATHS ${EXTERNAL_PROJECTS_ROOT}/mkldnn/lib/${LIB})
+    if (WIN32)
+        list(APPEND TMP_PATHS ${EXTERNAL_PROJECTS_ROOT}/mkl/src/ext_mkl/lib/${LIB})
+    else()
+        list(APPEND TMP_PATHS ${EXTERNAL_PROJECTS_ROOT}/mkldnn/lib/${LIB})
+    endif()
 endforeach()
 set(MKL_LIBS ${TMP_PATHS})
 target_link_libraries(libmkl INTERFACE ${MKL_LIBS})
@@ -185,9 +189,15 @@ add_custom_command(TARGET ext_mkldnn POST_BUILD
 add_library(libmkldnn INTERFACE)
 add_dependencies(libmkldnn ext_mkldnn)
 target_include_directories(libmkldnn SYSTEM INTERFACE ${EXTERNAL_PROJECTS_ROOT}/mkldnn/include)
-target_link_libraries(libmkldnn INTERFACE
-    ${EXTERNAL_PROJECTS_ROOT}/mkldnn/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mkldnn${CMAKE_SHARED_LIBRARY_SUFFIX}
-    libmkl
-)
+if (WIN32)
+    target_link_libraries(libmkldnn INTERFACE
+        ${EXTERNAL_PROJECTS_ROOT}/mkldnn/lib/mkldnn.lib
+        libmkl
+    )
+else()
+    target_link_libraries(libmkldnn INTERFACE
+        ${EXTERNAL_PROJECTS_ROOT}/mkldnn/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mkldnn${CMAKE_SHARED_LIBRARY_SUFFIX}
+        libmkl
+    )
+endif()
 install(DIRECTORY ${EXTERNAL_PROJECTS_ROOT}/mkldnn/lib/ DESTINATION ${NGRAPH_INSTALL_LIB} OPTIONAL)
@@ -17,8 +17,11 @@
 add_subdirectory(interpreter)
 add_subdirectory(hybrid)
+# With CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS, when creating cpu_backend.dll, link reports error: library limit of 65535 objects exceeded
 if (NGRAPH_CPU_ENABLE)
+    set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS FALSE)
     add_subdirectory(cpu)
+    set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
 endif()
 if (NGRAPH_INTELGPU_ENABLE)
...
@@ -194,6 +194,13 @@ if (NGRAPH_CPU_ENABLE)
         message(WARNING "The build toolset doesn't support OpenMP. This will impact performance and lead to slowdowns.")
     endif()
+    if (MSVS)
+        target_compile_definitions(cpu_backend PRIVATE EIGEN_HAS_CONSTEXPR)
+        # in debug mode, more files besides builder/dot.cpp raise the error
+        target_compile_options(cpu_backend PRIVATE "/bigobj")
+    endif()
+    target_compile_definitions(cpu_backend PRIVATE CPU_BACKEND_DLL_EXPORTS)
     if(NGRAPH_DISTRIBUTED_ENABLE)
         target_compile_definitions(cpu_backend PRIVATE NGRAPH_DISTRIBUTED)
         target_include_directories(cpu_backend SYSTEM PRIVATE libmlsl)
@@ -207,7 +214,7 @@ if (NGRAPH_CPU_ENABLE)
     endif()
     target_include_directories(cpu_backend SYSTEM PUBLIC libmkldnn)
     set_target_properties(cpu_backend PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${NGRAPH_BUILD_DIR})
-    if (NOT APPLE)
+    if (NOT APPLE AND NOT MSVS)
         # CPU backend uses third-party libraries like Eigen that might be linked in and
         # exported by other DSOs as well. In the absence of versioning, this could lead to the
         # CPU backend picking up the wrong version or even multiple versions of the
...
@@ -86,6 +86,7 @@ namespace ngraph
                 };
                 functors.emplace_back(functor);
             }
+            REGISTER_CPU_OP_BUILDER(ConvertLayout);
         }
     }
 }
@@ -187,9 +187,9 @@ namespace ngraph
                     return;
                 }
-                std::function<decltype(runtime::cpu::kernel::dot<float>)> kernel;
-                SELECT_KERNEL(kernel, out[0].get_element_type(), runtime::cpu::kernel::dot);
+                std::function<decltype(runtime::cpu::kernel::dot_ref<float>)> kernel;
+                SELECT_KERNEL(kernel, out[0].get_element_type(), runtime::cpu::kernel::dot_ref);
                 auto functor =
                     [&, kernel, arg0_shape, arg1_shape, result_shape, reduction_axes_count](
...
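The rename from `dot` to `dot_ref` (and likewise `pad_ref`/`reshape_ref` below) sidesteps the MSVC `decltype` bug mentioned in the commit message without touching the `decltype` expressions themselves. A minimal, self-contained sketch of the pattern the builders use — the kernel here is an illustrative stand-in, not ngraph's real signature:

```cpp
#include <cstddef>
#include <functional>
#include <iostream>

// Illustrative stand-in for a CPU reference kernel template.
namespace kernel
{
    template <typename T>
    void dot_ref(const T* a, const T* b, T* out, std::size_t n)
    {
        T acc = T{0};
        for (std::size_t i = 0; i < n; ++i)
        {
            acc += a[i] * b[i];
        }
        *out = acc;
    }
}

int main()
{
    // The builders derive a type-erased functor type from one concrete
    // instantiation: decltype(kernel::dot_ref<float>) is the function type
    // void(const float*, const float*, float*, std::size_t).
    std::function<decltype(kernel::dot_ref<float>)> k = kernel::dot_ref<float>;

    float a[] = {1, 2, 3};
    float b[] = {4, 5, 6};
    float out = 0;
    k(a, b, &out, 3);
    std::cout << out << "\n"; // prints 32
    return 0;
}
```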
@@ -78,12 +78,12 @@ namespace ngraph
                     k,
                     1.0f,
                     static_cast<float*>(arg0_tensor),
-                    max(1UL, lda),
+                    max<size_t>(1, lda),
                     static_cast<float*>(arg1_tensor),
-                    max(1UL, ldb),
+                    max<size_t>(1, ldb),
                     beta,
                     static_cast<float*>(out0_tensor),
-                    max(1UL, arg2_shape[1]));
+                    max<size_t>(1, arg2_shape[1]));
                 };
                 CPUKernelFunctor bias_functor = [](CPURuntimeContext* ctx,
@@ -111,10 +111,10 @@ namespace ngraph
                         ones_row.data(),
                         1UL,
                         static_cast<float*>(arg2_tensor),
-                        max(1UL, arg2_shape[1]),
+                        max<size_t>(1, arg2_shape[1]),
                         1.0f,
                         static_cast<float*>(out0_tensor),
-                        max(1UL, arg2_shape[1]));
+                        max<size_t>(1, arg2_shape[1]));
                     };
                 }
                 else
@@ -132,10 +132,10 @@ namespace ngraph
                         static_cast<float*>(arg2_tensor),
                         1UL,
                         ones_col.data(),
-                        max(1UL, arg2_shape[1]),
+                        max<size_t>(1, arg2_shape[1]),
                         1.0f,
                         static_cast<float*>(out0_tensor),
-                        max(1UL, arg2_shape[1]));
+                        max<size_t>(1, arg2_shape[1]));
                     };
                 }
             }
@@ -161,10 +161,10 @@ namespace ngraph
                         ones_scalar.data(),
                         1UL,
                         bias.data(),
-                        max(1UL, arg2_shape[1]),
+                        max<size_t>(1, arg2_shape[1]),
                         1.0f,
                         static_cast<float*>(out0_tensor),
-                        max(1UL, arg2_shape[1]));
+                        max<size_t>(1, arg2_shape[1]));
                     };
                 }
             }
@@ -262,8 +262,8 @@ namespace ngraph
                 size_t m = shape_a[1];
                 size_t k = shape_a[2];
                 size_t n = shape_b[2];
-                size_t lda = std::max(1UL, k);
-                size_t ldb = std::max(1UL, n);
+                size_t lda = std::max<size_t>(1, k);
+                size_t ldb = std::max<size_t>(1, n);
                 cblas::Transpose ctranspose_a = cblas::Transpose::None;
                 cblas::Transpose ctranspose_b = cblas::Transpose::None;
@@ -272,15 +272,15 @@ namespace ngraph
                     ctranspose_a = cblas::Transpose::Transpose;
                     m = shape_a[2];
                     k = shape_a[1];
-                    lda = std::max(1UL, m);
+                    lda = std::max<size_t>(1, m);
                 }
                 if (transpose_b)
                 {
                     ctranspose_b = cblas::Transpose::Transpose;
                     n = shape_b[1];
-                    ldb = std::max(1UL, k);
+                    ldb = std::max<size_t>(1, k);
                 }
-                size_t ldc = std::max(1UL, n);
+                size_t ldc = std::max<size_t>(1, n);
                 CblasGemmOptions options(data_a, data_b, data_c);
...
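This is the "MINMAX issue" from the commit message: `max(1UL, x)` compiles on LP64 Linux, where `1UL` and `size_t` are the same 64-bit `unsigned long`, but on LLP64 Windows `unsigned long` is 32 bits while `size_t` is 64, so `std::max` cannot deduce a single template argument. Spelling the argument out removes the deduction entirely. A compilable sketch, using only the standard library:

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>

int main()
{
    std::size_t k = 0;

    // On LP64 Linux this deduces T = unsigned long and compiles; on LLP64
    // Windows (MSVC), 1UL is 32-bit and k is 64-bit, so deduction fails:
    // std::size_t lda = std::max(1UL, k); // error with MSVC

    // Portable: supply the template argument explicitly, as the commit does
    // throughout matmul_bias.cpp.
    std::size_t lda = std::max<std::size_t>(1, k);

    std::cout << lda << "\n"; // prints 1
    return 0;
}
```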
@@ -72,9 +72,10 @@ namespace ngraph
                 {
                     auto padding_interior = pad->get_padding_interior();
-                    std::function<decltype(runtime::cpu::kernel::pad<float>)> kernel;
-                    SELECT_KERNEL(kernel, args[0].get_element_type(), runtime::cpu::kernel::pad);
+                    std::function<decltype(runtime::cpu::kernel::pad_ref<float>)> kernel;
+                    SELECT_KERNEL(
+                        kernel, args[0].get_element_type(), runtime::cpu::kernel::pad_ref);
                     auto functor = [&,
                                     kernel,
...
@@ -113,9 +113,10 @@ namespace ngraph
                 }
                 else
                 {
-                    std::function<decltype(runtime::cpu::kernel::reshape<float>)> ref_kernel;
-                    SELECT_KERNEL(ref_kernel, result_element_type, runtime::cpu::kernel::reshape);
+                    std::function<decltype(runtime::cpu::kernel::reshape_ref<float>)> ref_kernel;
+                    SELECT_KERNEL(
+                        ref_kernel, result_element_type, runtime::cpu::kernel::reshape_ref);
                     auto functor = [&, ref_kernel, arg_shape, input_order, result_shape](
                         CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
...
@@ -16,6 +16,7 @@
 #include <tbb/tbb_stddef.h>
+#include "cpu_backend_visibility.h"
 #include "ngraph/graph_util.hpp"
 #include "ngraph/runtime/backend_manager.hpp"
 #include "ngraph/runtime/cpu/cpu_backend.hpp"
@@ -27,14 +28,14 @@
 using namespace ngraph;
 using namespace std;
-extern "C" runtime::Backend* new_backend(const char* configuration_string)
+extern "C" CPU_BACKEND_API runtime::Backend* new_backend(const char* configuration_string)
 {
     // Force TBB to link to the backend
     tbb::TBB_runtime_interface_version();
     return new runtime::cpu::CPU_Backend();
 }
-extern "C" void delete_backend(runtime::Backend* backend)
+extern "C" CPU_BACKEND_API void delete_backend(runtime::Backend* backend)
 {
     delete backend;
 }
...
@@ -19,6 +19,7 @@
 #include <map>
 #include <memory>
+#include "cpu_backend_visibility.h"
 #include "ngraph/runtime/backend.hpp"
 namespace ngraph
@@ -30,7 +31,7 @@ namespace ngraph
         class CPU_ExternalFunction;
         class CPU_CallFrame;
-        class CPU_Backend : public runtime::Backend
+        class CPU_BACKEND_API CPU_Backend : public runtime::Backend
         {
         public:
             std::shared_ptr<CPU_CallFrame>
...
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
// https://gcc.gnu.org/wiki/Visibility
// Generic helper definitions for shared library support
#if defined _WIN32 || defined __CYGWIN__
#define CPU_BACKEND_HELPER_DLL_IMPORT __declspec(dllimport)
#define CPU_BACKEND_HELPER_DLL_EXPORT __declspec(dllexport)
#define CPU_BACKEND_HELPER_DLL_LOCAL
#else
#if __GNUC__ >= 4
#define CPU_BACKEND_HELPER_DLL_IMPORT __attribute__((visibility("default")))
#define CPU_BACKEND_HELPER_DLL_EXPORT __attribute__((visibility("default")))
#define CPU_BACKEND_HELPER_DLL_LOCAL __attribute__((visibility("hidden")))
#else
#define CPU_BACKEND_HELPER_DLL_IMPORT
#define CPU_BACKEND_HELPER_DLL_EXPORT
#define CPU_BACKEND_HELPER_DLL_LOCAL
#endif
#endif
// Now we use the generic helper definitions above to define CPU_BACKEND_API and CPU_BACKEND_LOCAL.
// CPU_BACKEND_API is used for the public API symbols. It either DLL imports or DLL exports
// (or does nothing for static build)
// CPU_BACKEND_LOCAL is used for non-api symbols.
// #ifdef CPU_BACKEND_DLL // defined if CPU_BACKEND is compiled as a DLL
#ifdef CPU_BACKEND_DLL_EXPORTS // defined if we are building the CPU_BACKEND DLL (instead of using it)
#define CPU_BACKEND_API CPU_BACKEND_HELPER_DLL_EXPORT
#else
#define CPU_BACKEND_API CPU_BACKEND_HELPER_DLL_IMPORT
#endif // CPU_BACKEND_DLL_EXPORTS
#define CPU_BACKEND_LOCAL CPU_BACKEND_HELPER_DLL_LOCAL
// #else // CPU_BACKEND_DLL is not defined: this means CPU_BACKEND is a static lib.
// #define CPU_BACKEND_API
// #define CPU_BACKEND_LOCAL
// #endif // CPU_BACKEND_DLL
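For context, a sketch of how this header is meant to be consumed; the class and function names below are hypothetical, not part of the commit. When building the cpu_backend target, CMake defines CPU_BACKEND_DLL_EXPORTS (see the target_compile_definitions change above), so CPU_BACKEND_API expands to dllexport under MSVC; every other consumer of the header sees dllimport, and GCC builds get default visibility either way:

```cpp
// some_cpu_backend_header.hpp (hypothetical)
#pragma once
#include "cpu_backend_visibility.h"

// Exported when building cpu_backend.dll, imported everywhere else.
class CPU_BACKEND_API ExampleExportedClass
{
public:
    void run();
};

// C entry points get the same treatment (cf. new_backend/delete_backend above).
extern "C" CPU_BACKEND_API const char* example_entry_point();
```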
@@ -366,8 +366,6 @@ namespace ngraph
                 {
                     static BuildOpMap build_dispatcher{
                         {TI(ngraph::op::Parameter), &runtime::cpu::Builder::nop},
-                        {TI(ngraph::runtime::cpu::op::ConvertLayout),
-                         &runtime::cpu::Builder::build<ngraph::runtime::cpu::op::ConvertLayout>},
                         {TI(ngraph::runtime::cpu::op::LoopKernel),
                          &runtime::cpu::Builder::build<ngraph::runtime::cpu::op::LoopKernel>},
                         {TI(ngraph::runtime::cpu::op::HalideOp),
...
@@ -242,6 +242,17 @@
     }                                                                                              \
     } __register_##OP##_builder_instance;
+#define REGISTER_CPU_OP_BUILDER(OP)                                                                \
+    static struct __register_##OP##_builder                                                        \
+    {                                                                                              \
+        __register_##OP##_builder()                                                                \
+        {                                                                                          \
+            GetGlobalBuildDispatcher().insert(                                                     \
+                {type_index(typeid(ngraph::runtime::cpu::op::OP)),                                 \
+                 &runtime::cpu::Builder::build<ngraph::runtime::cpu::op::OP>});                    \
+        }                                                                                          \
+    } __register_##OP##_builder_instance;
 namespace ngraph
 {
     namespace runtime
...
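The macro above fixes the duplicate-symbol link error by leaving only one instantiation of Builder::build&lt;ConvertLayout&gt;, in convert_layout.cpp, registered at load time from a static object's constructor. A self-contained sketch of that self-registration idiom, with simplified types standing in for ngraph's BuildOpMap and GetGlobalBuildDispatcher():

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>

// Simplified stand-in for ngraph's global builder dispatcher.
using BuilderFn = std::function<void()>;

std::map<std::string, BuilderFn>& GetGlobalDispatcher()
{
    static std::map<std::string, BuilderFn> dispatcher;
    return dispatcher;
}

// A static object per translation unit registers its op when the library is
// loaded, so no other file needs to instantiate the builder for that op.
#define REGISTER_BUILDER(OP, FN)                                              \
    static struct Register_##OP                                               \
    {                                                                         \
        Register_##OP() { GetGlobalDispatcher().emplace(#OP, FN); }           \
    } register_##OP##_instance;

REGISTER_BUILDER(ConvertLayout, [] { std::cout << "building ConvertLayout\n"; })

int main()
{
    GetGlobalDispatcher().at("ConvertLayout")(); // dispatch by op name
    return 0;
}
```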
@@ -24,6 +24,7 @@
 #include <tuple>
 #include <vector>
+#include "cpu_backend_visibility.h"
 #include "ngraph/function.hpp"
 #include "ngraph/runtime/cpu/cpu_call_frame.hpp"
 #include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
@@ -36,7 +37,7 @@ namespace ngraph
     {
         namespace cpu
         {
-            class CPU_CountTracepoint
+            class CPU_BACKEND_API CPU_CountTracepoint
             {
             public:
                 /// \brief A convenience class that wraps user's callback to run it every *count* iterations
@@ -56,7 +57,7 @@ namespace ngraph
                 size_t m_iteration;
             };
-            class CPU_Debugger
+            class CPU_BACKEND_API CPU_Debugger
             {
             public:
                 CPU_Debugger(CPU_CallFrame& callframe);
...
@@ -23,6 +23,7 @@
 #include <mkldnn.hpp>
+#include "cpu_backend_visibility.h"
 #include "ngraph/descriptor/layout/tensor_layout.hpp"
 #include "ngraph/shape.hpp"
@@ -32,7 +33,7 @@ namespace ngraph
     {
         namespace cpu
         {
-            class LayoutDescriptor : public ngraph::descriptor::layout::TensorLayout
+            class CPU_BACKEND_API LayoutDescriptor : public ngraph::descriptor::layout::TensorLayout
             {
             public:
                 LayoutDescriptor(const ngraph::descriptor::Tensor& tv);
...
@@ -168,13 +168,13 @@ namespace ngraph
                 }
                 template <typename ElementType>
-                void dot(void* arg0,
-                         void* arg1,
-                         void* out,
-                         const Shape& arg0_shape,
-                         const Shape& arg1_shape,
-                         const Shape& out_shape,
-                         size_t reduction_axes_count)
+                void dot_ref(void* arg0,
+                             void* arg1,
+                             void* out,
+                             const Shape& arg0_shape,
+                             const Shape& arg1_shape,
+                             const Shape& out_shape,
+                             size_t reduction_axes_count)
                 {
                     reference::dot(static_cast<const ElementType*>(arg0),
                                    static_cast<const ElementType*>(arg1),
...
@@ -60,15 +60,15 @@ namespace ngraph
                 }
                 template <typename ElementType>
-                void pad(const void* arg0,
-                         const void* arg1,
-                         void* out,
-                         const Shape& arg0_shape,
-                         const Shape& out_shape,
-                         const Shape& padding_below,
-                         const Shape& padding_above,
-                         const Shape& padding_interior,
-                         int arena)
+                void pad_ref(const void* arg0,
+                             const void* arg1,
+                             void* out,
+                             const Shape& arg0_shape,
+                             const Shape& out_shape,
+                             const Shape& padding_below,
+                             const Shape& padding_above,
+                             const Shape& padding_interior,
+                             int arena)
                 {
                     reference::pad(static_cast<const ElementType*>(arg0),
                                    static_cast<const ElementType*>(arg1),
...
@@ -145,12 +145,12 @@ namespace ngraph
                 }
                 template <typename ElementType>
-                void reshape(const void* arg,
-                             void* out,
-                             const Shape& in_shape,
-                             const AxisVector& in_axis_order,
-                             const Shape& out_shape,
-                             int arena)
+                void reshape_ref(const void* arg,
+                                 void* out,
+                                 const Shape& in_shape,
+                                 const AxisVector& in_axis_order,
+                                 const Shape& out_shape,
+                                 int arena)
                 {
                     reference::reshape(static_cast<const ElementType*>(arg),
                                        static_cast<ElementType*>(out),
...
@@ -79,7 +79,7 @@ mkldnn::memory::desc MKLDNNEmitter::build_memory_descriptor(const TensorViewWrap
         fmt);
 }
-mkldnn::memory::desc MKLDNNEmitter::build_memory_descriptor(const Shape& shape,
+mkldnn::memory::desc MKLDNNEmitter::build_memory_descriptor(const ngraph::Shape& shape,
                                                             const ngraph::element::Type& et,
                                                             mkldnn::memory::format fmt) const
 {
...
@@ -13,8 +13,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //*****************************************************************************
+#include "cpu_backend_visibility.h"
-extern "C" const char* get_ngraph_version_string()
+extern "C" CPU_BACKEND_API const char* get_ngraph_version_string()
 {
     return NGRAPH_VERSION;
 }
@@ -21,6 +21,7 @@
 #include "ngraph/node.hpp"
 #include "ngraph/node_vector.hpp"
 #include "ngraph/op/op.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 #include "ngraph/util.hpp"
 namespace ngraph
@@ -30,10 +31,10 @@ namespace ngraph
         class BatchNormTrainingRelu : public Op
         {
         public:
-            BatchNormTrainingRelu(double eps,
-                                  std::shared_ptr<Node> gamma,
-                                  std::shared_ptr<Node> beta,
-                                  std::shared_ptr<Node> input);
+            CPU_BACKEND_API BatchNormTrainingRelu(double eps,
+                                                  std::shared_ptr<Node> gamma,
+                                                  std::shared_ptr<Node> beta,
+                                                  std::shared_ptr<Node> input);
             double get_eps_value() const { return m_epsilon; }
             virtual std::shared_ptr<Node>
...
@@ -18,6 +18,7 @@
 #include "ngraph/op/convolution.hpp"
 #include "ngraph/op/op.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 namespace ngraph
 {
@@ -27,19 +28,19 @@ namespace ngraph
         class ConvolutionBias : public Op
         {
         public:
-            ConvolutionBias(const std::shared_ptr<op::Convolution>& conv,
-                            const std::shared_ptr<Node>& bias,
-                            const bool with_relu = false);
-            ConvolutionBias(const std::shared_ptr<Node>& data_batch,
-                            const std::shared_ptr<Node>& filters,
-                            const std::shared_ptr<Node>& bias,
-                            const Strides& window_movement_strides,
-                            const Strides& window_dilation_strides,
-                            const CoordinateDiff& padding_below,
-                            const CoordinateDiff& padding_above,
-                            const Strides& data_dilation_strides,
-                            const bool with_relu = false);
+            CPU_BACKEND_API ConvolutionBias(const std::shared_ptr<op::Convolution>& conv,
+                                            const std::shared_ptr<Node>& bias,
+                                            const bool with_relu = false);
+            CPU_BACKEND_API ConvolutionBias(const std::shared_ptr<Node>& data_batch,
+                                            const std::shared_ptr<Node>& filters,
+                                            const std::shared_ptr<Node>& bias,
+                                            const Strides& window_movement_strides,
+                                            const Strides& window_dilation_strides,
+                                            const CoordinateDiff& padding_below,
+                                            const CoordinateDiff& padding_above,
+                                            const Strides& data_dilation_strides,
+                                            const bool with_relu = false);
             const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
             const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
...
@@ -18,6 +18,7 @@
 #include "ngraph/op/convolution.hpp"
 #include "ngraph/op/op.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 #include "ngraph/runtime/cpu/op/conv_bias.hpp"
 namespace ngraph
@@ -28,15 +29,15 @@ namespace ngraph
         class ConvolutionRelu : public Op
         {
         public:
-            ConvolutionRelu(const std::shared_ptr<op::Convolution>& conv);
-            ConvolutionRelu(const std::shared_ptr<Node>& data_batch,
-                            const std::shared_ptr<Node>& filters,
-                            const Strides& window_movement_strides,
-                            const Strides& window_dilation_strides,
-                            const CoordinateDiff& padding_below,
-                            const CoordinateDiff& padding_above,
-                            const Strides& data_dilation_strides);
+            CPU_BACKEND_API ConvolutionRelu(const std::shared_ptr<op::Convolution>& conv);
+            CPU_BACKEND_API ConvolutionRelu(const std::shared_ptr<Node>& data_batch,
+                                            const std::shared_ptr<Node>& filters,
+                                            const Strides& window_movement_strides,
+                                            const Strides& window_dilation_strides,
+                                            const CoordinateDiff& padding_below,
+                                            const CoordinateDiff& padding_above,
+                                            const Strides& data_dilation_strides);
             const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
             const Strides& get_window_dilation_strides() const { return m_window_dilation_strides; }
...
@@ -17,6 +17,7 @@
 #pragma once
 #include "ngraph/op/op.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 namespace ngraph
 {
@@ -34,11 +35,11 @@ namespace ngraph
             class ConvertLayout : public ngraph::op::Op
             {
             public:
-                ConvertLayout(
+                CPU_BACKEND_API ConvertLayout(
                     const std::shared_ptr<Node>& arg,
                     const std::shared_ptr<ngraph::runtime::cpu::LayoutDescriptor>& layout);
-                ConvertLayout(
+                CPU_BACKEND_API ConvertLayout(
                     const std::shared_ptr<Node>& arg,
                     size_t output_index,
                     const std::shared_ptr<ngraph::runtime::cpu::LayoutDescriptor>& layout);
...
@@ -18,6 +18,7 @@
 #include "ngraph/op/convolution.hpp"
 #include "ngraph/op/op.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 namespace ngraph
 {
@@ -27,15 +28,15 @@ namespace ngraph
         class GroupConvolution : public Op
         {
         public:
-            GroupConvolution(const std::shared_ptr<Node>& data_batch,
-                             const std::shared_ptr<Node>& filters,
-                             const Strides& window_movement_strides,
-                             const Strides& window_dilation_strides,
-                             const CoordinateDiff& padding_below,
-                             const CoordinateDiff& padding_above,
-                             const Strides& data_dilation_strides,
-                             size_t groups,
-                             const Shape& output_shape);
+            CPU_BACKEND_API GroupConvolution(const std::shared_ptr<Node>& data_batch,
+                                             const std::shared_ptr<Node>& filters,
+                                             const Strides& window_movement_strides,
+                                             const Strides& window_dilation_strides,
+                                             const CoordinateDiff& padding_below,
+                                             const CoordinateDiff& padding_above,
+                                             const Strides& data_dilation_strides,
+                                             size_t groups,
+                                             const Shape& output_shape);
             Shape get_weights_dimensions() const;
             const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
...
@@ -18,6 +18,7 @@
 #include "ngraph/axis_set.hpp"
 #include "ngraph/op/op.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 namespace ngraph
 {
@@ -26,14 +27,14 @@ namespace ngraph
         class MatmulBias : public Op
         {
         public:
-            MatmulBias(std::shared_ptr<Node> W,
-                       std::shared_ptr<Node> x,
-                       std::shared_ptr<Node> b,
-                       Shape shape_w,
-                       Shape shape_x,
-                       bool transpose_w,
-                       bool transpose_x,
-                       AxisSet axes = AxisSet{});
+            CPU_BACKEND_API MatmulBias(std::shared_ptr<Node> W,
+                                       std::shared_ptr<Node> x,
+                                       std::shared_ptr<Node> b,
+                                       Shape shape_w,
+                                       Shape shape_x,
+                                       bool transpose_w,
+                                       bool transpose_x,
+                                       AxisSet axes = AxisSet{});
             void validate_and_infer_types() override;
...
@@ -17,6 +17,7 @@
 #pragma once
 #include "ngraph/op/op.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 #include "ngraph/util.hpp"
 namespace ngraph
@@ -46,17 +47,17 @@ namespace ngraph
         class Rnn : public Op
         {
         public:
-            Rnn(std::shared_ptr<Node> src_layer,
-                std::shared_ptr<Node> src_iter,
-                std::shared_ptr<Node> weights_layer,
-                std::shared_ptr<Node> weights_iter,
-                std::shared_ptr<Node> bias,
-                size_t num_timesteps,
-                size_t num_gates_per_cell,
-                size_t src_sequence_length,
-                size_t num_cell_states,
-                size_t direction,
-                size_t num_fused_layers);
+            CPU_BACKEND_API Rnn(std::shared_ptr<Node> src_layer,
+                                std::shared_ptr<Node> src_iter,
+                                std::shared_ptr<Node> weights_layer,
+                                std::shared_ptr<Node> weights_iter,
+                                std::shared_ptr<Node> bias,
+                                size_t num_timesteps,
+                                size_t num_gates_per_cell,
+                                size_t src_sequence_length,
+                                size_t num_cell_states,
+                                size_t direction,
+                                size_t num_fused_layers);
             virtual std::shared_ptr<Node>
                 copy_with_new_args(const NodeVector& new_args) const override;
             size_t get_num_timesteps() const { return m_num_timesteps; }
...
@@ -17,6 +17,7 @@
 #pragma once
 #include "ngraph/op/op.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 #include "ngraph/util.hpp"
 #include <array>
@@ -39,10 +40,10 @@ namespace ngraph
             };
             /// Input nodes are expected to be actual inputs where the corresponding input
             /// FunctionType will be applied to those inputs in the fused operation.
-            SigmoidMultiply(std::shared_ptr<Node> input_0,
-                            std::shared_ptr<Node> input_1,
-                            const FunctionType input_0_type,
-                            const FunctionType input_1_type);
+            CPU_BACKEND_API SigmoidMultiply(std::shared_ptr<Node> input_0,
+                                            std::shared_ptr<Node> input_1,
+                                            const FunctionType input_0_type,
+                                            const FunctionType input_1_type);
             /// WARNING: copy_with_new_args() implicitly expects new args must match the original input function types.
             virtual std::shared_ptr<Node>
                 copy_with_new_args(const NodeVector& new_args) const override;
@@ -53,7 +54,8 @@ namespace ngraph
                 return m_input_type[index];
             }
             /// Identifies the corresponding FunctionType for the input node.
-            static FunctionType identify_node_type(const std::shared_ptr<ngraph::Node>& node);
+            static CPU_BACKEND_API FunctionType
+                identify_node_type(const std::shared_ptr<ngraph::Node>& node);
         private:
             std::array<FunctionType, 2> m_input_type;
...
@@ -17,6 +17,7 @@
 #pragma once
 #include "ngraph/pass/graph_rewrite.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 namespace ngraph
 {
@@ -32,7 +33,7 @@ namespace ngraph
     }
 }
-class ngraph::runtime::cpu::pass::CPUFusion : public ngraph::pass::GraphRewrite
+class CPU_BACKEND_API ngraph::runtime::cpu::pass::CPUFusion : public ngraph::pass::GraphRewrite
 {
 public:
     CPUFusion(ngraph::pass::FusionType fusions = ngraph::pass::ALL_FUSIONS)
...
@@ -17,6 +17,7 @@
 #pragma once
 #include "ngraph/pass/pass.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 namespace ngraph
 {
@@ -26,13 +27,13 @@ namespace ngraph
         {
             namespace pass
             {
-                class CPURnnMatFusion : public ngraph::pass::FunctionPass
+                class CPU_BACKEND_API CPURnnMatFusion : public ngraph::pass::FunctionPass
                 {
                 public:
                     virtual bool
                         run_on_function(std::shared_ptr<ngraph::Function> function) override;
                 };
-                class CPUBatchFusion : public ngraph::pass::FunctionPass
+                class CPU_BACKEND_API CPUBatchFusion : public ngraph::pass::FunctionPass
                 {
                 public:
                     CPUBatchFusion(ngraph::pass::FusionType type = ngraph::pass::ALL_FUSIONS)
...
@@ -16,6 +16,7 @@
 #pragma once
 #include "ngraph/pass/graph_rewrite.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 namespace ngraph
 {
@@ -31,7 +32,8 @@ namespace ngraph
     }
 }
-class ngraph::runtime::cpu::pass::CPUPostLayoutOptimizations : public ngraph::pass::GraphRewrite
+class CPU_BACKEND_API ngraph::runtime::cpu::pass::CPUPostLayoutOptimizations
+    : public ngraph::pass::GraphRewrite
 {
 public:
     CPUPostLayoutOptimizations()
...
@@ -17,6 +17,7 @@
 #pragma once
 #include "ngraph/pass/graph_rewrite.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 #include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
 namespace ngraph
@@ -35,7 +36,7 @@ namespace ngraph
     }
 }
-class ngraph::runtime::cpu::pass::LSTMFusion : public ngraph::pass::GraphRewrite
+class CPU_BACKEND_API ngraph::runtime::cpu::pass::LSTMFusion : public ngraph::pass::GraphRewrite
 {
 public:
     LSTMFusion()
@@ -50,7 +51,8 @@ private:
     void construct_lstm_fprop();
 };
-class ngraph::runtime::cpu::pass::RNNFusion : public ngraph::pass::RecurrentGraphRewrite
+class CPU_BACKEND_API ngraph::runtime::cpu::pass::RNNFusion
+    : public ngraph::pass::RecurrentGraphRewrite
 {
 public:
     RNNFusion()
...
@@ -18,6 +18,7 @@
 #include "ngraph/node_vector.hpp"
 #include "ngraph/pass/pass.hpp"
 #include "ngraph/pattern/matcher.hpp"
+#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
 namespace ngraph
 {
@@ -33,7 +34,8 @@ namespace ngraph
     }
 }
-class ngraph::runtime::cpu::pass::CPUWorkspaceInsertion : public ngraph::pass::FunctionPass
+class CPU_BACKEND_API ngraph::runtime::cpu::pass::CPUWorkspaceInsertion
+    : public ngraph::pass::FunctionPass
 {
 public:
     CPUWorkspaceInsertion(ngraph::NodeVector& indices_list, bool return_indices = true)
...
@@ -39,7 +39,11 @@ if(NGRAPH_TBB_ENABLE)
 endif()
 add_executable(resource_generator EXCLUDE_FROM_ALL ${SRC})
-add_dependencies(resource_generator ext_llvm)
+if (NOT NGRAPH_DEX_ONLY)
+    add_dependencies(resource_generator ext_llvm)
+endif()
 if(NGRAPH_CPU_ENABLE)
     add_dependencies(resource_generator ext_eigen ext_mkldnn)
 endif()
...
@@ -40,6 +40,7 @@ set(SRC
     includes.cpp
     input_output_assign.cpp
     main.cpp
+    misc.cpp
    nop_elimination.cpp
    op.cpp
    partial_shape.cpp
@@ -245,10 +246,19 @@ if (NGRAPH_ONNXIFI_ENABLE)
     target_link_libraries(unit-test PRIVATE onnxifi-ngraph)
 endif()
-add_custom_target(unit-test-check
-    COMMAND ${PROJECT_BINARY_DIR}/test/unit-test \${ARGS}
-    DEPENDS unit-test
-)
+# If all the runtime libraries are installed into one location, that will make life easier.
+if (MSVS)
+    add_custom_target(unit-test-check
+        COMMAND set "PATH=${EXTERNAL_PROJECTS_ROOT}/src/ngraph/Release;${EXTERNAL_PROJECTS_ROOT}/mkldnn/lib/;${EXTERNAL_PROJECTS_ROOT}/mkl/src/ext_mkl/lib/;${EXTERNAL_PROJECTS_ROOT}/ext_tbb-prefix/src/ext_tbb/tbb2019_20181203oss/bin/intel64/vc14;%PATH%"
+        COMMAND ${PROJECT_BINARY_DIR}/test/unit-test \${ARGS}
+        DEPENDS unit-test
+    )
+else()
+    add_custom_target(unit-test-check
+        COMMAND ${PROJECT_BINARY_DIR}/test/unit-test \${ARGS}
+        DEPENDS unit-test
+    )
+endif()
 add_custom_target(check
     DEPENDS
...
@@ -392,7 +392,7 @@ NGRAPH_TEST(${BACKEND_NAME}, sqrt)
     auto handle = backend->compile(f);
     backend->call_with_validate(handle, {result}, {a});
-    EXPECT_EQ((vector<float>{4, 2, 9, 10, 100, 0}), read_vector<float>(result));
+    EXPECT_TRUE(test::all_close_f(vector<float>{4, 2, 9, 10, 100, 0}, read_vector<float>(result)));
 }
 NGRAPH_TEST(${BACKEND_NAME}, tan)
...
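The EXPECT_EQ to all_close_f change acknowledges that sqrt results can differ by a few ULPs between libm implementations, so cross-platform tests need a tolerance rather than bitwise equality. all_close_f is ngraph's test helper; a rough, self-contained sketch of the underlying ULP-distance idea (not ngraph's actual implementation):

```cpp
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iostream>

// Map a float's bit pattern onto a monotonic integer scale so that adjacent
// representable floats differ by exactly 1.
std::int64_t ulp_index(float x)
{
    std::int32_t bits;
    std::memcpy(&bits, &x, sizeof bits);
    return bits >= 0 ? bits : std::int64_t{INT32_MIN} - bits;
}

// True when a and b are within max_ulps representable floats of each other.
bool close_ulps(float a, float b, std::int64_t max_ulps)
{
    return std::llabs(ulp_index(a) - ulp_index(b)) <= max_ulps;
}

int main()
{
    float exact = 10.0f;
    float computed = 10.000001f; // e.g. a sqrt result off by a few ULPs

    std::cout << (exact == computed) << "\n";             // 0: bitwise equality fails
    std::cout << close_ulps(exact, computed, 16) << "\n"; // 1: tolerant compare passes
    return 0;
}
```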
@@ -1339,9 +1339,9 @@ TEST(cpu_fusion, rnn_fusion_from_json_model)
     shared_ptr<Function> func = ngraph::deserialize(ss);
     pass_manager.run_passes(func);
     const size_t NUM_STEPS = 10;
-    auto mmb_predicate = [](std::shared_ptr<Node> node) {
+    auto mmb_predicate = [=](std::shared_ptr<Node> node) {
         auto users = node->get_users();
-        return users.size() == NUM_STEPS &&
+        return (users.size() == NUM_STEPS) &&
                std::all_of(begin(users), end(users), [](std::shared_ptr<Node> n) {
                    return std::dynamic_pointer_cast<op::Slice>(n) != nullptr;
                });
...
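The `[]` to `[=]` change deals with a compiler divergence: reading the value of a const integral local initialized with a constant expression is not an odr-use, so GCC and Clang accept the capture-less lambda, while the MSVC of the time rejected it with exactly the error quoted in the commit message. Capturing by value is the portable spelling. A minimal sketch:

```cpp
#include <cstddef>
#include <iostream>

int main()
{
    const std::size_t NUM_STEPS = 10;

    // GCC/Clang accept a capture-less lambda here, because the constant's
    // value can be read without odr-using it; some MSVC versions instead
    // report: 'NUM_STEPS' cannot be implicitly captured because no default
    // capture mode has been specified.
    // auto fragile = [](std::size_t n) { return n == NUM_STEPS; };

    // Portable: capture by value, as the commit does.
    auto predicate = [=](std::size_t n) { return n == NUM_STEPS; };

    std::cout << predicate(10) << "\n"; // prints 1
    return 0;
}
```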
@@ -21,6 +21,7 @@
 #include <memory>
 #include "gtest/gtest.h"
+#include "misc.hpp"
 #include "ngraph/autodiff/adjoints.hpp"
 #include "ngraph/file_util.hpp"
 #include "ngraph/graph_util.hpp"
@@ -124,7 +125,7 @@ TEST(cpu_test, abc_tbb)
     bool use_tbb = (getenv("NGRAPH_CPU_USE_TBB") != nullptr);
     if (!use_tbb)
     {
-        setenv("NGRAPH_CPU_USE_TBB", "1", 1);
+        set_environment("NGRAPH_CPU_USE_TBB", "1", 1);
     }
     Shape shape{2, 2};
@@ -160,7 +161,7 @@ TEST(cpu_test, abc_tbb)
     if (!use_tbb)
     {
-        unsetenv("NGRAPH_CPU_USE_TBB");
+        unset_environment("NGRAPH_CPU_USE_TBB");
     }
 }
 #endif // NGRAPH_TBB_ENABLE
...
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "misc.hpp"
FILE* port_open(const char* command, const char* type)
{
#ifdef _WIN32
return _popen(command, type);
#elif defined(__linux) || defined(__APPLE__)
return popen(command, type);
#endif
}
int port_close(FILE* stream)
{
#ifdef _WIN32
return _pclose(stream);
#elif defined(__linux) || defined(__APPLE__)
return pclose(stream);
#endif
}
int set_environment(const char* name, const char* value, int overwrite)
{
#ifdef _WIN32
return _putenv_s(name, value);
#elif defined(__linux) || defined(__APPLE__)
return setenv(name, value, overwrite);
#endif
}
int unset_environment(const char* name)
{
#ifdef _WIN32
return _putenv_s(name, "");
#elif defined(__linux) || defined(__APPLE__)
return unsetenv(name);
#endif
}
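A hypothetical usage sketch of these wrappers (not part of the commit). One Windows caveat worth noting: _putenv_s has no overwrite flag, so the `overwrite` argument is effectively ignored there, and unsetting is expressed as setting the variable to an empty string:

```cpp
#include <cstdio>
#include "misc.hpp"

int main()
{
    // setenv on POSIX, _putenv_s on Windows (overwrite ignored there).
    set_environment("NGRAPH_CPU_USE_TBB", "1", /*overwrite=*/1);

    // popen/pclose on POSIX, _popen/_pclose on Windows.
    if (FILE* pipe = port_open("echo hello", "r"))
    {
        char buf[128];
        while (fgets(buf, sizeof buf, pipe))
        {
            fputs(buf, stdout);
        }
        port_close(pipe);
    }

    // unsetenv on POSIX; empties the variable via _putenv_s on Windows.
    unset_environment("NGRAPH_CPU_USE_TBB");
    return 0;
}
```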
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <stdio.h>
#include <stdlib.h>
FILE* port_open(const char* command, const char* type);
int port_close(FILE* stream);
int set_environment(const char* name, const char* value, int overwrite);
int unset_environment(const char* name);
@@ -17,6 +17,7 @@
 #include <gtest/gtest.h>
 #include <sstream>
+#include "misc.hpp"
 #include "ngraph/cpio.hpp"
 #include "ngraph/file_util.hpp"
 #include "ngraph/log.hpp"
@@ -33,7 +34,7 @@ TEST(tools, nbench_functional)
     ss << NBENCH_PATH << " -f " << model_path << " -b INTERPRETER -i 2 -w 2";
     auto cmd = ss.str();
-    auto f = popen(cmd.c_str(), "r");
+    auto f = port_open(cmd.c_str(), "r");
     if (f)
     {
         stringstream str;
@@ -45,7 +46,7 @@ TEST(tools, nbench_functional)
             str << s;
         }
         string output = str.str();
-        auto status = pclose(f);
+        auto status = port_close(f);
         ASSERT_EQ(status, 0) << output;
     }
     else
...