Unverified Commit 44b75607 authored by Robert Kimball's avatar Robert Kimball Committed by GitHub

New manifest driven method for disabling backend unit tests (#983)

* Add mechanism for disabling specific backend unit tests from a manifest file.
Populate the test manifest files for CPU, GPU and INTERPRETER.

* update docs for new manifest controlled transformer unit tests
parent 4e2715b5
......@@ -155,8 +155,13 @@ if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
endif()
endif()
include(unit_test_control)
set(UNIT_TEST_CONFIG_LIST "" CACHE INTERNAL "")
unit_test_control(BACKEND INTERPRETER MANIFEST src/ngraph/runtime/interpreter/unit_test.manifest)
# Set true if CPU backend is built by default
if (NGRAPH_CPU_ENABLE)
unit_test_control(BACKEND CPU MANIFEST src/ngraph/runtime/cpu/unit_test.manifest)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNGRAPH_CPU_ENABLE")
endif()
......@@ -182,6 +187,7 @@ if(NGRAPH_GPU_ENABLE)
"Please select a correct compiler version\n"
)
endif()
unit_test_control(BACKEND GPU MANIFEST src/ngraph/runtime/gpu/unit_test.manifest)
elseif(NGRAPH_GPU_ENABLE)
message(FATAL_ERROR "GPU was required but CUDA library was not found")
endif()
......
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# unit_test_control(BACKEND <name> [MANIFEST <path>])
#
# Registers a backend for manifest-driven unit-test control. Each call appends
# an entry of the form "<backend>@<absolute-manifest-path>" (or "<backend>@"
# when no manifest is given) to the UNIT_TEST_CONFIG_LIST cache variable,
# which the test CMakeLists later splits on '@' to configure per-backend
# test sources.
function(UNIT_TEST_CONTROL)
    set(options)
    set(oneValueArgs BACKEND MANIFEST)
    set(multiValueArgs)
    cmake_parse_arguments(UNIT_TEST_CONTROL "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
    # BACKEND is required — fail loudly instead of silently registering "@manifest"
    if (NOT UNIT_TEST_CONTROL_BACKEND)
        message(FATAL_ERROR "unit_test_control: BACKEND argument is required")
    endif()
    if (UNIT_TEST_CONTROL_MANIFEST)
        # Store an absolute path so the manifest can be resolved from any
        # directory at configure/build time.
        get_filename_component(UNIT_TEST_CONTROL_MANIFEST "${UNIT_TEST_CONTROL_MANIFEST}" ABSOLUTE)
        set(CONFIG_STRING "${UNIT_TEST_CONTROL_BACKEND}@${UNIT_TEST_CONTROL_MANIFEST}")
    else()
        set(CONFIG_STRING "${UNIT_TEST_CONTROL_BACKEND}@")
    endif()
    # Append via unquoted expansion so an initially-empty cache list does not
    # acquire a leading empty element (the old "${LIST};${NEW}" form did).
    set(NEW_CONFIG_LIST ${UNIT_TEST_CONFIG_LIST} "${CONFIG_STRING}")
    set(UNIT_TEST_CONFIG_LIST "${NEW_CONFIG_LIST}" CACHE INTERNAL "")
endfunction()
......@@ -117,16 +117,22 @@ File Names
* Transformer-independent tests:
- File name is ``file_name.in.cpp``
- Add ``#include "test_control.hpp"`` to the file's includes
- Add the line ``static std::string s_manifest = "${MANIFEST}";`` to the top of the file.
- Use
.. code-block:: sh
TEST(${BACKEND_NAME}, test_name)
NGRAPH_TEST(${BACKEND_NAME}, test_name)
for each test. Files will be
generated for each transformer and the ``${BACKEND_NAME}`` will be replaced
for each test. Files are
generated for each transformer and the ``${BACKEND_NAME}`` is replaced
with the transformer name.
Individual unit tests may be disabled by adding the name of the test to the
``unit_test.manifest`` file found in
the transformer's source file directory.
Formatting
~~~~~~~~~~
......
......@@ -168,8 +168,7 @@ endif()
include_directories("${NGRAPH_INCLUDE_PATH}")
include_directories(SYSTEM "${EIGEN_INCLUDE_DIR}")
if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND
MKLDNN_INCLUDE_DIR)
if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND MKLDNN_INCLUDE_DIR)
find_package(ZLIB REQUIRED)
if (NGRAPH_TBB_ENABLE)
......
batch_norm_one_output
batch_norm_three_outputs
one_hot_matrix_0
one_hot_scalar_0_in_3
one_hot_scalar_1_in_3
one_hot_scalar_2_in_3
one_hot_scalar_fp_nonint_in_3
one_hot_scalar_oob_in_3
one_hot_vector_0
one_hot_vector_1
one_hot_vector_1_barely_oob
one_hot_vector_1_far_oob
one_hot_vector_1_fp
one_hot_vector_1_fp_nonint
......@@ -643,7 +643,6 @@ size_t runtime::gpu::CUDAEmitter::build_elementwise_n_to_1(const GPURuntimeConte
CudaKernelBuilder::get_elementwise_op(writer, kernel_name.str(), op, dtypes);
std::string kernel = writer.get_code();
compiled_kernel = ctx->compiled_kernel_pool->set(kernel_name.str(), writer.get_code());
}
size_t nthreads = shape_size(tensor_shape);
......
......@@ -112,7 +112,7 @@ namespace
v |= v >> 8;
v |= v >> 16;
return multiply_de_Bruijn_bit_position[(uint32_t)(v * 0x07C4ACDDU) >> 27];
return multiply_de_Bruijn_bit_position[static_cast<uint32_t>(v * 0x07C4ACDDU) >> 27];
}
int msbU64(uint64_t val)
......
abc_int64
abc_tbb
aliased_output
avg_pool_1d_1channel_1image
avg_pool_1d_1channel_2image
avg_pool_1d_2channel_2image
avg_pool_2d_1channel_1image_padded
avg_pool_2d_1channel_1image_strided
avg_pool_2d_2channel_2image
avg_pool_2d_2channel_2image_padded
avg_pool_2d_2channel_2image_padded_3x3
avg_pool_2d_2channel_2image_padded_3x3_strided
avg_pool_2d_2channel_2image_padded_3x3_strided_uneven
avg_pool_2d_2channel_2image_padded_only_above
avg_pool_2d_2channel_2image_padded_only_below
backwards_avgpool_n1_c1_hw2x2
backwards_avgpool_n1_c1_hw4x4
backwards_avgpool_n2_c2_hw2x2_win_2x2_str_1x1_padding_numeric
backwards_avgpool_n2_c2_hw4x4
backwards_avgpool_n2_c2_hw4x4_numeric
backwards_avgpool_n2_c2_hw4x4_win_2x2_str_1x1_numeric
backwards_broadcast0
backwards_broadcast1
backwards_concat_axis_0
backwards_concat_axis_1
backwards_concat_vector
backwards_divide
backwards_dot_tensor_vector
backwards_dot_tensor2_tensor2
backwards_dot_tensor3_tensor3
backwards_log
backwards_power
backwards_relu
backwards_replace_slice
backwards_reverse_3d_02
backwards_select
backwards_select_nested
backwards_sign
backwards_slice
backwards_softmax_3d
backwards_softmax_all
backwards_softmax_axis
backwards_softmax_underflow
backwards_subtract
backwards_sum_m2s
backwards_sum_m2v_0
backwards_sum_m2v_1
backwards_sum_v2s
backwards_tan
backwards_tanh
batch_norm_one_output
batch_norm_three_outputs
broadcast_vector_rowwise_int64
broadcast_vector_rowwise_reversed
computation_reuse
concat_matrix_int64
constant_broadcast
constant_equality_bool
convolution_2d_1item_1o1i_data_dilated
convolution_2d_1item_2o1i_data_dilated
convolution_2d_1item_2o2i_data_dilated
convolution_2d_1item_5o3i_data_dilated
convolution_2d_1item_padded_2_3x4_5
convolution_2d_2item_5o3i_data_dilated
convolution_2d_2items_dilated_padded
convolution_2d_2items_strided_padded
convolution_2d_8item_large_5o3i_data_dilated
convolution_2d_8item_large_5o3i_uneven_filter_data_dilated
convolution_2d_8item_large_5o3i_uneven_filter_uneven_data_dilation_data_dilated
convolution_3d_1item_large_5o3i_padded_uneven_filter_uneven_data_dilation_data_dilated
convolution_3d_2item_large_5o3i_padded_strided_uneven_filter_uneven_data_dilation_data_dilated
convolution_3d_2item_large_5o3i_padded_strided_uneven_filter_uneven_data_dilation_filter_dilated_data_dilated
convolution_3d_2item_large_5o3i_uneven_filter_uneven_data_dilation_data_dilated
convolution_4d_2items
convolution_4d_4items
convolution_4d_4items_dilated
convolution_4d_4items_padded_neg
convolution_4d_4items_strided
convolution_4d_4items_strided_dilated
convolution_4d_4items_strided_dilated_padded
convolution_4d_4items_strided_dilated_padded_neg
convolution_4d_4items_strided_dilated_padded_same
divide_adjoint_stability
divide_by_zero_float32
divide_by_zero_int32
dot_3d_multi_axis
dot_3d_one_axis_arbitrary
dot_4d_5d_multi_axis
dot_4d_5d_multi_axis_big_fp64_VERY_SLOW
dot_4d_5d_multi_axis_more
dot_matrix_vector_int64
function_call
logical_and
logical_or
mkldnn_layouts
negative
numeric_double_inf
numeric_double_nan
numeric_float_inf
numeric_float_nan
one_hot_scalar_fp_nonint_in_3
one_hot_scalar_oob_in_3
one_hot_vector_1_barely_oob
one_hot_vector_1_far_oob
one_hot_vector_1_fp_nonint
parameter_as_output
product_3d_eliminate_zero_dim
product_3d_to_matrix_least_sig
product_3d_to_matrix_most_sig
product_3d_to_scalar
product_3d_to_vector
product_matrix_cols_zero
product_matrix_columns
product_matrix_rows
product_matrix_rows_zero
product_matrix_to_scalar_zero_by_zero
product_to_scalar
product_trivial
product_trivial_5d
product_vector_zero
reduce_3d_to_vector
reduce_matrix_cols_zero
reduce_matrix_columns
reduce_matrix_rows
reduce_matrix_rows_zero
reduce_matrix_to_scalar_zero_by_zero
reduce_to_scalar
reduce_trivial
reduce_vector_zero
reduce_window_emulating_max_pool_1d_1channel_1image
reduce_window_emulating_max_pool_1d_1channel_2image
reduce_window_emulating_max_pool_1d_2channel_2image
reduce_window_emulating_max_pool_2d_1channel_1image_strided
reduce_window_emulating_max_pool_2d_2channel_2image
replace_slice_3d
replace_slice_3d_strided
replace_slice_3d_strided_different_strides
replace_slice_matrix
replace_slice_scalar
replace_slice_vector
scalar_constant_float32
scalar_constant_int64
select_and_scatter_3d_without_overlap
select_and_scatter_with_overlap
select_and_scatter_without_overlap
softmax_all
softmax_axis
softmax_underflow
tensor_constant
tensor_constant_float32
tensor_constant_int64
tensor_constant_with_op
tensorview_custom_mem
zero_sized_abs
zero_sized_acos
zero_sized_asin
zero_sized_atan
zero_sized_ceiling
zero_sized_cos
zero_sized_cosh
zero_sized_divide
zero_sized_eq
zero_sized_exp
zero_sized_floor
zero_sized_greater
zero_sized_greatereq
zero_sized_less
zero_sized_lesseq
zero_sized_log
zero_sized_not
zero_sized_not_equal
zero_sized_power
zero_sized_sign
zero_sized_sin
zero_sized_sinh
zero_sized_subtract
zero_sized_tan
zero_sized_tanh
batchnorm_bprop_n4c3h2w2
batchnorm_fprop_b1c2h2w2
batchnorm_fprop_b2c2h2w1
batchnorm_fprop_globalstats_b2c2w2h1
batchnorm_fprop_inference_b2c2h2w1
computation_reuse
mkldnn_layouts
......@@ -46,6 +46,7 @@ set (SRC
shape.cpp
reshape_elimination.cpp
tensor.cpp
test_control.cpp
type_prop.cpp
util.cpp
uuid.cpp
......@@ -108,10 +109,16 @@ if(NGRAPH_DISTRIBUTED_ENABLE AND MPI_C_INCLUDE_PATH)
foreach(BACKEND_NAME ${BACKEND_NAMES})
configure_file(distributed.cpp distributed_${BACKEND_NAME}.cpp)
set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/distributed_${BACKEND_NAME}.cpp)
endforeach()
endforeach()
endif()
foreach(BACKEND_NAME ${BACKEND_NAMES})
foreach(TEST_CONFIG ${UNIT_TEST_CONFIG_LIST})
string(FIND ${TEST_CONFIG} "@" OFFSET)
string(SUBSTRING ${TEST_CONFIG} 0 ${OFFSET} BACKEND_NAME)
math(EXPR OFFSET ${OFFSET}+1)
string(SUBSTRING ${TEST_CONFIG} ${OFFSET} -1 MANIFEST)
configure_file(backend_test.in.cpp backend_test_${BACKEND_NAME}.cpp)
configure_file(convolution_test.in.cpp convolution_test_${BACKEND_NAME}.cpp)
set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/backend_test_${BACKEND_NAME}.cpp)
......
This diff is collapsed.
This source diff could not be displayed because it is too large. You can view the blob instead.
This diff is collapsed.
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <fstream>
#include <unordered_map>
#include <unordered_set>
#include "ngraph/log.hpp"
#include "ngraph/util.hpp"
#include "test_control.hpp"
using namespace std;
using namespace ngraph;
static unordered_map<string, unordered_set<string>> s_blacklists;
// Returns the gtest test name to register, prefixing it with "DISABLED_"
// when the name appears in the backend's unit_test.manifest.
//
// Parameters:
//   test_case_name - gtest test-case (backend) name; keys the per-backend
//                    blacklist cache in s_blacklists.
//   test_name      - bare test name as written in the NGRAPH_TEST macro.
//   manifest       - path to the backend's manifest file; may be empty,
//                    meaning "no tests disabled".
//
// The manifest is parsed lazily on the first call for a given test case and
// cached in s_blacklists. NOTE(review): if a manifest exists but contains no
// usable entries, the empty-blacklist check causes it to be re-read on every
// call — harmless, but worth confirming it is intentional.
string ngraph::prepend_disabled(const string& test_case_name,
                                const string& test_name,
                                const string& manifest)
{
    string rc = test_name;
    unordered_set<string>& blacklist = s_blacklists[test_case_name];
    if (blacklist.empty() && !manifest.empty())
    {
        ifstream f(manifest);
        string line;
        while (getline(f, line))
        {
            // Trim BEFORE filtering so indented "# comment" lines and
            // whitespace-only lines are excluded, and so short (even
            // single-character) test names are still honored. The original
            // checked `line.size() > 1 && line[0] != '#'` pre-trim, which
            // blacklisted indented comments and dropped 1-char entries.
            line = trim(line);
            if (!line.empty() && line[0] != '#')
            {
                blacklist.insert(line);
            }
        }
    }
    if (contains(blacklist, test_name))
    {
        rc = "DISABLED_" + test_name;
    }
    return rc;
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <string>
#include "gtest/gtest.h"
// Copied from gtest
// Declaration of the manifest lookup implemented in test_control.cpp; each
// test translation unit defines a file-local `s_manifest` string (injected by
// CMake's configure_file) that is passed through the NGRAPH_TEST macro below.
namespace ngraph
{
    std::string prepend_disabled(const std::string& test_case_name,
                                 const std::string& test_name,
                                 const std::string& manifest);
}
// Clone of gtest's GTEST_TEST_ registration macro with one change: the test
// name is routed through ngraph::prepend_disabled(), so tests listed in the
// backend's unit_test.manifest are registered under a "DISABLED_" name and
// skipped by the gtest runner. No comments may appear inside the macro body —
// the backslash-newline splice happens before comment removal.
#define NGRAPH_GTEST_TEST_(test_case_name, test_name, parent_class, parent_id) \
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
: public parent_class \
{ \
public: \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
private: \
virtual void TestBody(); \
static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_; \
GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
}; \
\
::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::test_info_ = \
::testing::internal::MakeAndRegisterTestInfo( \
#test_case_name, \
::ngraph::prepend_disabled(#test_case_name, #test_name, s_manifest).c_str(), \
nullptr, \
nullptr, \
::testing::internal::CodeLocation(__FILE__, __LINE__), \
(parent_id), \
parent_class::SetUpTestCase, \
parent_class::TearDownTestCase, \
new ::testing::internal::TestFactoryImpl<GTEST_TEST_CLASS_NAME_(test_case_name, \
test_name)>); \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
// Drop-in replacement for gtest's TEST() macro; backend test files use this
// so individual tests can be disabled via the manifest mechanism above.
#define NGRAPH_TEST(test_case_name, test_name) \
NGRAPH_GTEST_TEST_( \
test_case_name, test_name, ::testing::Test, ::testing::internal::GetTestTypeId())
......@@ -26,20 +26,6 @@
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/serializer.hpp"
// Early-returns out of the enclosing test body when run against the named
// backend, logging the skip. Superseded by the manifest-driven NGRAPH_TEST
// mechanism (this commit removes it). No comments inside the macro body —
// line splicing happens before comment removal.
#define SKIP_TEST_FOR(backend_to_skip, current_backend) \
if (backend_to_skip == current_backend) \
{ \
NGRAPH_INFO << "Skipped test for " << current_backend; \
return; \
}
// Inverse of SKIP_TEST_FOR: early-returns unless the current backend matches
// the one the test is written for. Also superseded by the manifest mechanism.
#define ONLY_ENABLE_TEST_FOR(backend_to_enable, current_backend) \
if (backend_to_enable != current_backend) \
{ \
NGRAPH_INFO << "Skipped test for " << current_backend; \
return; \
}
namespace ngraph
{
class Node;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment