Unverified commit b17b4066, authored by Robert Kimball, committed by GitHub

remove test that relies on CPU if CPU is not built (#1403)

* remove test that relies on CPU if CPU is not built

* fix docker build

* change onnx to use INTERPRETER

* run unit-test-check
parent 8c6a5be0
......@@ -31,10 +31,9 @@ COPY . /root/ngraph
RUN mkdir /root/ngraph/build
WORKDIR /root/ngraph/build
RUN cmake .. -DNGRAPH_CPU_ENABLE=FALSE -DNGRAPH_UNIT_TEST_ENABLE=FALSE -DNGRAPH_TOOLS_ENABLE=FALSE \
-DNGRAPH_USE_PREBUILT_LLVM=TRUE -DNGRAPH_ONNX_IMPORT_ENABLE=TRUE -DCMAKE_INSTALL_PREFIX="~/ngraph_dist"
RUN cmake .. -DNGRAPH_CPU_ENABLE=FALSE -DNGRAPH_USE_PREBUILT_LLVM=TRUE \
-DNGRAPH_ONNX_IMPORT_ENABLE=TRUE -DCMAKE_INSTALL_PREFIX="~/ngraph_dist"
RUN make style-check
RUN make
RUN make install
......@@ -45,7 +44,7 @@ ENV NGRAPH_CPP_BUILD_PATH /root/ngraph_dist
ENV LD_LIBRARY_PATH /root/ngraph_dist/lib
ENV PYBIND_HEADERS_PATH /root/ngraph/python/pybind11
RUN df -k
RUN df -k
# Test nGraph and nGraph Python API
CMD cd /root/ngraph/build && make check ; cd /root/ngraph/python && tox
CMD cd /root/ngraph/build && make unit-test-check ; cd /root/ngraph/python && tox
......@@ -4,7 +4,7 @@ set -e
echo "TASK:" ${TASK}
if [ ${TASK} == "cpp_test" ]; then
docker run -w '/root/ngraph/build' test_ngraph make check
docker run -w '/root/ngraph/build' test_ngraph make unit-test-check
fi
if [ ${TASK} == "python2_test" ]; then
......
......@@ -21,7 +21,6 @@ set(SRC
build_graph.cpp
constant_folding.cpp
copy.cpp
core_fusion.cpp
cpio.cpp
cse.cpp
element_type.cpp
......@@ -55,6 +54,10 @@ if (NGRAPH_INTERPRETER_ENABLE)
set(SRC ${SRC} backend_debug_api.cpp builder.cpp backend_api.cpp)
endif()
if (NGRAPH_CPU_ENABLE)
set(SRC ${SRC} core_fusion.cpp)
endif()
add_subdirectory(models)
add_subdirectory(files)
add_subdirectory(util)
......
......@@ -26,7 +26,7 @@ TEST(onnx, model_add_abc)
{
auto model{ngraph::onnx_import::load_onnx_model(
ngraph::file_util::path_join(SERIALIZED_ZOO, "onnx/add_abc.onnx"))};
auto backend{ngraph::runtime::Backend::create("CPU")};
auto backend{ngraph::runtime::Backend::create("INTERPRETER")};
ngraph::Shape shape{1};
auto a{backend->create_tensor(ngraph::element::f32, shape)};
......@@ -46,7 +46,7 @@ TEST(onnx, model_add_abc_initializers)
{
auto model{ngraph::onnx_import::load_onnx_model(
ngraph::file_util::path_join(SERIALIZED_ZOO, "onnx/add_abc_initializers.onnx"))};
auto backend{ngraph::runtime::Backend::create("CPU")};
auto backend{ngraph::runtime::Backend::create("INTERPRETER")};
ngraph::Shape shape{2, 2};
......@@ -69,7 +69,7 @@ TEST(onnx, model_split_equal_parts_default)
for (std::size_t i = 0; i < expected_output.size(); ++i)
{
auto result_vectors = execute(model[i], args, "CPU");
auto result_vectors = execute(model[i], args, "INTERPRETER");
EXPECT_EQ(result_vectors.size(), 1);
EXPECT_EQ(expected_output[i], result_vectors.front());
}
......@@ -88,7 +88,7 @@ TEST(onnx, model_split_equal_parts_2d)
for (std::size_t i = 0; i < expected_output.size(); ++i)
{
auto result_vectors = execute(model[i], args, "CPU");
auto result_vectors = execute(model[i], args, "INTERPRETER");
EXPECT_EQ(result_vectors.size(), 1);
EXPECT_EQ(expected_output[i], result_vectors[0]);
}
......@@ -107,7 +107,7 @@ TEST(onnx, model_split_variable_parts_2d)
for (std::size_t i = 0; i < expected_output.size(); ++i)
{
auto result_vectors = execute(model[i], args, "CPU");
auto result_vectors = execute(model[i], args, "INTERPRETER");
EXPECT_EQ(result_vectors.size(), 1);
EXPECT_EQ(expected_output[i], result_vectors[0]);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment