Commit 925e7b27 authored by Robert Kimball, committed by Scott Cyphers

Move unit tests out of backend_test.in.cpp (#1865)

* comparisons

* move more unit tests out of backend_test.in.cpp

* move more tests

* move more tests
parent e9b4c104
@@ -95,7 +95,16 @@ add_subdirectory(util)
 # such as ${BACKEND_NAME} with their values, such as CPU, GPU, or INTERPRETER.
 set(MULTI_TEST_SRC
     autodiff.in.cpp
+    backend_binary_elementwise.in.cpp
+    backend_broadcast.in.cpp
+    backend_comparison.in.cpp
+    backend_dot.in.cpp
+    backend_one_hot.in.cpp
+    backend_reduce.in.cpp
+    backend_reshape.in.cpp
+    backend_sum.in.cpp
     backend_test.in.cpp
+    backend_unary_elementwise.in.cpp
     convolution_test.in.cpp
 )
 if(NGRAPH_DISTRIBUTED_ENABLE)
@@ -141,7 +150,7 @@ endif()
 if(NGRAPH_DISTRIBUTED_ENABLE)
     find_package(MPI REQUIRED)
     target_compile_definitions(unit-test PRIVATE NGRAPH_DISTRIBUTED)
-    target_include_directories(unit-test 
+    target_include_directories(unit-test
         SYSTEM PRIVATE ${MPI_C_INCLUDE_PATH} ${MPI_CXX_INCLUDE_PATH})
     target_link_libraries(unit-test PRIVATE ${MPI_C_LIBRARIES} ${MPI_CXX_LIBRARIES})
 endif()
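As the comment in the first hunk notes, each file in MULTI_TEST_SRC is expanded once per backend at configure time, with placeholders such as ${BACKEND_NAME} and ${MANIFEST} replaced by concrete values. A minimal sketch of what the first test in the new backend_comparison.in.cpp looks like after substitution for the INTERPRETER backend (the manifest filename below is illustrative, not taken from this commit):

// Sketch of the configure-time expansion; only the substituted lines are shown.
static string s_manifest = "interpreter_unit_test.manifest"; // illustrative name, was "${MANIFEST}"

NGRAPH_TEST(INTERPRETER, equal) // was NGRAPH_TEST(${BACKEND_NAME}, equal)
{
    // ...
    auto backend = runtime::Backend::create("INTERPRETER"); // was "${BACKEND_NAME}"
    // ... rest of the test body is unchanged ...
}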
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>

#include "gtest/gtest.h"
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/serializer.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

NGRAPH_TEST(${BACKEND_NAME}, equal)
{
    Shape shape{2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Equal>(A, B), op::ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 8, -8, 17, -0.5, 0, 1, 1});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{1, 8, 4, 8, 0, 0, 1, 1.5});
    auto result = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((vector<char>{1, 1, 0, 0, 0, 1, 1, 0}), read_vector<char>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, notequal)
{
    Shape shape{2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::NotEqual>(A, B), op::ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 8, -8, 17, -0.5, 0, 1, 1});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{1, 8, 4, 8, 0, 0, 1, 1.5});
    auto result = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((vector<char>{0, 0, 1, 1, 1, 0, 0, 1}), read_vector<char>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, greater)
{
    Shape shape{2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Greater>(A, B), op::ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5});
    auto result = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((vector<char>{0, 1, 0, 1, 0, 1, 1, 0}), read_vector<char>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, greatereq)
{
    Shape shape{2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::GreaterEq>(A, B), op::ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 8, -8, 17, -0.5, 0, 2, 1});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{1, 2, -8, 8, 0, 0, 0.5, 1.5});
    auto result = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((vector<char>{1, 1, 1, 1, 0, 1, 1, 0}), read_vector<char>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, less)
{
    Shape shape{2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Less>(A, B), op::ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5});
    auto result = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((vector<char>{0, 0, 1, 0, 1, 0, 0, 1}), read_vector<char>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, lesseq)
{
    Shape shape{2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::LessEq>(A, B), op::ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 8, -8, 17, -0.5, 0, 2, 1});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{1, 2, -8, 8, 0, 0, 0.5, 1.5});
    auto result = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((vector<char>{1, 0, 1, 0, 1, 1, 0, 1}), read_vector<char>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, lesseq_bool)
{
    Shape shape{2, 2, 2};
    auto A = make_shared<op::Parameter>(element::boolean, shape);
    auto B = make_shared<op::Parameter>(element::boolean, shape);
    auto f = make_shared<Function>(make_shared<op::LessEq>(A, B), op::ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::boolean, shape);
    copy_data(a, vector<char>{1, 1, 1, 1, 1, 1, 1, 1});
    auto b = backend->create_tensor(element::boolean, shape);
    copy_data(b, vector<char>{0, 0, 0, 0, 0, 0, 0, 0});
    auto result = backend->create_tensor(element::boolean, shape);

    // Overwrite the initial result vector to make sure we're not just coincidentally getting the right value.
    copy_data(result, vector<char>{1, 1, 1, 1, 1, 1, 1, 1});

    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((vector<char>{0, 0, 0, 0, 0, 0, 0, 0}), read_vector<char>(result));
}
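
The guard in lesseq_bool, pre-filling the result tensor so the expected values cannot appear by accident, generalizes to the other comparison tests. A hedged sketch of the same idea applied to the equal test earlier in this file (fragment only; the expected/poison variables are illustrative and not part of this commit):

    // Illustrative fragment: pre-fill `result` with the complement of the expected
    // values so the assertion can only pass if the backend actually wrote the output.
    vector<char> expected{1, 1, 0, 0, 0, 1, 1, 0};
    vector<char> poison(expected.size());
    transform(expected.begin(), expected.end(), poison.begin(), [](char v) { return static_cast<char>(!v); });
    copy_data(result, poison);

    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ(expected, read_vector<char>(result));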