Commit 925e7b27 authored by Robert Kimball's avatar Robert Kimball Committed by Scott Cyphers

Move unit tests out of backend_test.in.cpp (#1865)

* comparisons

* move more unit test out of backend_test.in.cpp

* move more tests

* move more tests
parent e9b4c104
......@@ -95,7 +95,16 @@ add_subdirectory(util)
# such as ${BACKEND_NAME} with their values, such as CPU, GPU, or INTERPRETER.
set(MULTI_TEST_SRC
autodiff.in.cpp
backend_binary_elementwise.in.cpp
backend_broadcast.in.cpp
backend_comparison.in.cpp
backend_dot.in.cpp
backend_one_hot.in.cpp
backend_reduce.in.cpp
backend_reshape.in.cpp
backend_sum.in.cpp
backend_test.in.cpp
backend_unary_elementwise.in.cpp
convolution_test.in.cpp
)
if(NGRAPH_DISTRIBUTED_ENABLE)
......@@ -141,7 +150,7 @@ endif()
if(NGRAPH_DISTRIBUTED_ENABLE)
find_package(MPI REQUIRED)
target_compile_definitions(unit-test PRIVATE NGRAPH_DISTRIBUTED)
target_include_directories(unit-test
target_include_directories(unit-test
SYSTEM PRIVATE ${MPI_C_INCLUDE_PATH} ${MPI_CXX_INCLUDE_PATH})
target_link_libraries(unit-test PRIVATE ${MPI_C_LIBRARIES} ${MPI_CXX_LIBRARIES})
endif()
......
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// Elementwise addition of two 2x2 f32 tensors through the op::Add node.
NGRAPH_TEST(${BACKEND_NAME}, add)
{
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
    copy_data(t1, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ(read_vector<float>(t_out),
              (test::NDArray<float, 2>({{6, 8}, {10, 12}})).get_vector());
}
// Same addition as the `add` test, but building the graph with the
// overloaded operator+ on node shared_ptrs instead of op::Add directly.
NGRAPH_TEST(${BACKEND_NAME}, add_overload)
{
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(lhs + rhs, op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
    copy_data(t1, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ(read_vector<float>(t_out),
              (test::NDArray<float, 2>({{6, 8}, {10, 12}})).get_vector());
}
// Elementwise multiplication of two 2x2 f32 tensors via op::Multiply.
NGRAPH_TEST(${BACKEND_NAME}, multiply)
{
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Multiply>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
    copy_data(t1, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ(read_vector<float>(t_out),
              (test::NDArray<float, 2>({{5, 12}, {21, 32}})).get_vector());
}
// Same multiplication as the `multiply` test, but using the overloaded
// operator* on node shared_ptrs to construct the graph.
NGRAPH_TEST(${BACKEND_NAME}, multiply_overload)
{
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(lhs * rhs, op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, test::NDArray<float, 2>({{1, 2}, {3, 4}}).get_vector());
    copy_data(t1, test::NDArray<float, 2>({{5, 6}, {7, 8}}).get_vector());

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ(read_vector<float>(t_out),
              (test::NDArray<float, 2>({{5, 12}, {21, 32}})).get_vector());
}
// Elementwise f32 division via op::Divide; inputs chosen so every
// quotient is exactly 2 and EXPECT_EQ is safe on floats.
NGRAPH_TEST(${BACKEND_NAME}, divide)
{
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Divide>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, vector<float>{2, 4, 8, 16});
    copy_data(t1, vector<float>{1, 2, 4, 8});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ((vector<float>{2, 2, 2, 2}), read_vector<float>(t_out));
}
// Same division as the `divide` test, but constructing the graph with
// the overloaded operator/ on node shared_ptrs.
NGRAPH_TEST(${BACKEND_NAME}, divide_overload)
{
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(lhs / rhs, op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, vector<float>{2, 4, 8, 16});
    copy_data(t1, vector<float>{1, 2, 4, 8});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ((vector<float>{2, 2, 2, 2}), read_vector<float>(t_out));
}
// Checks numerical stability of the Divide adjoint (backprop) graph.
// For y = a / b, the adjoints w.r.t. a and b are computed from the output
// adjoint c. With a == 0 in two positions, the expected db entries are
// signed zeros (-0.0), not NaN — pinning down how the backprop formula is
// factored. (Expected values assume da = c/b, db = -c*a/(b*b) — see the
// EXPECT_EQ lines at the bottom.)
NGRAPH_TEST(${BACKEND_NAME}, divide_adjoint_stability)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");
Shape shape{2, 2};
// Builds bf(a, b, c) = [dY/dA, dY/dB] for Y = A / B, where c is the
// adjoint (incoming gradient) of Y, using the autodiff Adjoints machinery.
auto make_external = [&]() {
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Divide>(A, B), op::ParameterVector{A, B});
auto Y_out = f->get_output_op(0);
auto Xs = f->get_parameters();
// C is a fresh parameter carrying the output adjoint.
auto C = std::make_shared<op::Parameter>(Y_out->get_element_type(), Y_out->get_shape());
ngraph::autodiff::Adjoints adjoints(NodeVector{Y_out}, NodeVector{C});
std::vector<std::shared_ptr<Node>> dYdXs(Xs.size());
// One backprop node per forward parameter, in parameter order.
transform(
Xs.begin(), Xs.end(), dYdXs.begin(), [C, &adjoints](const std::shared_ptr<Node>& X) {
return adjoints.backprop_node(X);
});
// The backprop function takes (A, B, C) and returns (dY/dA, dY/dB).
std::vector<std::shared_ptr<op::Parameter>> params(Xs);
params.push_back(C);
auto bf = std::make_shared<Function>(dYdXs, params);
return bf;
};
auto bf = make_external();
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{0, 0, 1, 1});
auto b = backend->create_tensor(element::f32, shape);
copy_data(b, vector<float>{2, 2, 2, 2});
auto c = backend->create_tensor(element::f32, shape);
copy_data(c, vector<float>{1, 1, 1, 1});
auto resulta = backend->create_tensor(element::f32, shape);
auto resultb = backend->create_tensor(element::f32, shape);
backend->call_with_validate(bf, {resulta, resultb}, {a, b, c});
// dY/dA = c / b = 1/2 everywhere.
EXPECT_EQ((vector<float>{0.5, 0.5, 0.5, 0.5}), read_vector<float>(resulta));
// dY/dB = -c * a / b^2: signed zero where a == 0, -1/4 where a == 1.
EXPECT_EQ((vector<float>{-0.0, -0.0, -0.25, -0.25}), read_vector<float>(resultb));
}
// Float division by zero is well-defined in IEEE 754: positive finite
// dividends over +0 must yield +infinity, not a trap or NaN.
NGRAPH_TEST(${BACKEND_NAME}, divide_by_zero_float32)
{
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Divide>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors; the divisor tensor is all zeros.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, vector<float>{2, 4, 8, 16});
    copy_data(t1, vector<float>{0, 0, 0, 0});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    const float inf = std::numeric_limits<float>::infinity();
    EXPECT_EQ((vector<float>{inf, inf, inf, inf}), read_vector<float>(t_out));
}
// Integer division by zero is undefined behavior in C++ and traps on most
// hardware, so this is written as a gtest death test: the call is expected
// to terminate the child process. The catch(...)+abort() wrapper converts a
// backend that throws instead of trapping into a death as well, so either
// failure mode passes.
NGRAPH_TEST(${BACKEND_NAME}, divide_by_zero_int32)
{
Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto B = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::Divide>(A, B), op::ParameterVector{A, B});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a, vector<int>{2, 4, 8, 16});
auto b = backend->create_tensor(element::i32, shape);
// Divisor tensor is all zeros — this is what should kill the process.
copy_data(b, vector<int>{0, 0, 0, 0});
auto result = backend->create_tensor(element::i32, shape);
// Silence clang warnings triggered by the gtest death-test macros.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wused-but-marked-unused"
#pragma clang diagnostic ignored "-Wcovered-switch-default"
// "threadsafe" re-executes the binary for the death test, which is safer
// than fork() in a multithreaded test runner.
::testing::FLAGS_gtest_death_test_style = "threadsafe";
EXPECT_DEATH_IF_SUPPORTED(
{
try
{
backend->call_with_validate(f, {result}, {a, b});
}
catch (...)
{
// An exception from the backend also counts as death via abort().
abort();
}
},
"");
#pragma clang diagnostic pop
}
// Elementwise max of two 2x2x2 f32 tensors, including negatives and a
// -0.5 vs 0 comparison.
NGRAPH_TEST(${BACKEND_NAME}, maximum)
{
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Maximum>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1});
    copy_data(t1, vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ((vector<float>{1, 8, 4, 17, 0, 0.5, 2, 1.5}), read_vector<float>(t_out));
}
// Elementwise min of two 2x2x2 f32 tensors, including negatives and a
// -0.5 vs 0 comparison.
NGRAPH_TEST(${BACKEND_NAME}, minimum)
{
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Minimum>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1});
    copy_data(t1, vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ((vector<float>{1, 2, -8, 8, -.5, 0, 1, 1}), read_vector<float>(t_out));
}
// Elementwise min on i32 tensors, with values large enough to catch
// accidental float truncation in the backend.
NGRAPH_TEST(${BACKEND_NAME}, minimum_int32)
{
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::i32, shape);
    auto rhs = make_shared<op::Parameter>(element::i32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Minimum>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::i32, shape);
    auto t1 = backend->create_tensor(element::i32, shape);
    auto t_out = backend->create_tensor(element::i32, shape);
    copy_data(t0, vector<int32_t>{1, 8, -8, 17, -5, 67635216, 2, 1});
    copy_data(t1, vector<int32_t>{1, 2, 4, 8, 0, 18448, 1, 6});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ((vector<int32_t>{1, 2, -8, 8, -5, 18448, 1, 1}), read_vector<int32_t>(t_out));
}
// Elementwise min on i64 tensors; 17179887632 exceeds 32 bits, so this
// catches backends that narrow i64 internally.
NGRAPH_TEST(${BACKEND_NAME}, minimum_int64)
{
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::i64, shape);
    auto rhs = make_shared<op::Parameter>(element::i64, shape);
    auto f =
        make_shared<Function>(make_shared<op::Minimum>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::i64, shape);
    auto t1 = backend->create_tensor(element::i64, shape);
    auto t_out = backend->create_tensor(element::i64, shape);
    copy_data(t0, vector<int64_t>{1, 8, -8, 17, -5, 67635216, 2, 17179887632});
    copy_data(t1, vector<int64_t>{1, 2, 4, 8, 0, 18448, 1, 280592});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ((vector<int64_t>{1, 2, -8, 8, -5, 18448, 1, 280592}),
              read_vector<int64_t>(t_out));
}
// Elementwise max on i32 tensors, with values large enough to catch
// accidental float truncation in the backend.
NGRAPH_TEST(${BACKEND_NAME}, maximum_int32)
{
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::i32, shape);
    auto rhs = make_shared<op::Parameter>(element::i32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Maximum>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::i32, shape);
    auto t1 = backend->create_tensor(element::i32, shape);
    auto t_out = backend->create_tensor(element::i32, shape);
    copy_data(t0, vector<int32_t>{1, 8, -8, 17, -5, 67635216, 2, 1});
    copy_data(t1, vector<int32_t>{1, 2, 4, 8, 0, 18448, 1, 6});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ((vector<int32_t>{1, 8, 4, 17, 0, 67635216, 2, 6}), read_vector<int32_t>(t_out));
}
// Elementwise max on i64 tensors; 17179887632 exceeds 32 bits, so this
// catches backends that narrow i64 internally.
NGRAPH_TEST(${BACKEND_NAME}, maximum_int64)
{
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::i64, shape);
    auto rhs = make_shared<op::Parameter>(element::i64, shape);
    auto f =
        make_shared<Function>(make_shared<op::Maximum>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::i64, shape);
    auto t1 = backend->create_tensor(element::i64, shape);
    auto t_out = backend->create_tensor(element::i64, shape);
    copy_data(t0, vector<int64_t>{1, 8, -8, 17, -5, 67635216, 2, 17179887632});
    copy_data(t1, vector<int64_t>{1, 2, 4, 8, 0, 18448, 1, 280592});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ((vector<int64_t>{1, 8, 4, 17, 0, 67635216, 2, 17179887632}),
              read_vector<int64_t>(t_out));
}
// Elementwise power (base ^ exponent) on f32 tensors. Uses all_close
// rather than exact equality because pow() results may differ in the
// last ulp across backends.
NGRAPH_TEST(${BACKEND_NAME}, power)
{
    Shape shape{2, 2};
    auto base = make_shared<op::Parameter>(element::f32, shape);
    auto expo = make_shared<op::Parameter>(element::f32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Power>(base, expo), op::ParameterVector{base, expo});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, vector<float>{1, 2, 3, 5});
    copy_data(t1, vector<float>{2, 0, 6, 3});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_TRUE(test::all_close(vector<float>{1, 1, 729, 125}, read_vector<float>(t_out)));
}
// Elementwise subtraction of two 2x2 f32 tensors via op::Subtract.
NGRAPH_TEST(${BACKEND_NAME}, subtract)
{
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Subtract>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, vector<float>{2, 4, 8, 16});
    copy_data(t1, vector<float>{1, 2, 4, 8});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ((vector<float>{1, 2, 4, 8}), read_vector<float>(t_out));
}
// Same subtraction as the `subtract` test, but constructing the graph
// with the overloaded operator- on node shared_ptrs.
NGRAPH_TEST(${BACKEND_NAME}, subtract_overload)
{
    Shape shape{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(lhs - rhs, op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors on the backend and fill the inputs.
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t0, vector<float>{2, 4, 8, 16});
    copy_data(t1, vector<float>{1, 2, 4, 8});

    backend->call_with_validate(f, {t_out}, {t0, t1});
    EXPECT_EQ((vector<float>{1, 2, 4, 8}), read_vector<float>(t_out));
}
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// Broadcast a scalar (rank-0 tensor) to a 4-element vector.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_vector)
{
    Shape shape_a{};
    Shape shape_r{4};
    auto arg = make_shared<op::Parameter>(element::f32, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{0}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // One scalar in, four copies out.
    auto t_in = backend->create_tensor(element::f32, shape_a);
    auto t_out = backend->create_tensor(element::f32, shape_r);
    copy_data(t_in, vector<float>{6});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<float>{6, 6, 6, 6}), read_vector<float>(t_out));
}
// Requesting a broadcast axis outside the result shape's rank must be
// rejected at graph-construction time with ngraph_error.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_to_non_existent_axis)
{
    Shape shape_a{};
    Shape shape_r{4};
    auto arg = make_shared<op::Parameter>(element::f32, shape_a);
    // Axis 1 does not exist in a rank-1 result: construction must throw.
    ASSERT_THROW(
        auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{0, 1}),
                                       op::ParameterVector{arg}),
        ngraph_error);
}
// Broadcast a scalar to a 2x2 matrix (both axes are broadcast axes).
NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_matrix)
{
    Shape shape_a{};
    Shape shape_r{2, 2};
    auto arg = make_shared<op::Parameter>(element::f32, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{0, 1}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::f32, shape_a);
    auto t_out = backend->create_tensor(element::f32, shape_r);
    copy_data(t_in, vector<float>{6});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<float>{6, 6, 6, 6}), read_vector<float>(t_out));
}
// Broadcast a scalar to a 2x2x2 tensor (all three axes broadcast).
NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_tensor)
{
    Shape shape_a{};
    Shape shape_r{2, 2, 2};
    auto arg = make_shared<op::Parameter>(element::f32, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{0, 1, 2}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::f32, shape_a);
    auto t_out = backend->create_tensor(element::f32, shape_r);
    copy_data(t_in, vector<float>{6});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<float>{6, 6, 6, 6, 6, 6, 6, 6}), read_vector<float>(t_out));
}
// Degenerate broadcast with an empty axis set: input shape equals output
// shape and the op must behave as an identity copy.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_trivial)
{
    Shape shape{2, 2, 2};
    auto arg = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape, AxisSet{}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::f32, shape);
    auto t_out = backend->create_tensor(element::f32, shape);
    copy_data(t_in, vector<float>{2, 4, 6, 8, 16, 32, 64, 128});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<float>{2, 4, 6, 8, 16, 32, 64, 128}), read_vector<float>(t_out));
}
// Broadcast a length-3 vector across columns of a 3x4 matrix
// (broadcast axis 1): each input element fills one row.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_colwise)
{
    Shape shape_a{3};
    Shape shape_r{3, 4};
    auto arg = make_shared<op::Parameter>(element::f32, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{1}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::f32, shape_a);
    auto t_out = backend->create_tensor(element::f32, shape_r);
    copy_data(t_in, vector<float>{1, 2, 3});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<float>{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3}), read_vector<float>(t_out));
}
// Broadcast a length-4 vector across rows of a 3x4 matrix
// (broadcast axis 0): the vector is repeated as each row.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise)
{
    Shape shape_a{4};
    Shape shape_r{3, 4};
    auto arg = make_shared<op::Parameter>(element::f32, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{0}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::f32, shape_a);
    auto t_out = backend->create_tensor(element::f32, shape_r);
    copy_data(t_in, vector<float>{1, 2, 3, 4});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<float>{1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}), read_vector<float>(t_out));
}
// Broadcast followed by a Reverse on axis 1 — exercises the hybrid
// execution mechanism when another op consumes the broadcast result.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise_reversed)
{
    Shape shape_a{4};
    Shape shape_r{3, 4};
    auto arg = make_shared<op::Parameter>(element::f32, shape_a);
    auto bcast = make_shared<op::Broadcast>(arg, shape_r, AxisSet{0});
    auto rev = make_shared<op::Reverse>(bcast, AxisSet{1});
    auto f = make_shared<Function>(rev, op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::f32, shape_a);
    auto t_out = backend->create_tensor(element::f32, shape_r);
    copy_data(t_in, vector<float>{1, 2, 3, 4});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<float>{4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1}), read_vector<float>(t_out));
}
// Row-wise vector broadcast, i64 element type.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise_int64)
{
    Shape shape_a{4};
    Shape shape_r{3, 4};
    auto arg = make_shared<op::Parameter>(element::i64, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{0}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::i64, shape_a);
    auto t_out = backend->create_tensor(element::i64, shape_r);
    copy_data(t_in, vector<int64_t>{1, 2, 3, 4});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<int64_t>{1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}),
              read_vector<int64_t>(t_out));
}
// Broadcast a one-element i64 vector to a 3x1 column matrix.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_to_matrix_int64)
{
    Shape shape_a{1};
    Shape shape_r{3, 1};
    auto arg = make_shared<op::Parameter>(element::i64, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{0}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::i64, shape_a);
    auto t_out = backend->create_tensor(element::i64, shape_r);
    copy_data(t_in, vector<int64_t>{4});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<int64_t>{4, 4, 4}), read_vector<int64_t>(t_out));
}
// Broadcast a one-element i32 vector to a 3x1 column matrix.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_to_matrix_int32)
{
    Shape shape_a{1};
    Shape shape_r{3, 1};
    auto arg = make_shared<op::Parameter>(element::i32, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{0}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::i32, shape_a);
    auto t_out = backend->create_tensor(element::i32, shape_r);
    copy_data(t_in, vector<int32_t>{4});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<int32_t>{4, 4, 4}), read_vector<int32_t>(t_out));
}
static void broadcast_test_helper(const Shape& shape_a, const Shape& shape_r, const AxisSet& axis)
{
auto A = make_shared<op::Parameter>(element::f32, shape_a);
vector<float> inp_data(shape_size<const Shape>(shape_a));
iota(inp_data.begin(), inp_data.end(), 1);
auto f =
make_shared<Function>(make_shared<op::Broadcast>(A, shape_r, axis), op::ParameterVector{A});
auto ref_backend = runtime::Backend::create("INTERPRETER");
auto wrk_backend = runtime::Backend::create("${BACKEND_NAME}");
auto wrk_a = wrk_backend->create_tensor(element::f32, shape_a);
copy_data(wrk_a, inp_data);
auto ref_a = ref_backend->create_tensor(element::f32, shape_a);
copy_data(ref_a, inp_data);
auto wrk_result = wrk_backend->create_tensor(element::f32, shape_r);
auto ref_result = ref_backend->create_tensor(element::f32, shape_r);
wrk_backend->call_with_validate(f, {wrk_result}, {wrk_a});
ref_backend->call_with_validate(f, {ref_result}, {ref_a});
EXPECT_EQ(read_vector<float>(ref_result), read_vector<float>(wrk_result));
}
// Vector broadcast into the middle axis of a 3x2x4 result.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_vector_middle)
{
    broadcast_test_helper(Shape{2}, Shape{3, 2, 4}, AxisSet{0, 2});
}
// Vector broadcast adding one leading axis (rank 1 -> 2).
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_vector_forward_2)
{
    broadcast_test_helper(Shape{2}, Shape{3, 2}, AxisSet{0});
}
// Vector broadcast adding two leading axes (rank 1 -> 3).
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_vector_forward_3)
{
    broadcast_test_helper(Shape{2}, Shape{4, 3, 2}, AxisSet{0, 1});
}
// Vector broadcast adding three leading axes (rank 1 -> 4).
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_vector_forward_4)
{
    broadcast_test_helper(Shape{2}, Shape{5, 4, 3, 2}, AxisSet{0, 1, 2});
}
// Scalar broadcast to a rank-4 result (every axis is a broadcast axis).
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_scalar)
{
    broadcast_test_helper(Shape{}, Shape{5, 4, 3, 2}, AxisSet{0, 1, 2, 3});
}
// Vector broadcast adding one trailing axis (rank 1 -> 2).
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_vector_backward_2)
{
    broadcast_test_helper(Shape{2}, Shape{2, 3}, AxisSet{1});
}
// Vector broadcast adding two trailing axes (rank 1 -> 3).
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_vector_backward_3)
{
    broadcast_test_helper(Shape{2}, Shape{2, 3, 4}, AxisSet{1, 2});
}
// Vector broadcast adding three trailing axes (rank 1 -> 4).
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_vector_backward_4)
{
    broadcast_test_helper(Shape{2}, Shape{2, 3, 4, 5}, AxisSet{1, 2, 3});
}
// Matrix broadcast adding two leading axes (rank 2 -> 4).
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_matrix_backward_4)
{
    broadcast_test_helper(Shape{4, 5}, Shape{2, 3, 4, 5}, AxisSet{0, 1});
}
// Matrix broadcast with non-contiguous broadcast axes {0, 2}.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_matrix_stride_1)
{
    broadcast_test_helper(Shape{3, 5}, Shape{2, 3, 4, 5}, AxisSet{0, 2});
}
// Matrix broadcast with broadcast axes at both ends {0, 3}.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_matrix_stride_2)
{
    broadcast_test_helper(Shape{3, 4}, Shape{2, 3, 4, 5}, AxisSet{0, 3});
}
// Matrix broadcast with interleaved broadcast axes {1, 3}.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_matrix_stride_3)
{
    broadcast_test_helper(Shape{2, 4}, Shape{2, 3, 4, 5}, AxisSet{1, 3});
}
// 3D tensor broadcast adding a leading axis.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_3d_backward)
{
    broadcast_test_helper(Shape{2, 3, 4}, Shape{5, 2, 3, 4}, AxisSet{0});
}
// 3D tensor broadcast inserting a new axis at position 1.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_3d_stride_1)
{
    broadcast_test_helper(Shape{2, 3, 4}, Shape{2, 5, 3, 4}, AxisSet{1});
}
// 3D tensor broadcast inserting a new axis at position 2.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_3d_stride_2)
{
    broadcast_test_helper(Shape{2, 3, 4}, Shape{2, 3, 5, 4}, AxisSet{2});
}
// Broadcast a 2x2 matrix along axis 0 of a 2x2x2 result: the matrix is
// duplicated as both outermost slices.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_0)
{
    Shape shape_a{2, 2};
    Shape shape_r{2, 2, 2};
    auto arg = make_shared<op::Parameter>(element::f32, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{0}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::f32, shape_a);
    auto t_out = backend->create_tensor(element::f32, shape_r);
    copy_data(t_in, vector<float>{1, 2, 3, 4});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<float>{1, 2, 3, 4, 1, 2, 3, 4}), read_vector<float>(t_out));
}
// Broadcast a 2x2 matrix along axis 1 of a 2x2x2 result: each input row
// is duplicated within its outer slice.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_1)
{
    Shape shape_a{2, 2};
    Shape shape_r{2, 2, 2};
    auto arg = make_shared<op::Parameter>(element::f32, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{1}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::f32, shape_a);
    auto t_out = backend->create_tensor(element::f32, shape_r);
    copy_data(t_in, vector<float>{1, 2, 3, 4});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<float>{1, 2, 1, 2, 3, 4, 3, 4}), read_vector<float>(t_out));
}
// Broadcast a 2x2 matrix along axis 2 of a 2x2x2 result: each input
// element is duplicated along the innermost dimension.
NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_2)
{
    Shape shape_a{2, 2};
    Shape shape_r{2, 2, 2};
    auto arg = make_shared<op::Parameter>(element::f32, shape_a);
    auto f = make_shared<Function>(make_shared<op::Broadcast>(arg, shape_r, AxisSet{2}),
                                   op::ParameterVector{arg});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto t_in = backend->create_tensor(element::f32, shape_a);
    auto t_out = backend->create_tensor(element::f32, shape_r);
    copy_data(t_in, vector<float>{1, 2, 3, 4});

    backend->call_with_validate(f, {t_out}, {t_in});
    EXPECT_EQ((vector<float>{1, 1, 2, 2, 3, 3, 4, 4}), read_vector<float>(t_out));
}
// Deserializes a JSON-serialized graph containing a Constant fed into a
// Broadcast (the classic relu-as-max(x, broadcast(0)) pattern) and creates
// the backend. This is a compile/deserialize smoke test only: nothing is
// executed and no outputs are checked — see the final comment.
NGRAPH_TEST(${BACKEND_NAME}, constant_broadcast)
{
// Serialized Function: Multiply(Maximum(Parameter_0, Broadcast(Constant 0)),
// Parameter_4), all f32 with shape [3, 4].
const string js =
    R"([{
       "name" : "Function_0",
       "ops" : [
         {
           "element_type" :
               {"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
           "inputs" : [],
           "name" : "Parameter_4",
           "op" : "Parameter",
           "outputs" : ["Parameter_4"],
           "shape" : [ 3, 4 ]
         },
         {
           "element_type" :
               {"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
           "inputs" : [],
           "name" : "Parameter_0",
           "op" : "Parameter",
           "outputs" : ["Parameter_0"],
           "shape" : [ 3, 4 ]
         },
         {
           "element_type" :
               {"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
           "inputs" : [],
           "name" : "Constant_1",
           "op" : "Constant",
           "outputs" : ["Constant_1"],
           "shape" : [],
           "value" : ["0"]
         },
         {
           "axes" : [ 0, 1 ],
           "element_type" :
               {"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
           "inputs" : ["Constant_1"],
           "name" : "Broadcast_2",
           "op" : "Broadcast",
           "outputs" : ["Broadcast_2"],
           "shape" : [ 3, 4 ]
         },
         {
           "element_type" :
               {"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
           "inputs" : [ "Parameter_0", "Broadcast_2" ],
           "name" : "Maximum_3",
           "op" : "Maximum",
           "outputs" : ["Maximum_3"]
         },
         {
           "element_type" :
               {"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
           "inputs" : [ "Maximum_3", "Parameter_4" ],
           "name" : "Multiply_5",
           "op" : "Multiply",
           "outputs" : ["Multiply_5"]
         }
       ],
       "parameters" : [ "Parameter_0", "Parameter_4" ],
       "result" : ["Multiply_5"],
       "result_shape" : [ 3, 4 ],
       "result_type" :
           {"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false}
    }])";
stringstream ss(js);
shared_ptr<Function> f = ngraph::deserialize(ss);
// max(x,broadcast(Constant(0)))
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// If this compiles it works
}
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/serializer.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, equal)
{
    // Elementwise Equal on f32 inputs produces a boolean (char) tensor.
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto fn =
        make_shared<Function>(make_shared<op::Equal>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, shape);
    auto t_rhs = backend->create_tensor(element::f32, shape);
    copy_data(t_lhs, vector<float>{1, 8, -8, 17, -0.5, 0, 1, 1});
    copy_data(t_rhs, vector<float>{1, 8, 4, 8, 0, 0, 1, 1.5});
    auto t_out = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<char>{1, 1, 0, 0, 0, 1, 1, 0}), read_vector<char>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, notequal)
{
    // Elementwise NotEqual: the logical complement of the `equal` test above.
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto fn =
        make_shared<Function>(make_shared<op::NotEqual>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, shape);
    auto t_rhs = backend->create_tensor(element::f32, shape);
    copy_data(t_lhs, vector<float>{1, 8, -8, 17, -0.5, 0, 1, 1});
    copy_data(t_rhs, vector<float>{1, 8, 4, 8, 0, 0, 1, 1.5});
    auto t_out = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<char>{0, 0, 1, 1, 1, 0, 0, 1}), read_vector<char>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, greater)
{
    // Elementwise Greater over f32 inputs; boolean (char) output.
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto fn =
        make_shared<Function>(make_shared<op::Greater>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, shape);
    auto t_rhs = backend->create_tensor(element::f32, shape);
    copy_data(t_lhs, vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1});
    copy_data(t_rhs, vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5});
    auto t_out = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<char>{0, 1, 0, 1, 0, 1, 1, 0}), read_vector<char>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, greatereq)
{
    // Elementwise GreaterEq; equal pairs (e.g. -8 vs -8) must yield true.
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto fn =
        make_shared<Function>(make_shared<op::GreaterEq>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, shape);
    auto t_rhs = backend->create_tensor(element::f32, shape);
    copy_data(t_lhs, vector<float>{1, 8, -8, 17, -0.5, 0, 2, 1});
    copy_data(t_rhs, vector<float>{1, 2, -8, 8, 0, 0, 0.5, 1.5});
    auto t_out = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<char>{1, 1, 1, 1, 0, 1, 1, 0}), read_vector<char>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, less)
{
    // Elementwise Less over f32 inputs; boolean (char) output.
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto fn =
        make_shared<Function>(make_shared<op::Less>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, shape);
    auto t_rhs = backend->create_tensor(element::f32, shape);
    copy_data(t_lhs, vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1});
    copy_data(t_rhs, vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5});
    auto t_out = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<char>{0, 0, 1, 0, 1, 0, 0, 1}), read_vector<char>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, lesseq)
{
    // Elementwise LessEq; equal pairs (e.g. -8 vs -8, 0 vs 0) must yield true.
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto fn =
        make_shared<Function>(make_shared<op::LessEq>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, shape);
    auto t_rhs = backend->create_tensor(element::f32, shape);
    copy_data(t_lhs, vector<float>{1, 8, -8, 17, -0.5, 0, 2, 1});
    copy_data(t_rhs, vector<float>{1, 2, -8, 8, 0, 0, 0.5, 1.5});
    auto t_out = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<char>{1, 0, 1, 0, 1, 1, 0, 1}), read_vector<char>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, lesseq_bool)
{
    // LessEq on boolean inputs: true (1) <= false (0) is false everywhere.
    Shape shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::boolean, shape);
    auto rhs = make_shared<op::Parameter>(element::boolean, shape);
    auto fn =
        make_shared<Function>(make_shared<op::LessEq>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::boolean, shape);
    auto t_rhs = backend->create_tensor(element::boolean, shape);
    copy_data(t_lhs, vector<char>{1, 1, 1, 1, 1, 1, 1, 1});
    copy_data(t_rhs, vector<char>{0, 0, 0, 0, 0, 0, 0, 0});
    auto t_out = backend->create_tensor(element::boolean, shape);

    // Pre-fill the output with the wrong answer so a backend that leaves the
    // buffer untouched cannot pass by accident.
    copy_data(t_out, vector<char>{1, 1, 1, 1, 1, 1, 1, 1});

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<char>{0, 0, 0, 0, 0, 0, 0, 0}), read_vector<char>(t_out));
}
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
//
// Numpy test:
//
// from numpy import *
// x = linspace(1,2*3*3*4,2*3*3*4)
// y = linspace(1,3*4*2*3*2,3*4*2*3*2)
// x.shape=(2,3,3,4)
// y.shape=(3,4,2,3,2)
// z = tensordot(x,y,([2,3],[0,1]))
// z.shape = 2*3*2*3*2
// z
//
// array([ 6942., 7020., 7098., 7176., 7254., 7332., 7410.,
// 7488., 7566., 7644., 7722., 7800., 16590., 16812.,
// 17034., 17256., 17478., 17700., 17922., 18144., 18366.,
// 18588., 18810., 19032., 26238., 26604., 26970., 27336.,
// 27702., 28068., 28434., 28800., 29166., 29532., 29898.,
// 30264., 35886., 36396., 36906., 37416., 37926., 38436.,
// 38946., 39456., 39966., 40476., 40986., 41496., 45534.,
// 46188., 46842., 47496., 48150., 48804., 49458., 50112.,
// 50766., 51420., 52074., 52728., 55182., 55980., 56778.,
// 57576., 58374., 59172., 59970., 60768., 61566., 62364.,
// 63162., 63960.])
//
// Dot with reduction_axes_count=2: contracts the last two axes of the 4-D
// argument against the first two axes of the 5-D argument, matching the
// numpy tensordot reference above. Result shape: {2, 3} + {2, 3, 2}.
NGRAPH_TEST(${BACKEND_NAME}, dot_4d_5d_multi_axis)
{
    // Inputs are filled with 1, 2, 3, ... so results match the linspace reference.
    vector<float> a_data(2 * 3 * 3 * 4);
    for (int i = 0; i < 2 * 3 * 3 * 4; i++)
    {
        a_data[i] = float(i + 1);
    }

    vector<float> b_data(3 * 4 * 2 * 2 * 3);
    for (int i = 0; i < 3 * 4 * 2 * 2 * 3; i++)
    {
        b_data[i] = float(i + 1);
    }

    Shape shape_a{2, 3, 3, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{3, 4, 2, 3, 2};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{2, 3, 2, 3, 2};

    // Contract over two axis pairs: A axes {2,3} with B axes {0,1}.
    auto r = make_shared<op::Dot>(A, B, 2);
    auto f = make_shared<Function>(r, op::ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, a_data);
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, b_data);
    auto result = backend->create_tensor(element::f32, shape_r);

    backend->call_with_validate(f, {result}, {a, b});
    // Expected values taken from the numpy tensordot output quoted above.
    EXPECT_EQ(
        (vector<float>{6942.,  7020.,  7098.,  7176.,  7254.,  7332.,  7410.,  7488.,  7566.,
                       7644.,  7722.,  7800.,  16590., 16812., 17034., 17256., 17478., 17700.,
                       17922., 18144., 18366., 18588., 18810., 19032., 26238., 26604., 26970.,
                       27336., 27702., 28068., 28434., 28800., 29166., 29532., 29898., 30264.,
                       35886., 36396., 36906., 37416., 37926., 38436., 38946., 39456., 39966.,
                       40476., 40986., 41496., 45534., 46188., 46842., 47496., 48150., 48804.,
                       49458., 50112., 50766., 51420., 52074., 52728., 55182., 55980., 56778.,
                       57576., 58374., 59172., 59970., 60768., 61566., 62364., 63162., 63960.}),
        read_vector<float>(result));
}
//
// Numpy test:
//
// from numpy import *
// x = linspace(1,2*3*3*4,2*3*3*4)
// y = linspace(1,2*3*3*4*2,2*3*3*4*2)
// x.shape=(2,3,3,4)
// y.shape=(2,3,3,4,2)
// z = tensordot(x,y,([0,1,2,3],[0,1,2,3]))
// z
//
// array([ 251412., 254040.])
//
NGRAPH_TEST(${BACKEND_NAME}, dot_4d_5d_multi_axis_more)
{
    // Full contraction: all four axes of the 4-D argument are reduced against
    // the leading four axes of the 5-D argument, leaving a result of shape {2}.
    Shape shape_x{2, 3, 3, 4};
    Shape shape_y{2, 3, 3, 4, 2};
    Shape shape_out{2};

    // Fill both inputs with 1, 2, 3, ... to match the numpy reference above.
    vector<float> x_data(2 * 3 * 3 * 4);
    for (size_t i = 0; i < x_data.size(); i++)
    {
        x_data[i] = float(i + 1);
    }
    vector<float> y_data(2 * 3 * 3 * 4 * 2);
    for (size_t i = 0; i < y_data.size(); i++)
    {
        y_data[i] = float(i + 1);
    }

    auto X = make_shared<op::Parameter>(element::f32, shape_x);
    auto Y = make_shared<op::Parameter>(element::f32, shape_y);
    auto fn = make_shared<Function>(make_shared<op::Dot>(X, Y, 4), op::ParameterVector{X, Y});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_x = backend->create_tensor(element::f32, shape_x);
    copy_data(t_x, x_data);
    auto t_y = backend->create_tensor(element::f32, shape_y);
    copy_data(t_y, y_data);
    auto t_out = backend->create_tensor(element::f32, shape_out);

    backend->call_with_validate(fn, {t_out}, {t_x, t_y});
    EXPECT_EQ((vector<float>{251412., 254040.}), read_vector<float>(t_out));
}
//
// Numpy test:
//
// from numpy import *
// x = linspace(1,20*30*30*40,20*30*30*40)
// y = linspace(1,20*30*30*40*20,20*30*30*40*20)
// x.shape=(20,30,30,40)
// y.shape=(20,30,30,40,20)
// z = tensordot(x,y,([0,1,2,3],[0,1,2,3]))
// set_printoptions(precision=20)
// z
//
// array([ 2.48832025919525478400e+18, 2.48832051839533977600e+18,
// 2.48832077759658444800e+18, 2.48832103679413504000e+18,
// 2.48832129599669350400e+18, 2.48832155519793971200e+18,
// 2.48832181439802265600e+18, 2.48832207359808000000e+18,
// 2.48832233279813580800e+18, 2.48832259199822028800e+18,
// 2.48832285119946496000e+18, 2.48832311040043008000e+18,
// 2.48832336959957401600e+18, 2.48832362880081817600e+18,
// 2.48832388800090368000e+18, 2.48832414720096000000e+18,
// 2.48832440640101478400e+18, 2.48832466560109772800e+18,
// 2.48832492480234188800e+18, 2.48832518400031897600e+18])
//
// Disabled because this test is very slow.
//
// Large fp64 full-contraction Dot (reduction over all four axes of the 4-D
// argument). Disabled by the DISABLED_ prefix because it is very slow; the
// expected values come from the numpy reference quoted above and are compared
// with all_close rather than exact equality because of fp64 rounding.
NGRAPH_TEST(DISABLED_${BACKEND_NAME}, dot_4d_5d_multi_axis_big_fp64_VERY_SLOW)
{
    // Inputs are filled with 1, 2, 3, ... to match the linspace reference.
    vector<double> a_data(20 * 30 * 30 * 40);
    for (int i = 0; i < 20 * 30 * 30 * 40; i++)
    {
        a_data[i] = double(i + 1);
    }

    vector<double> b_data(20 * 30 * 30 * 40 * 20);
    for (int i = 0; i < 20 * 30 * 30 * 40 * 20; i++)
    {
        b_data[i] = double(i + 1);
    }

    Shape shape_a{20, 30, 30, 40};
    auto A = make_shared<op::Parameter>(element::f64, shape_a);
    Shape shape_b{20, 30, 30, 40, 20};
    auto B = make_shared<op::Parameter>(element::f64, shape_b);
    Shape shape_r{20};

    // Reduce over 4 axis pairs, leaving only B's trailing axis of length 20.
    auto r = make_shared<op::Dot>(A, B, 4);
    auto f = make_shared<Function>(r, op::ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f64, shape_a);
    copy_data(a, a_data);
    auto b = backend->create_tensor(element::f64, shape_b);
    copy_data(b, b_data);
    auto result = backend->create_tensor(element::f64, shape_r);

    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_TRUE(test::all_close(
        vector<double>{
            2.48832025919525478400e+18, 2.48832051839533977600e+18, 2.48832077759658444800e+18,
            2.48832103679413504000e+18, 2.48832129599669350400e+18, 2.48832155519793971200e+18,
            2.48832181439802265600e+18, 2.48832207359808000000e+18, 2.48832233279813580800e+18,
            2.48832259199822028800e+18, 2.48832285119946496000e+18, 2.48832311040043008000e+18,
            2.48832336959957401600e+18, 2.48832362880081817600e+18, 2.48832388800090368000e+18,
            2.48832414720096000000e+18, 2.48832440640101478400e+18, 2.48832466560109772800e+18,
            2.48832492480234188800e+18, 2.48832518400031897600e+18},
        read_vector<double>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_0_0)
{
    // Dot of two zero-length vectors: summing over an empty axis must yield
    // the additive identity, i.e. a scalar 0.
    Shape vec_shape{0};
    Shape scalar_shape{};
    auto lhs = make_shared<op::Parameter>(element::f32, vec_shape);
    auto rhs = make_shared<op::Parameter>(element::f32, vec_shape);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors (both inputs are empty)
    auto t_lhs = backend->create_tensor(element::f32, vec_shape);
    copy_data(t_lhs, vector<float>{});
    auto t_rhs = backend->create_tensor(element::f32, vec_shape);
    copy_data(t_rhs, vector<float>{});
    auto t_out = backend->create_tensor(element::f32, scalar_shape);

    // Pre-fill the output with garbage so the zero must be written, not inherited.
    copy_data(t_out, vector<float>{2112});

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{0}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_2x0_0x2)
{
    // (2x0) . (0x2): the contracted dimension is empty, so the 2x2 result
    // must be all zeros.
    Shape shape_lhs{2, 0};
    Shape shape_rhs{0, 2};
    Shape shape_out{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape_lhs);
    auto rhs = make_shared<op::Parameter>(element::f32, shape_rhs);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors (both inputs are empty)
    auto t_lhs = backend->create_tensor(element::f32, shape_lhs);
    copy_data(t_lhs, vector<float>{});
    auto t_rhs = backend->create_tensor(element::f32, shape_rhs);
    copy_data(t_rhs, vector<float>{});
    auto t_out = backend->create_tensor(element::f32, shape_out);

    // Pre-fill the output with garbage so the zeros must be written, not inherited.
    copy_data(t_out, vector<float>{2112, 2112, 2112, 2112});

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{0, 0, 0, 0}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_0x2_2x0)
{
    // (0x2) . (2x0): both outer dimensions are empty, so the result is a 0x0
    // tensor with no elements at all.
    Shape shape_lhs{0, 2};
    Shape shape_rhs{2, 0};
    Shape shape_out{0, 0};
    auto lhs = make_shared<op::Parameter>(element::f32, shape_lhs);
    auto rhs = make_shared<op::Parameter>(element::f32, shape_rhs);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors (both inputs are empty)
    auto t_lhs = backend->create_tensor(element::f32, shape_lhs);
    copy_data(t_lhs, vector<float>{});
    auto t_rhs = backend->create_tensor(element::f32, shape_rhs);
    copy_data(t_rhs, vector<float>{});
    auto t_out = backend->create_tensor(element::f32, shape_out);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_3x2_2x0)
{
    // (3x2) . (2x0): non-empty left operand, but the right operand has no
    // columns, so the 3x0 result is empty.
    Shape shape_lhs{3, 2};
    Shape shape_rhs{2, 0};
    Shape shape_out{3, 0};
    auto lhs = make_shared<op::Parameter>(element::f32, shape_lhs);
    auto rhs = make_shared<op::Parameter>(element::f32, shape_rhs);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, shape_lhs);
    copy_data(t_lhs, vector<float>{1, 2, 3, 4, 5, 6});
    auto t_rhs = backend->create_tensor(element::f32, shape_rhs);
    copy_data(t_rhs, vector<float>{});
    auto t_out = backend->create_tensor(element::f32, shape_out);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_scalar_0x2)
{
    // Scalar . (0x2): scalar Dot scales the other operand, so the result keeps
    // the empty 0x2 shape and holds no elements.
    Shape shape_lhs{};
    Shape shape_rhs{0, 2};
    Shape shape_out{0, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape_lhs);
    auto rhs = make_shared<op::Parameter>(element::f32, shape_rhs);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, shape_lhs);
    copy_data(t_lhs, vector<float>{1});
    auto t_rhs = backend->create_tensor(element::f32, shape_rhs);
    copy_data(t_rhs, vector<float>{});
    auto t_out = backend->create_tensor(element::f32, shape_out);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_2x0_0)
{
    // (2x0) . (0): contracting over the empty axis produces a length-2 vector
    // of zeros (the empty-sum identity).
    Shape shape_lhs{2, 0};
    Shape shape_rhs{0};
    Shape shape_out{2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape_lhs);
    auto rhs = make_shared<op::Parameter>(element::f32, shape_rhs);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors (both inputs are empty)
    auto t_lhs = backend->create_tensor(element::f32, shape_lhs);
    copy_data(t_lhs, vector<float>{});
    auto t_rhs = backend->create_tensor(element::f32, shape_rhs);
    copy_data(t_rhs, vector<float>{});
    auto t_out = backend->create_tensor(element::f32, shape_out);

    // Pre-fill the output with garbage so the zeros must be written, not inherited.
    copy_data(t_out, vector<float>{2112, 2112});

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{0, 0}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot1d)
{
    // Vector-vector Dot: 2*1 + 4*2 + 8*4 + 16*8 = 170, a scalar result.
    Shape vec_shape{4};
    Shape scalar_shape{};
    auto lhs = make_shared<op::Parameter>(element::f32, vec_shape);
    auto rhs = make_shared<op::Parameter>(element::f32, vec_shape);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, vec_shape);
    copy_data(t_lhs, vector<float>{2, 4, 8, 16});
    auto t_rhs = backend->create_tensor(element::f32, vec_shape);
    copy_data(t_rhs, vector<float>{1, 2, 4, 8});
    auto t_out = backend->create_tensor(element::f32, scalar_shape);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{170}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot2d)
{
    // Plain 2x2 matrix multiplication: [[1,2],[3,4]] x [[5,6],[7,8]].
    Shape shape{2, 2};
    Shape shape_out{2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, shape);
    copy_data(t_lhs, vector<float>{1, 2, 3, 4});
    auto t_rhs = backend->create_tensor(element::f32, shape);
    copy_data(t_rhs, vector<float>{5, 6, 7, 8});
    auto t_out = backend->create_tensor(element::f32, shape_out);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{19, 22, 43, 50}), read_vector<float>(t_out));
}
//
// Here is what numpy does:
//
// >>> a = linspace(1,2*2*2,2*2*2)
// >>> b = linspace(1,2*2*2,2*2*2)
//
// >>> a.shape=(2,2,2)
// >>> b.shape=(2,2,2)
//
// >>> tensordot(a,b,axes=([2],[0]))
// array([[[[ 11., 14.],
// [ 17., 20.]],
//
// [[ 23., 30.],
// [ 37., 44.]]],
//
//
// [[[ 35., 46.],
// [ 57., 68.]],
//
// [[ 47., 62.],
// [ 77., 92.]]]])
//
NGRAPH_TEST(${BACKEND_NAME}, dot3d_3d)
{
    // 3-D x 3-D Dot over one axis pair (A's last axis with B's first), per the
    // numpy tensordot reference above; result shape is {2, 2} + {2, 2}.
    Shape shape{2, 2, 2};
    Shape shape_out{2, 2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, shape);
    auto rhs = make_shared<op::Parameter>(element::f32, shape);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors — both inputs hold 1..8.
    auto t_lhs = backend->create_tensor(element::f32, shape);
    copy_data(t_lhs, vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
    auto t_rhs = backend->create_tensor(element::f32, shape);
    copy_data(t_rhs, vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
    auto t_out = backend->create_tensor(element::f32, shape_out);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{11, 14, 17, 20, 23, 30, 37, 44, 35, 46, 57, 68, 47, 62, 77, 92}),
              read_vector<float>(t_out));
}
//
// Here is what numpy does:
//
// >>> from numpy import *
// >>> a = linspace(0,4*2*3-1,4*2*3)
// >>> b = linspace(0,3*4-1,3*4)
//
// >>> a.shape=(4,2,3)
// >>> b.shape=(3,4)
//
// >>> tensordot(a,b,axes=([2],[0]))
// array([[[ 20., 23., 26., 29.],
// [ 56., 68., 80., 92.]],
//
// [[ 92., 113., 134., 155.],
// [ 128., 158., 188., 218.]],
//
// [[ 164., 203., 242., 281.],
// [ 200., 248., 296., 344.]],
//
// [[ 236., 293., 350., 407.],
// [ 272., 338., 404., 470.]]])
//
NGRAPH_TEST(${BACKEND_NAME}, dot3d_2d)
{
    // (4,2,3) . (3,4): contracts A's last axis with B's first, giving a
    // (4,2,4) result; values follow the numpy tensordot reference above.
    Shape shape_lhs{4, 2, 3};
    Shape shape_rhs{3, 4};
    Shape shape_out{4, 2, 4};
    auto lhs = make_shared<op::Parameter>(element::f32, shape_lhs);
    auto rhs = make_shared<op::Parameter>(element::f32, shape_rhs);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors — inputs hold 0..23 and 0..11 respectively.
    auto t_lhs = backend->create_tensor(element::f32, shape_lhs);
    copy_data(t_lhs, vector<float>{0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
                                   12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
    auto t_rhs = backend->create_tensor(element::f32, shape_rhs);
    copy_data(t_rhs, vector<float>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
    auto t_out = backend->create_tensor(element::f32, shape_out);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{20,  23,  26,  29,  56,  68,  80,  92,  92,  113, 134,
                             155, 128, 158, 188, 218, 164, 203, 242, 281, 200, 248,
                             296, 344, 236, 293, 350, 407, 272, 338, 404, 470}),
              read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_scalar_tensor_arg0)
{
    // Dot with a scalar as the FIRST argument scales every element of the
    // tensor argument; the result has the tensor's shape.
    Shape scalar_shape{};
    Shape tensor_shape{2, 2, 2};
    auto lhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    auto rhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, scalar_shape);
    copy_data(t_lhs, vector<float>{6});
    auto t_rhs = backend->create_tensor(element::f32, tensor_shape);
    copy_data(t_rhs, vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
    auto t_out = backend->create_tensor(element::f32, tensor_shape);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_scalar_tensor_arg1)
{
    // Dot with a scalar as the SECOND argument scales every element of the
    // tensor argument; the result has the tensor's shape.
    Shape tensor_shape{2, 2, 2};
    Shape scalar_shape{};
    auto lhs = make_shared<op::Parameter>(element::f32, tensor_shape);
    auto rhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, tensor_shape);
    copy_data(t_lhs, vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
    auto t_rhs = backend->create_tensor(element::f32, scalar_shape);
    copy_data(t_rhs, vector<float>{6});
    auto t_out = backend->create_tensor(element::f32, tensor_shape);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_scalar_scalar)
{
    // Scalar . scalar degenerates to plain multiplication: 8 * 6 = 48.
    Shape scalar_shape{};
    auto lhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    auto rhs = make_shared<op::Parameter>(element::f32, scalar_shape);
    auto fn = make_shared<Function>(make_shared<op::Dot>(lhs, rhs), op::ParameterVector{lhs, rhs});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_lhs = backend->create_tensor(element::f32, scalar_shape);
    copy_data(t_lhs, vector<float>{8});
    auto t_rhs = backend->create_tensor(element::f32, scalar_shape);
    copy_data(t_rhs, vector<float>{6});
    auto t_out = backend->create_tensor(element::f32, scalar_shape);

    backend->call_with_validate(fn, {t_out}, {t_lhs, t_rhs});
    EXPECT_EQ((vector<float>{48}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_vector_4_3)
{
    // Non-square matrix-vector product: (4x3) . (3) -> (4).
    Shape mat_shape{4, 3};
    Shape vec_shape{3};
    Shape out_shape{4};
    auto mat = make_shared<op::Parameter>(element::f32, mat_shape);
    auto vec = make_shared<op::Parameter>(element::f32, vec_shape);
    auto fn = make_shared<Function>(make_shared<op::Dot>(mat, vec), op::ParameterVector{mat, vec});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_mat = backend->create_tensor(element::f32, mat_shape);
    copy_data(t_mat, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    auto t_vec = backend->create_tensor(element::f32, vec_shape);
    copy_data(t_vec, vector<float>{17, 18, 19});
    auto t_out = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(fn, {t_out}, {t_mat, t_vec});
    EXPECT_EQ((vector<float>{110, 272, 434, 596}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_vector)
{
    // Square matrix-vector product: (4x4) . (4) -> (4).
    Shape mat_shape{4, 4};
    Shape vec_shape{4};
    Shape out_shape{4};
    auto mat = make_shared<op::Parameter>(element::f32, mat_shape);
    auto vec = make_shared<op::Parameter>(element::f32, vec_shape);
    auto fn = make_shared<Function>(make_shared<op::Dot>(mat, vec), op::ParameterVector{mat, vec});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_mat = backend->create_tensor(element::f32, mat_shape);
    copy_data(t_mat, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
    auto t_vec = backend->create_tensor(element::f32, vec_shape);
    copy_data(t_vec, vector<float>{17, 18, 19, 20});
    auto t_out = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(fn, {t_out}, {t_mat, t_vec});
    EXPECT_EQ((vector<float>{190, 486, 782, 1078}), read_vector<float>(t_out));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_vector_int64)
{
    // Same matrix-vector product as dot_matrix_vector, but in i64 to exercise
    // the integer code path.
    Shape mat_shape{4, 4};
    Shape vec_shape{4};
    Shape out_shape{4};
    auto mat = make_shared<op::Parameter>(element::i64, mat_shape);
    auto vec = make_shared<op::Parameter>(element::i64, vec_shape);
    auto fn = make_shared<Function>(make_shared<op::Dot>(mat, vec), op::ParameterVector{mat, vec});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_mat = backend->create_tensor(element::i64, mat_shape);
    copy_data(t_mat, vector<int64_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
    auto t_vec = backend->create_tensor(element::i64, vec_shape);
    copy_data(t_vec, vector<int64_t>{17, 18, 19, 20});
    auto t_out = backend->create_tensor(element::i64, out_shape);

    backend->call_with_validate(fn, {t_out}, {t_mat, t_vec});
    EXPECT_EQ((vector<int64_t>{190, 486, 782, 1078}), read_vector<int64_t>(t_out));
}
//
// Numpy test:
//
// > from numpy import *
// > x = linspace(1,2*3*4,2*3*4)
// > y = linspace(1,3*4*5,3*4*5)
// > x.shape=(2,3,4)
// > y.shape=(3,4,5)
// > z = tensordot(x,y,([1,2],[0,1]))
// > z.shape = 2*5
// > z
// array([ 2938., 3016., 3094., 3172., 3250., 7042., 7264., 7486.,
// 7708., 7930.])
//
NGRAPH_TEST(${BACKEND_NAME}, dot_3d_multi_axis)
{
    // Dot with reduction_axes_count=2 on 3-D inputs: contracts A's last two
    // axes with B's first two, leaving a {2, 5} result (see numpy reference).
    Shape shape_x{2, 3, 4};
    Shape shape_y{3, 4, 5};
    Shape shape_out{2, 5};

    // Fill both inputs with 1, 2, 3, ... to match the linspace reference.
    vector<float> x_data(2 * 3 * 4);
    for (size_t i = 0; i < x_data.size(); i++)
    {
        x_data[i] = float(i + 1);
    }
    vector<float> y_data(3 * 4 * 5);
    for (size_t i = 0; i < y_data.size(); i++)
    {
        y_data[i] = float(i + 1);
    }

    auto X = make_shared<op::Parameter>(element::f32, shape_x);
    auto Y = make_shared<op::Parameter>(element::f32, shape_y);
    auto fn = make_shared<Function>(make_shared<op::Dot>(X, Y, 2), op::ParameterVector{X, Y});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors
    auto t_x = backend->create_tensor(element::f32, shape_x);
    copy_data(t_x, x_data);
    auto t_y = backend->create_tensor(element::f32, shape_y);
    copy_data(t_y, y_data);
    auto t_out = backend->create_tensor(element::f32, shape_out);

    backend->call_with_validate(fn, {t_out}, {t_x, t_y});
    EXPECT_EQ((vector<float>{2938., 3016., 3094., 3172., 3250., 7042., 7264., 7486., 7708., 7930.}),
              read_vector<float>(t_out));
}
//
// Numpy test:
//
// > from numpy import *
// > x = array([6,61,2,3,5,21,75,23,23,0,23,2,35,67,1,2,9,16,2,3,6,1,8,0])
// > y = array([9,1,4,6,3,5,1,36,7,3,5,0,1,20,35,2,1,0,1,25,3,6,7,8])
// > x.shape=(2,4,3)
// > y.shape=(3,4,2)
// > z = tensordot(x,y,([2],[0]))
// > z.shape = 2*4*4*2
// > z
// array([ 483,  189,  331,   86,   85, 1262, 2155,  354,   83,   18,   58,
//         543,   77,  241,  325,  286,  859,  144,  438, 1025,  317,  973,
//        1041, 2930,  163,   69,  117,   50,   29,  472,  819,   62,  785,
//         236,  476,  235,  175, 1521, 2387, 1402,   97,   29,   69,  412,
//          63,  286,  429,  218,   45,   11,   29,  162,   27,  106,  149,
//         126,   65,   25,   44,    6,   11,  165,  281,   52])
//
NGRAPH_TEST(${BACKEND_NAME}, dot_3d_one_axis_arbitrary)
{
    // Arbitrary (non-consecutive) data; expected values come from the numpy
    // tensordot transcript above.
    vector<float> a_data{6,  61, 2, 3, 5, 21, 75, 23, 23, 0, 23, 2,
                         35, 67, 1, 2, 9, 16, 2,  3,  6,  1, 8,  0};
    vector<float> b_data{9, 1,  4,  6, 3, 5, 1, 36, 7, 3, 5, 0,
                         1, 20, 35, 2, 1, 0, 1, 25, 3, 6, 7, 8};
    Shape shape_a{2, 4, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{3, 4, 2};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{2, 4, 4, 2};
    // Default op::Dot contracts the last axis of A against the first axis of B.
    auto r = make_shared<op::Dot>(A, B);
    auto f = make_shared<Function>(r, op::ParameterVector{A, B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, a_data);
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, b_data);
    auto result = backend->create_tensor(element::f32, shape_r);
    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((vector<float>{483,  189, 331, 86,  85,  1262, 2155, 354, 83,  18,  58,   543,  77,
                             241,  325, 286, 859, 144, 438,  1025, 317, 973, 1041, 2930, 163,  69,
                             117,  50,  29,  472, 819, 62,   785,  236, 476, 235,  175,  1521, 2387,
                             1402, 97,  29,  69,  412, 63,   286,  429, 218, 45,   11,   29,   162,
                             27,   106, 149, 126, 65,  25,   44,   6,   11,  165,  281,  52}),
              read_vector<float>(result));
}
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_2_in_3)
{
    // One-hot encode the scalar index 2 into a length-3 vector along axis 0.
    Shape input_shape{};
    Shape output_shape{3};
    auto param = make_shared<op::Parameter>(element::i32, input_shape);
    auto one_hot = make_shared<op::OneHot>(param, output_shape, 0);
    auto func = make_shared<Function>(one_hot, op::ParameterVector{param});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Input/output tensors
    auto input = backend->create_tensor(element::i32, input_shape);
    auto output = backend->create_tensor(element::i32, output_shape);
    copy_data(input, vector<int32_t>{2});
    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<int32_t>{0, 0, 1}), read_vector<int32_t>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_1_in_3)
{
    // One-hot encode the scalar index 1 into a length-3 vector along axis 0.
    Shape input_shape{};
    Shape output_shape{3};
    auto param = make_shared<op::Parameter>(element::i32, input_shape);
    auto one_hot = make_shared<op::OneHot>(param, output_shape, 0);
    auto func = make_shared<Function>(one_hot, op::ParameterVector{param});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Input/output tensors
    auto input = backend->create_tensor(element::i32, input_shape);
    auto output = backend->create_tensor(element::i32, output_shape);
    copy_data(input, vector<int32_t>{1});
    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<int32_t>{0, 1, 0}), read_vector<int32_t>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_0_in_3)
{
    // One-hot encode the scalar index 0 into a length-3 vector along axis 0.
    Shape input_shape{};
    Shape output_shape{3};
    auto param = make_shared<op::Parameter>(element::i32, input_shape);
    auto one_hot = make_shared<op::OneHot>(param, output_shape, 0);
    auto func = make_shared<Function>(one_hot, op::ParameterVector{param});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Input/output tensors
    auto input = backend->create_tensor(element::i32, input_shape);
    auto output = backend->create_tensor(element::i32, output_shape);
    copy_data(input, vector<int32_t>{0});
    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<int32_t>{1, 0, 0}), read_vector<int32_t>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_fp_nonint_in_3)
{
    // One-hot of a non-integral float scalar must be rejected at execution time.
    Shape shape_a{};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{3};
    auto r = make_shared<op::OneHot>(A, Shape{3}, 0);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1.1f});
    auto result = backend->create_tensor(element::f32, shape_r);
    try
    {
        backend->call_with_validate(f, {result}, {a});
        // Previously the test silently passed when no exception was thrown.
        FAIL() << "Expected exception for non-integral one-hot input was not thrown";
    }
    catch (const std::exception& e)
    {
        EXPECT_EQ(e.what(), std::string("One-hot: non-integral value in input"));
    }
    catch (...)
    {
        FAIL() << "Expected a std::exception with a one-hot non-integral error message";
    }
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_oob_in_3)
{
    // A scalar index far outside the category range [0, 3) must make
    // execution fail.
    Shape shape_a{};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    Shape shape_r{3};
    auto r = make_shared<op::OneHot>(A, Shape{3}, 0);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape_a);
    copy_data(a, vector<int32_t>{3000000});
    auto result = backend->create_tensor(element::i32, shape_r);
    try
    {
        backend->call_with_validate(f, {result}, {a});
        // Previously the test silently passed when no exception was thrown.
        FAIL() << "Expected exception for out-of-range one-hot input was not thrown";
    }
    catch (const std::exception& e)
    {
        EXPECT_EQ(e.what(), std::string("One-hot: value is out of category range"));
    }
    catch (...)
    {
        FAIL() << "Expected a std::exception with a one-hot range error message";
    }
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_0)
{
    // One-hot an 8-element vector along a new leading axis (axis 0): the
    // result is 3x8, with a single 1 in each column at row a[j].
    Shape shape_a{8};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    Shape shape_r{3, 8};
    auto r = make_shared<op::OneHot>(A, Shape{3, 8}, 0);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape_a);
    copy_data(a, vector<int32_t>{2, 1, 0, 0, 2, 2, 1, 0});
    auto result = backend->create_tensor(element::i32, shape_r);
    backend->call_with_validate(f, {result}, {a});
    EXPECT_EQ(
        (vector<int32_t>{0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0}),
        read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1)
{
    // One-hot an 8-element vector along a new trailing axis (axis 1): the
    // result is 8x3, with a single 1 in each row at column a[i].
    Shape shape_a{8};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    Shape shape_r{8, 3};
    auto r = make_shared<op::OneHot>(A, Shape{8, 3}, 1);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape_a);
    copy_data(a, vector<int32_t>{2, 1, 0, 0, 2, 2, 1, 0});
    auto result = backend->create_tensor(element::i32, shape_r);
    backend->call_with_validate(f, {result}, {a});
    EXPECT_EQ(
        (vector<int32_t>{0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0}),
        read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1_barely_oob)
{
    // One input element (3) is exactly one past the valid category range
    // [0, 3); execution must fail.
    Shape shape_a{8};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    Shape shape_r{8, 3};
    auto r = make_shared<op::OneHot>(A, Shape{8, 3}, 1);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape_a);
    copy_data(a, vector<int32_t>{2, 1, 0, 0, 3, 2, 1, 0});
    auto result = backend->create_tensor(element::i32, shape_r);
    try
    {
        backend->call_with_validate(f, {result}, {a});
        // Previously the test silently passed when no exception was thrown.
        FAIL() << "Expected exception for out-of-range one-hot input was not thrown";
    }
    catch (const std::exception& e)
    {
        EXPECT_EQ(e.what(), std::string("One-hot: value is out of category range"));
    }
    catch (...)
    {
        FAIL() << "Expected a std::exception with a one-hot range error message";
    }
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1_far_oob)
{
    // One input element is far outside the valid category range [0, 3);
    // execution must fail.
    Shape shape_a{8};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    Shape shape_r{8, 3};
    auto r = make_shared<op::OneHot>(A, Shape{8, 3}, 1);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape_a);
    copy_data(a, vector<int32_t>{2, 1, 0, 0, 3000000, 2, 1, 0});
    auto result = backend->create_tensor(element::i32, shape_r);
    try
    {
        backend->call_with_validate(f, {result}, {a});
        // Previously the test silently passed when no exception was thrown.
        FAIL() << "Expected exception for out-of-range one-hot input was not thrown";
    }
    catch (const std::exception& e)
    {
        EXPECT_EQ(e.what(), std::string("One-hot: value is out of category range"));
    }
    catch (...)
    {
        FAIL() << "Expected a std::exception with a one-hot range error message";
    }
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_matrix_0)
{
    // One-hot a 3x3 matrix along a new leading axis (axis 0): the result is
    // 3x3x3, with result[k][i][j] == 1 iff a[i][j] == k.
    Shape shape_a{3, 3};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    Shape shape_r{3, 3, 3};
    auto r = make_shared<op::OneHot>(A, Shape{3, 3, 3}, 0);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape_a);
    copy_data(a,
              vector<int32_t>{
                  0, 1, 1, 2, 1, 0, 0, 2, 1,
              });
    auto result = backend->create_tensor(element::i32, shape_r);
    backend->call_with_validate(f, {result}, {a});
    EXPECT_EQ((vector<int32_t>{1, 0, 0, 0, 0, 1, 1, 0, 0,

                               0, 1, 1, 0, 1, 0, 0, 0, 1,

                               0, 0, 0, 1, 0, 0, 0, 1, 0}),
              read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1_fp)
{
    // Same as one_hot_vector_1 but with float32 input/output; the float
    // values are integral so encoding succeeds.
    Shape shape_a{8};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{8, 3};
    auto r = make_shared<op::OneHot>(A, Shape{8, 3}, 1);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{2, 1, 0, 0, 2, 2, 1, 0});
    auto result = backend->create_tensor(element::f32, shape_r);
    backend->call_with_validate(f, {result}, {a});
    EXPECT_EQ(
        (vector<float>{0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0}),
        read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1_fp_nonint)
{
    // One input element (1.01f) is non-integral; execution must fail.
    Shape shape_a{8};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{8, 3};
    auto r = make_shared<op::OneHot>(A, Shape{8, 3}, 1);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{2, 1, 0, 0, 2, 2, 1.01f, 0});
    auto result = backend->create_tensor(element::f32, shape_r);
    try
    {
        backend->call_with_validate(f, {result}, {a});
        // Previously the test silently passed when no exception was thrown.
        FAIL() << "Expected exception for non-integral one-hot input was not thrown";
    }
    catch (const std::exception& e)
    {
        EXPECT_EQ(e.what(), std::string("One-hot: non-integral value in input"));
    }
    catch (...)
    {
        FAIL() << "Expected a std::exception with a one-hot non-integral error message";
    }
}
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// Trivial case with no reduction axes.
NGRAPH_TEST(${BACKEND_NAME}, reduce_trivial)
{
    // Reduction function: scalar addition (f(x:float32[],y:float32[]) = x+y).
    auto x = make_shared<op::Parameter>(element::f32, Shape{});
    auto y = make_shared<op::Parameter>(element::f32, Shape{});
    auto sum_fn = make_shared<Function>(make_shared<op::Add>(x, y), op::ParameterVector{x, y});
    // Reducing over an empty axis set is the identity on the input tensor.
    Shape shape{2, 2};
    auto arg = make_shared<op::Parameter>(element::f32, shape);
    auto init = make_shared<op::Parameter>(element::f32, Shape{});
    auto g = make_shared<Function>(make_shared<op::Reduce>(arg, init, sum_fn, AxisSet{}),
                                   op::ParameterVector{arg, init});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Input/output tensors
    auto a = backend->create_tensor(element::f32, shape);
    auto b = backend->create_tensor(element::f32, Shape{});
    auto result = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    copy_data(b, vector<float>{0});
    backend->call_with_validate(g, {result}, {a, b});
    EXPECT_EQ((vector<float>{1, 2, 3, 4}), read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_to_scalar)
{
    // Reduction function: scalar addition (f(x:float32[],y:float32[]) = x+y).
    auto x = make_shared<op::Parameter>(element::f32, Shape{});
    auto y = make_shared<op::Parameter>(element::f32, Shape{});
    auto sum_fn = make_shared<Function>(make_shared<op::Add>(x, y), op::ParameterVector{x, y});
    // Reduce a 2x2 matrix over both axes down to a scalar sum.
    Shape shape{2, 2};
    auto arg = make_shared<op::Parameter>(element::f32, shape);
    auto init = make_shared<op::Parameter>(element::f32, Shape{});
    auto g = make_shared<Function>(make_shared<op::Reduce>(arg, init, sum_fn, AxisSet{0, 1}),
                                   op::ParameterVector{arg, init});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Input/output tensors
    auto a = backend->create_tensor(element::f32, shape);
    auto b = backend->create_tensor(element::f32, Shape{});
    auto result = backend->create_tensor(element::f32, Shape{});
    copy_data(a, vector<float>{1, 2, 3, 4});
    copy_data(b, vector<float>{0});
    backend->call_with_validate(g, {result}, {a, b});
    EXPECT_EQ((vector<float>{10}), read_vector<float>(result));
    // The reduction must not clobber its input tensors.
    EXPECT_EQ((vector<float>{1, 2, 3, 4}), read_vector<float>(a));
    EXPECT_EQ((vector<float>{0}), read_vector<float>(b));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_matrix_columns)
{
    // First, the reduction function (f(x:float32[],y:float32[]) = x+y).
    auto f_A = make_shared<op::Parameter>(element::f32, Shape{});
    auto f_B = make_shared<op::Parameter>(element::f32, Shape{});
    auto f = make_shared<Function>(make_shared<op::Add>(f_A, f_B), op::ParameterVector{f_A, f_B});
    // Now the reduction (g(x:float32[3,2],y:float32[]) = reduce(x,y,f,axes={0})),
    // i.e. summing down each column.
    Shape shape_a{3, 2};
    auto g_A = make_shared<op::Parameter>(element::f32, shape_a);
    auto g_B = make_shared<op::Parameter>(element::f32, Shape{});
    Shape shape_rt{2};
    auto g = make_shared<Function>(make_shared<op::Reduce>(g_A, g_B, f, AxisSet{0}),
                                   op::ParameterVector{g_A, g_B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
    auto b = backend->create_tensor(element::f32, Shape{});
    copy_data(b, vector<float>{0});
    auto result = backend->create_tensor(element::f32, shape_rt);
    backend->call_with_validate(g, {result}, {a, b});
    EXPECT_EQ((vector<float>{9, 12}), read_vector<float>(result));
    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    EXPECT_EQ((vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(a));
    EXPECT_EQ((vector<float>{0}), read_vector<float>(b));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_matrix_rows)
{
    // First, the reduction function (f(x:float32[],y:float32[]) = x+y).
    auto f_A = make_shared<op::Parameter>(element::f32, Shape{});
    auto f_B = make_shared<op::Parameter>(element::f32, Shape{});
    auto f = make_shared<Function>(make_shared<op::Add>(f_A, f_B), op::ParameterVector{f_A, f_B});
    // Now the reduction (g(x:float32[3,2],y:float32[]) = reduce(x,y,f,axes={1})),
    // i.e. summing across each row.
    Shape shape_a{3, 2};
    auto g_A = make_shared<op::Parameter>(element::f32, shape_a);
    auto g_B = make_shared<op::Parameter>(element::f32, Shape{});
    Shape shape_rt{3};
    auto g = make_shared<Function>(make_shared<op::Reduce>(g_A, g_B, f, AxisSet{1}),
                                   op::ParameterVector{g_A, g_B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
    auto b = backend->create_tensor(element::f32, Shape{});
    copy_data(b, vector<float>{0});
    auto result = backend->create_tensor(element::f32, shape_rt);
    backend->call_with_validate(g, {result}, {a, b});
    EXPECT_EQ((vector<float>{3, 7, 11}), read_vector<float>(result));
    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    EXPECT_EQ((vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(a));
    EXPECT_EQ((vector<float>{0}), read_vector<float>(b));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_matrix_rows_zero)
{
    // First, the reduction function (f(x:float32[],y:float32[]) = x+y).
    auto f_A = make_shared<op::Parameter>(element::f32, Shape{});
    auto f_B = make_shared<op::Parameter>(element::f32, Shape{});
    auto f = make_shared<Function>(make_shared<op::Add>(f_A, f_B), op::ParameterVector{f_A, f_B});
    // Now the reduction (g(x:float32[3,0],y:float32[]) = reduce(x,y,f,axes={1})).
    // Each row is empty, so each output element is just the init value.
    Shape shape_a{3, 0};
    auto g_A = make_shared<op::Parameter>(element::f32, shape_a);
    auto g_B = make_shared<op::Parameter>(element::f32, Shape{});
    Shape shape_rt{3};
    auto g = make_shared<Function>(make_shared<op::Reduce>(g_A, g_B, f, AxisSet{1}),
                                   op::ParameterVector{g_A, g_B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{});
    auto b = backend->create_tensor(element::f32, Shape{});
    copy_data(b, vector<float>{66});
    auto result = backend->create_tensor(element::f32, shape_rt);
    backend->call_with_validate(g, {result}, {a, b});
    EXPECT_EQ((vector<float>{66, 66, 66}), read_vector<float>(result));
    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    EXPECT_EQ((vector<float>{}), read_vector<float>(a));
    EXPECT_EQ((vector<float>{66}), read_vector<float>(b));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_matrix_cols_zero)
{
    // First, the reduction function (f(x:float32[],y:float32[]) = x+y).
    auto f_A = make_shared<op::Parameter>(element::f32, Shape{});
    auto f_B = make_shared<op::Parameter>(element::f32, Shape{});
    auto f = make_shared<Function>(make_shared<op::Add>(f_A, f_B), op::ParameterVector{f_A, f_B});
    // Now the reduction (g(x:float32[0,2],y:float32[]) = reduce(x,y,f,axes={0})).
    // Each column is empty, so each output element is just the init value.
    Shape shape_a{0, 2};
    auto g_A = make_shared<op::Parameter>(element::f32, shape_a);
    auto g_B = make_shared<op::Parameter>(element::f32, Shape{});
    Shape shape_rt{2};
    auto g = make_shared<Function>(make_shared<op::Reduce>(g_A, g_B, f, AxisSet{0}),
                                   op::ParameterVector{g_A, g_B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{});
    auto b = backend->create_tensor(element::f32, Shape{});
    copy_data(b, vector<float>{77});
    auto result = backend->create_tensor(element::f32, shape_rt);
    backend->call_with_validate(g, {result}, {a, b});
    EXPECT_EQ((vector<float>{77, 77}), read_vector<float>(result));
    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    EXPECT_EQ((vector<float>{}), read_vector<float>(a));
    EXPECT_EQ((vector<float>{77}), read_vector<float>(b));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_vector_zero)
{
    // First, the reduction function (f(x:float32[],y:float32[]) = x+y).
    auto f_A = make_shared<op::Parameter>(element::f32, Shape{});
    auto f_B = make_shared<op::Parameter>(element::f32, Shape{});
    auto f = make_shared<Function>(make_shared<op::Add>(f_A, f_B), op::ParameterVector{f_A, f_B});
    // Now the reduction (g(x:float32[0],y:float32[]) = reduce(x,y,f,axes={0})).
    // The vector is empty, so the scalar result is just the init value.
    Shape shape_a{0};
    auto g_A = make_shared<op::Parameter>(element::f32, shape_a);
    auto g_B = make_shared<op::Parameter>(element::f32, Shape{});
    Shape shape_rt{};
    auto g = make_shared<Function>(make_shared<op::Reduce>(g_A, g_B, f, AxisSet{0}),
                                   op::ParameterVector{g_A, g_B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{});
    auto b = backend->create_tensor(element::f32, Shape{});
    copy_data(b, vector<float>{88});
    auto result = backend->create_tensor(element::f32, shape_rt);
    backend->call_with_validate(g, {result}, {a, b});
    EXPECT_EQ((vector<float>{88}), read_vector<float>(result));
    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    EXPECT_EQ((vector<float>{}), read_vector<float>(a));
    EXPECT_EQ((vector<float>{88}), read_vector<float>(b));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_matrix_to_scalar_zero_by_zero)
{
    // First, the reduction function (f(x:float32[],y:float32[]) = x+y).
    auto f_A = make_shared<op::Parameter>(element::f32, Shape{});
    auto f_B = make_shared<op::Parameter>(element::f32, Shape{});
    auto f = make_shared<Function>(make_shared<op::Add>(f_A, f_B), op::ParameterVector{f_A, f_B});
    // Now the reduction (g(x:float32[0,0],y:float32[]) = reduce(x,y,f,axes={0,1})).
    // The matrix is empty, so the scalar result is just the init value.
    Shape shape_a{0, 0};
    auto g_A = make_shared<op::Parameter>(element::f32, shape_a);
    auto g_B = make_shared<op::Parameter>(element::f32, Shape{});
    Shape shape_rt{};
    auto g = make_shared<Function>(make_shared<op::Reduce>(g_A, g_B, f, AxisSet{0, 1}),
                                   op::ParameterVector{g_A, g_B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{});
    auto b = backend->create_tensor(element::f32, Shape{});
    copy_data(b, vector<float>{99});
    auto result = backend->create_tensor(element::f32, shape_rt);
    backend->call_with_validate(g, {result}, {a, b});
    EXPECT_EQ((vector<float>{99}), read_vector<float>(result));
    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    EXPECT_EQ((vector<float>{}), read_vector<float>(a));
    EXPECT_EQ((vector<float>{99}), read_vector<float>(b));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_3d_to_vector)
{
    // First, the reduction function (f(x:float32[],y:float32[]) = x*y).
    auto f_A = make_shared<op::Parameter>(element::f32, Shape{});
    auto f_B = make_shared<op::Parameter>(element::f32, Shape{});
    auto f =
        make_shared<Function>(make_shared<op::Multiply>(f_A, f_B), op::ParameterVector{f_A, f_B});
    // Reduce a 3x3x3 tensor over axes {0,1} by multiplication (init value 1),
    // leaving a length-3 vector of per-last-axis products.
    Shape shape_a{3, 3, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_rt{3};
    auto g = make_shared<Function>(make_shared<op::Reduce>(A, B, f, AxisSet{0, 1}),
                                   op::ParameterVector{A, B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14,
                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{1});
    auto result = backend->create_tensor(element::f32, shape_rt);
    backend->call_with_validate(g, {result}, {a, b});
    EXPECT_EQ((vector<float>{1.0f * 10.0f * 19.0f * 4.0f * 13.0f * 22.0f * 7.0f * 16.0f * 25.0f,
                             2.0f * 11.0f * 20.0f * 5.0f * 14.0f * 23.0f * 8.0f * 17.0f * 26.0f,
                             3.0f * 12.0f * 21.0f * 6.0f * 15.0f * 24.0f * 9.0f * 18.0f * 27.0f}),
              read_vector<float>(result));
}
//
// The unit tests for ReduceWindow follow exactly what we test for MaxPool---but they use ReduceWindow to do it.
//
NGRAPH_TEST(${BACKEND_NAME}, reduce_window_emulating_max_pool_1d_1channel_1image)
{
    // Per-window reduction function: scalar max over two scalars.
    Shape shape_ra{};
    auto RA = make_shared<op::Parameter>(element::f32, shape_ra);
    Shape shape_rb{};
    auto RB = make_shared<op::Parameter>(element::f32, shape_rb);
    auto rf = make_shared<Function>(make_shared<op::Maximum>(RA, RB), op::ParameterVector{RA, RB});
    // 1x1x14 input, window 1x1x3, stride 1 => 1x1x12 output (no padding).
    Shape shape_a{1, 1, 14};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{1, 1, 12};
    Shape window_shape{1, 1, 3};
    auto window_movement_strides = Strides{1, 1, 1};
    auto f = make_shared<Function>(
        make_shared<op::ReduceWindow>(A, B, rf, window_shape, window_movement_strides),
        op::ParameterVector{A, B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 3>{{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0}}}.get_vector());
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(
        b,
        vector<float>{
            -1}); // Really should use -inf but since we know the values in the test vector this should work
    auto result = backend->create_tensor(element::f32, shape_r);
    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((test::NDArray<float, 3>({{{1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 0}}}).get_vector()),
              read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_window_emulating_max_pool_1d_1channel_2image)
{
    // Per-window reduction function: scalar max over two scalars.
    Shape shape_ra{};
    auto RA = make_shared<op::Parameter>(element::f32, shape_ra);
    Shape shape_rb{};
    auto RB = make_shared<op::Parameter>(element::f32, shape_rb);
    auto rf = make_shared<Function>(make_shared<op::Maximum>(RA, RB), op::ParameterVector{RA, RB});
    // 2x1x14 input (two images), window 1x1x3, stride 1 => 2x1x12 output.
    Shape shape_a{2, 1, 14};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{2, 1, 12};
    Shape window_shape{1, 1, 3};
    auto window_movement_strides = Strides{1, 1, 1};
    auto f = make_shared<Function>(
        make_shared<op::ReduceWindow>(A, B, rf, window_shape, window_movement_strides),
        op::ParameterVector{A, B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 3>({{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0}},
                                       {{0, 2, 1, 1, 0, 0, 0, 2, 0, 1, 0, 0, 1, 2}}})
                  .get_vector());
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(
        b,
        vector<float>{
            -1}); // Really should use -inf but since we know the values in the test vector this should work
    auto result = backend->create_tensor(element::f32, shape_r);
    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((test::NDArray<float, 3>(
                   {{{1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 0}}, {{2, 2, 1, 1, 0, 2, 2, 2, 1, 1, 1, 2}}})
                   .get_vector()),
              read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_window_emulating_max_pool_1d_2channel_2image)
{
    // Per-window reduction function: scalar max over two scalars.
    Shape shape_ra{};
    auto RA = make_shared<op::Parameter>(element::f32, shape_ra);
    Shape shape_rb{};
    auto RB = make_shared<op::Parameter>(element::f32, shape_rb);
    auto rf = make_shared<Function>(make_shared<op::Maximum>(RA, RB), op::ParameterVector{RA, RB});
    // 2x2x14 input (two images, two channels), window 1x1x3, stride 1 => 2x2x12.
    Shape shape_a{2, 2, 14};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{2, 2, 12};
    Shape window_shape{1, 1, 3};
    auto window_movement_strides = Strides{1, 1, 1};
    auto f = make_shared<Function>(
        make_shared<op::ReduceWindow>(A, B, rf, window_shape, window_movement_strides),
        op::ParameterVector{A, B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 3>({{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0},
                                        {0, 0, 0, 2, 0, 0, 2, 3, 0, 1, 2, 0, 1, 0}},
                                       {{0, 2, 1, 1, 0, 0, 0, 2, 0, 1, 0, 0, 1, 2},
                                        {2, 1, 0, 0, 1, 0, 2, 0, 0, 0, 1, 1, 2, 0}}})
                  .get_vector());
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(
        b,
        vector<float>{
            -1}); // Really should use -inf but since we know the values in the test vector this should work
    auto result = backend->create_tensor(element::f32, shape_r);
    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((test::NDArray<float, 3>(
                   {{{1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 0}, {0, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 1}},
                    {{2, 2, 1, 1, 0, 2, 2, 2, 1, 1, 1, 2}, {2, 1, 1, 1, 2, 2, 2, 0, 1, 1, 2, 2}}})
                   .get_vector()),
              read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_window_emulating_max_pool_2d_2channel_2image)
{
    // Per-window reduction function: scalar max over two scalars.
    Shape shape_ra{};
    auto RA = make_shared<op::Parameter>(element::f32, shape_ra);
    Shape shape_rb{};
    auto RB = make_shared<op::Parameter>(element::f32, shape_rb);
    auto rf = make_shared<Function>(make_shared<op::Maximum>(RA, RB), op::ParameterVector{RA, RB});
    // 2x2x5x5 input, window 1x1x2x3, stride 1 => 2x2x4x3 output (no padding).
    Shape shape_a{2, 2, 5, 5};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{2, 2, 4, 3};
    Shape window_shape{1, 1, 2, 3};
    auto window_movement_strides = Strides{1, 1, 1, 1};
    auto f = make_shared<Function>(
        make_shared<op::ReduceWindow>(A, B, rf, window_shape, window_movement_strides),
        op::ParameterVector{A, B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>({{{{0, 1, 0, 2, 1}, // img 0 chan 0
                                         {0, 3, 2, 0, 0},
                                         {2, 0, 0, 0, 1},
                                         {2, 0, 1, 1, 2},
                                         {0, 2, 1, 0, 0}},

                                        {{0, 0, 0, 2, 0}, // img 0 chan 1
                                         {0, 2, 3, 0, 1},
                                         {2, 0, 1, 0, 2},
                                         {3, 1, 0, 0, 0},
                                         {2, 0, 0, 0, 0}}},

                                       {{{0, 2, 1, 1, 0}, // img 1 chan 0
                                         {0, 0, 2, 0, 1},
                                         {0, 0, 1, 2, 3},
                                         {2, 0, 0, 3, 0},
                                         {0, 0, 0, 0, 0}},

                                        {{2, 1, 0, 0, 1}, // img 1 chan 1
                                         {0, 2, 0, 0, 0},
                                         {1, 1, 2, 0, 2},
                                         {1, 1, 1, 0, 1},
                                         {1, 0, 0, 0, 2}}}})
                  .get_vector());
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(
        b,
        vector<float>{
            -1}); // Really should use -inf but since we know the values in the test vector this should work
    auto result = backend->create_tensor(element::f32, shape_r);
    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((test::NDArray<float, 4>({{{{3, 3, 2}, // img 0 chan 0
                                          {3, 3, 2},
                                          {2, 1, 2},
                                          {2, 2, 2}},

                                         {{3, 3, 3}, // img 0 chan 1
                                          {3, 3, 3},
                                          {3, 1, 2},
                                          {3, 1, 0}}},

                                        {{{2, 2, 2}, // img 1 chan 0
                                          {2, 2, 3},
                                          {2, 3, 3},
                                          {2, 3, 3}},

                                         {{2, 2, 1}, // img 1 chan 1
                                          {2, 2, 2},
                                          {2, 2, 2},
                                          {1, 1, 2}}}})
                   .get_vector()),
              read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_window_emulating_max_pool_2d_1channel_1image_strided)
{
    // Per-window reduction function: scalar max over two scalars.
    Shape shape_ra{};
    auto RA = make_shared<op::Parameter>(element::f32, shape_ra);
    Shape shape_rb{};
    auto RB = make_shared<op::Parameter>(element::f32, shape_rb);
    auto rf = make_shared<Function>(make_shared<op::Maximum>(RA, RB), op::ParameterVector{RA, RB});
    // 1x1x8x8 input, window 1x1x2x3 with strides 3 (rows) and 2 (cols)
    // => 1x1x3x3 output.
    Shape shape_a{1, 1, 8, 8};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{1, 1, 3, 3};
    Shape window_shape{1, 1, 2, 3};
    auto window_movement_strides = Strides{1, 1, 3, 2};
    auto f = make_shared<Function>(
        make_shared<op::ReduceWindow>(A, B, rf, window_shape, window_movement_strides),
        op::ParameterVector{A, B});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>({{{{0, 1, 0, 2, 1, 2, 0, 0},
                                         {0, 3, 2, 0, 0, 0, 1, 0},
                                         {2, 0, 0, 0, 1, 0, 0, 0},
                                         {2, 0, 1, 1, 2, 2, 3, 0},
                                         {0, 2, 1, 0, 0, 0, 1, 0},
                                         {2, 0, 3, 1, 0, 0, 0, 0},
                                         {1, 2, 0, 0, 0, 1, 2, 0},
                                         {1, 0, 2, 0, 0, 0, 1, 0}}}})
                  .get_vector());
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(
        b,
        vector<float>{
            -1}); // Really should use -inf but since we know the values in the test vector this should work
    auto result = backend->create_tensor(element::f32, shape_r);
    backend->call_with_validate(f, {result}, {a, b});
    EXPECT_EQ((test::NDArray<float, 4>({{{{3, 2, 2}, {2, 2, 3}, {2, 2, 2}}}}).get_vector()),
              read_vector<float>(result));
}
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, reshape_t2v_012)
{
    // Flatten a 2x2x3 tensor to a 12-element vector using the identity axis
    // order; element order must be preserved.
    const Shape in_shape{2, 2, 3};
    const Shape out_shape{12};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{0, 1, 2}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}), read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_012)
{
    // Collapse a 1x1x1 tensor to a scalar using the identity axis order.
    const Shape in_shape{1, 1, 1};
    const Shape out_shape{};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{0, 1, 2}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{6});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{6}), read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_120)
{
    // Collapse a 1x1x1 tensor to a scalar with a non-identity axis order
    // (1,2,0); with a single element the result is unchanged.
    const Shape in_shape{1, 1, 1};
    const Shape out_shape{};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{1, 2, 0}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{6});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{6}), read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t)
{
    // Expand a scalar to a rank-6 tensor of all-1 dimensions; the single
    // element carries over untouched.
    const Shape in_shape{};
    const Shape out_shape{1, 1, 1, 1, 1, 1};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{42});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{42}), read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t1)
{
    // Expand a boolean scalar to a one-element vector; exercises reshape on
    // the element::boolean type (stored as char).
    const Shape in_shape{};
    const Shape out_shape{1};
    auto param = make_shared<op::Parameter>(element::boolean, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::boolean, in_shape);
    copy_data(input, vector<char>{42});
    auto output = backend->create_tensor(element::boolean, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<char>{42}), read_vector<char>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_col)
{
    // Reshape a 3-vector into a 3x1 column matrix; data order is unchanged.
    const Shape in_shape{3};
    const Shape out_shape{3, 1};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{0}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 2, 3}), read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_row)
{
    // Reshape a 3-vector into a 1x3 row matrix; data order is unchanged.
    const Shape in_shape{3};
    const Shape out_shape{1, 3};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{0}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 2, 3}), read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_v2t_middle)
{
    // Reshape a 3-vector into a 1x3x1 tensor (vector along the middle axis);
    // data order is unchanged.
    const Shape in_shape{3};
    const Shape out_shape{1, 3, 1};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{0}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 2, 3}), read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_m2m_same)
{
    // Identity reshape of a 3x3 matrix: same shape, identity axis order, so
    // the output equals the input.
    const Shape io_shape{3, 3};
    auto param = make_shared<op::Parameter>(element::f32, io_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{0, 1}, io_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, io_shape);
    copy_data(input, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9});
    auto output = backend->create_tensor(element::f32, io_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9}), read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_m2m_transpose)
{
    // Transpose a square 3x3 matrix via Reshape with axis order (1,0).
    const Shape io_shape{3, 3};
    auto param = make_shared<op::Parameter>(element::f32, io_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{1, 0}, io_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, io_shape);
    copy_data(input, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9});
    auto output = backend->create_tensor(element::f32, io_shape);

    backend->call_with_validate(func, {output}, {input});
    // Rows of the input become columns of the output.
    EXPECT_EQ((vector<float>{1, 4, 7, 2, 5, 8, 3, 6, 9}), read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_m2m_dim_change_transpose)
{
    // Transpose a non-square 3x2 matrix into 2x3 via axis order (1,0).
    const Shape in_shape{3, 2};
    const Shape out_shape{2, 3};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{1, 0}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3, 4, 5, 6});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 3, 5, 2, 4, 6}), read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_3d_transpose_021)
{
    // Swap the last two axes of a 2x3x4 tensor (axis order 0,2,1).
    const Shape in_shape{2, 3, 4};
    const Shape out_shape{2, 4, 3};

    // Input holds 1..24 in row-major order.
    vector<float> input_values(2 * 3 * 4);
    for (size_t i = 0; i < input_values.size(); ++i)
    {
        input_values[i] = static_cast<float>(i + 1);
    }

    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{0, 2, 1}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, input_values);
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12,
                             13, 17, 21, 14, 18, 22, 15, 19, 23, 16, 20, 24}),
              read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_3d_transpose_210)
{
    // Fully reverse the axes of a 2x3x4 tensor (axis order 2,1,0).
    const Shape in_shape{2, 3, 4};
    const Shape out_shape{4, 3, 2};

    // Input holds 1..24 in row-major order.
    vector<float> input_values(2 * 3 * 4);
    for (size_t i = 0; i < input_values.size(); ++i)
    {
        input_values[i] = static_cast<float>(i + 1);
    }

    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{2, 1, 0}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, input_values);
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22,
                             3, 15, 7, 19, 11, 23, 4, 16, 8, 20, 12, 24}),
              read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_3d_transpose_201)
{
    // Rotate the axes of a 2x3x4 tensor with axis order (2,0,1).
    const Shape in_shape{2, 3, 4};
    const Shape out_shape{4, 2, 3};

    // Input holds 1..24 in row-major order.
    vector<float> input_values(2 * 3 * 4);
    for (size_t i = 0; i < input_values.size(); ++i)
    {
        input_values[i] = static_cast<float>(i + 1);
    }

    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{2, 0, 1}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, input_values);
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22,
                             3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24}),
              read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_3d_transpose_102)
{
    // Swap the first two axes of a 2x3x4 tensor (axis order 1,0,2).
    const Shape in_shape{2, 3, 4};
    const Shape out_shape{3, 2, 4};

    // Input holds 1..24 in row-major order.
    vector<float> input_values(2 * 3 * 4);
    for (size_t i = 0; i < input_values.size(); ++i)
    {
        input_values[i] = static_cast<float>(i + 1);
    }

    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{1, 0, 2}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, input_values);
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 2, 3, 4, 13, 14, 15, 16, 5, 6, 7, 8,
                             17, 18, 19, 20, 9, 10, 11, 12, 21, 22, 23, 24}),
              read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_3d_transpose_120)
{
    // Rotate the axes of a 2x3x4 tensor with axis order (1,2,0).
    const Shape in_shape{2, 3, 4};
    const Shape out_shape{3, 4, 2};

    // Input holds 1..24 in row-major order.
    vector<float> input_values(2 * 3 * 4);
    for (size_t i = 0; i < input_values.size(); ++i)
    {
        input_values[i] = static_cast<float>(i + 1);
    }

    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{1, 2, 0}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, input_values);
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18,
                             7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24}),
              read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_4d_transpose)
{
    // NCHW -> NHWC style transpose: axis order (0,2,3,1) moves the channel
    // axis last, turning a 2x2x5x5 tensor into 2x5x5x2.
    vector<float> a_data(2 * 2 * 5 * 5);
    // Input holds 1..100 in row-major order.
    for (int i = 0; i < 2 * 2 * 5 * 5; i++)
    {
        a_data[i] = float(i + 1);
    }
    Shape shape_a{2, 2, 5, 5};
    Shape shape_r{2, 5, 5, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto r = make_shared<op::Reshape>(A, AxisVector{0, 2, 3, 1}, shape_r);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, a_data);
    auto result = backend->create_tensor(element::f32, shape_r);
    backend->call_with_validate(f, {result}, {a});
    // Expected output interleaves the two channels of each image.
    EXPECT_EQ(
        (vector<float>{1.,  26., 2.,  27., 3.,  28., 4.,  29., 5.,  30., 6.,  31., 7.,  32., 8.,
                       33., 9.,  34., 10., 35., 11., 36., 12., 37., 13., 38., 14., 39., 15., 40.,
                       16., 41., 17., 42., 18., 43., 19., 44., 20., 45., 21., 46., 22., 47., 23.,
                       48., 24., 49., 25., 50., 51., 76., 52., 77., 53., 78., 54., 79., 55., 80.,
                       56., 81., 57., 82., 58., 83., 59., 84., 60., 85., 61., 86., 62., 87., 63.,
                       88., 64., 89., 65., 90., 66., 91., 67., 92., 68., 93., 69., 94., 70., 95.,
                       71., 96., 72., 97., 73., 98., 74., 99., 75., 100.}),
        read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_4d_no_transpose)
{
    // Reshape a 2x2x5x5 tensor to 2x5x5x2 with the identity axis order: the
    // dimensions change but the flat element order does not.
    const Shape in_shape{2, 2, 5, 5};
    const Shape out_shape{2, 5, 5, 2};

    // Input holds 1..100 in row-major order.
    vector<float> input_values(2 * 2 * 5 * 5);
    for (size_t i = 0; i < input_values.size(); ++i)
    {
        input_values[i] = static_cast<float>(i + 1);
    }

    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{0, 1, 2, 3}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, input_values);
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ(input_values, read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, reshape_transposed_shape_change)
{
    // Transpose a 2x6 matrix (axis order 1,0) while simultaneously flattening
    // it to a 12-element vector: output order is column-major.
    const Shape in_shape{2, 6};
    const Shape out_shape{12};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{1, 0}, out_shape);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 7, 2, 8, 3, 9, 4, 10, 5, 11, 6, 12}), read_vector<float>(output));
}
//
// Numpy:
//
// >>> x = linspace(1,2*2*3*3*2*4,2*2*3*3*2*4)
// >>> x.shape=(2,2,3,3,2,4)
// >>> y = ascontiguousarray(transpose(x,(2,4,0,5,3,1)))
// >>> y.shape=2*2*3*3*2*4
// >>> y
// array([ 1., 73., 9., 81., 17., 89., 2., 74., 10.,
// 82., 18., 90., 3., 75., 11., 83., 19., 91.,
// 4., 76., 12., 84., 20., 92., 145., 217., 153.,
// 225., 161., 233., 146., 218., 154., 226., 162., 234.,
// 147., 219., 155., 227., 163., 235., 148., 220., 156.,
// 228., 164., 236., 5., 77., 13., 85., 21., 93.,
// 6., 78., 14., 86., 22., 94., 7., 79., 15.,
// 87., 23., 95., 8., 80., 16., 88., 24., 96.,
// 149., 221., 157., 229., 165., 237., 150., 222., 158.,
// 230., 166., 238., 151., 223., 159., 231., 167., 239.,
// 152., 224., 160., 232., 168., 240., 25., 97., 33.,
// 105., 41., 113., 26., 98., 34., 106., 42., 114.,
// 27., 99., 35., 107., 43., 115., 28., 100., 36.,
// 108., 44., 116., 169., 241., 177., 249., 185., 257.,
// 170., 242., 178., 250., 186., 258., 171., 243., 179.,
// 251., 187., 259., 172., 244., 180., 252., 188., 260.,
// 29., 101., 37., 109., 45., 117., 30., 102., 38.,
// 110., 46., 118., 31., 103., 39., 111., 47., 119.,
// 32., 104., 40., 112., 48., 120., 173., 245., 181.,
// 253., 189., 261., 174., 246., 182., 254., 190., 262.,
// 175., 247., 183., 255., 191., 263., 176., 248., 184.,
// 256., 192., 264., 49., 121., 57., 129., 65., 137.,
// 50., 122., 58., 130., 66., 138., 51., 123., 59.,
// 131., 67., 139., 52., 124., 60., 132., 68., 140.,
// 193., 265., 201., 273., 209., 281., 194., 266., 202.,
// 274., 210., 282., 195., 267., 203., 275., 211., 283.,
// 196., 268., 204., 276., 212., 284., 53., 125., 61.,
// 133., 69., 141., 54., 126., 62., 134., 70., 142.,
// 55., 127., 63., 135., 71., 143., 56., 128., 64.,
// 136., 72., 144., 197., 269., 205., 277., 213., 285.,
// 198., 270., 206., 278., 214., 286., 199., 271., 207.,
// 279., 215., 287., 200., 272., 208., 280., 216., 288.])
//
NGRAPH_TEST(${BACKEND_NAME}, reshape_6d)
{
    // 6-D transpose: axis order (2,4,0,5,3,1) maps a 2x2x3x3x2x4 tensor onto
    // 3x2x2x4x3x2. The expected values were computed with
    // numpy.transpose(x, (2,4,0,5,3,1)) on x = 1..288.
    vector<float> a_data(2 * 2 * 3 * 3 * 2 * 4);
    // Input holds 1..288 in row-major order.
    for (int i = 0; i < 2 * 2 * 3 * 3 * 2 * 4; i++)
    {
        a_data[i] = float(i + 1);
    }
    Shape shape_a{2, 2, 3, 3, 2, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{3, 2, 2, 4, 3, 2};
    auto r = make_shared<op::Reshape>(A, AxisVector{2, 4, 0, 5, 3, 1}, shape_r);
    auto f = make_shared<Function>(r, op::ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, a_data);
    auto result = backend->create_tensor(element::f32, shape_r);
    backend->call_with_validate(f, {result}, {a});
    EXPECT_EQ(
        (vector<float>{
            1.,   73.,  9.,   81.,  17.,  89.,  2.,   74.,  10.,  82.,  18.,  90.,  3.,   75.,
            11.,  83.,  19.,  91.,  4.,   76.,  12.,  84.,  20.,  92.,  145., 217., 153., 225.,
            161., 233., 146., 218., 154., 226., 162., 234., 147., 219., 155., 227., 163., 235.,
            148., 220., 156., 228., 164., 236., 5.,   77.,  13.,  85.,  21.,  93.,  6.,   78.,
            14.,  86.,  22.,  94.,  7.,   79.,  15.,  87.,  23.,  95.,  8.,   80.,  16.,  88.,
            24.,  96.,  149., 221., 157., 229., 165., 237., 150., 222., 158., 230., 166., 238.,
            151., 223., 159., 231., 167., 239., 152., 224., 160., 232., 168., 240., 25.,  97.,
            33.,  105., 41.,  113., 26.,  98.,  34.,  106., 42.,  114., 27.,  99.,  35.,  107.,
            43.,  115., 28.,  100., 36.,  108., 44.,  116., 169., 241., 177., 249., 185., 257.,
            170., 242., 178., 250., 186., 258., 171., 243., 179., 251., 187., 259., 172., 244.,
            180., 252., 188., 260., 29.,  101., 37.,  109., 45.,  117., 30.,  102., 38.,  110.,
            46.,  118., 31.,  103., 39.,  111., 47.,  119., 32.,  104., 40.,  112., 48.,  120.,
            173., 245., 181., 253., 189., 261., 174., 246., 182., 254., 190., 262., 175., 247.,
            183., 255., 191., 263., 176., 248., 184., 256., 192., 264., 49.,  121., 57.,  129.,
            65.,  137., 50.,  122., 58.,  130., 66.,  138., 51.,  123., 59.,  131., 67.,  139.,
            52.,  124., 60.,  132., 68.,  140., 193., 265., 201., 273., 209., 281., 194., 266.,
            202., 274., 210., 282., 195., 267., 203., 275., 211., 283., 196., 268., 204., 276.,
            212., 284., 53.,  125., 61.,  133., 69.,  141., 54.,  126., 62.,  134., 70.,  142.,
            55.,  127., 63.,  135., 71.,  143., 56.,  128., 64.,  136., 72.,  144., 197., 269.,
            205., 277., 213., 285., 198., 270., 206., 278., 214., 286., 199., 271., 207., 279.,
            215., 287., 200., 272., 208., 280., 216., 288.}),
        read_vector<float>(result));
}
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
static std::mt19937_64 random_generator;
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// Trivial case with no summed axes.
NGRAPH_TEST(${BACKEND_NAME}, sum_trivial)
{
    // Sum over an empty axis set: the output must equal the input.
    const Shape io_shape{2, 2};
    auto param = make_shared<op::Parameter>(element::f32, io_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, io_shape);
    copy_data(input, vector<float>{1, 2, 3, 4});
    auto output = backend->create_tensor(element::f32, io_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{1, 2, 3, 4}), read_vector<float>(output));
}
// Failure has been reported at 5D for some reason
NGRAPH_TEST(${BACKEND_NAME}, sum_trivial_5d)
{
    // Sum over an empty axis set on a rank-5 tensor; output equals input.
    const Shape io_shape{2, 2, 2, 2, 2};
    auto param = make_shared<op::Parameter>(element::f32, io_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // All 32 elements are 1.
    const vector<float> ones(32, 1);
    auto input = backend->create_tensor(element::f32, io_shape);
    copy_data(input, ones);
    auto output = backend->create_tensor(element::f32, io_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ(ones, read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_to_scalar)
{
    // Sum both axes of a 2x2 matrix down to a scalar.
    const Shape in_shape{2, 2};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{0, 1}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3, 4});
    auto output = backend->create_tensor(element::f32, Shape{});

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{10}), read_vector<float>(output));

    // The reduction must not clobber its input tensor.
    EXPECT_EQ((vector<float>{1, 2, 3, 4}), read_vector<float>(input));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_large_1d_to_scalar)
{
    // Reduce a million-element vector to a scalar, comparing against a
    // double-precision reference sum accumulated on the host.
    const size_t element_count = 1000000;
    const Shape in_shape{element_count};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{0}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Deterministic pseudo-random input in [0, 255).
    random_generator.seed(2);
    vector<float> input_values(element_count, 0);
    double reference_sum = 0;
    for (size_t i = 0; i < element_count; i++)
    {
        input_values[i] = static_cast<float>(random_generator() % 255);
        reference_sum += static_cast<double>(input_values[i]);
    }

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, input_values);
    auto output = backend->create_tensor(element::f32, Shape{});

    backend->call_with_validate(func, {output}, {input});
    EXPECT_TRUE(test::all_close_f(vector<float>{static_cast<float>(reference_sum)},
                                  read_vector<float>(output)));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_columns)
{
    // Sum a 3x2 matrix along axis 0, producing one value per column.
    const Shape in_shape{3, 2};
    const Shape out_shape{2};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{0}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3, 4, 5, 6});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{9, 12}), read_vector<float>(output));

    // The reduction must not clobber its input tensor.
    EXPECT_EQ((vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(input));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_6d)
{
    // Sum axes 1 and 4 of a 6-D tensor, cross-checking the backend under test
    // against the INTERPRETER reference backend.
    const Shape in_shape{2, 6, 4, 5, 7, 3};
    const Shape out_shape{2, 4, 5, 3};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{1, 4}), op::ParameterVector{param});

    auto backend_wrk = runtime::Backend::create("${BACKEND_NAME}");
    auto backend_ref = runtime::Backend::create("INTERPRETER");

    // Identical 1..N input for both backends.
    vector<float> input_values(shape_size<const Shape>(in_shape));
    iota(input_values.begin(), input_values.end(), 1);

    auto input_wrk = backend_wrk->create_tensor(element::f32, in_shape);
    auto input_ref = backend_ref->create_tensor(element::f32, in_shape);
    copy_data(input_wrk, input_values);
    copy_data(input_ref, input_values);
    auto output_wrk = backend_wrk->create_tensor(element::f32, out_shape);
    auto output_ref = backend_ref->create_tensor(element::f32, out_shape);

    backend_wrk->call_with_validate(func, {output_wrk}, {input_wrk});
    backend_ref->call_with_validate(func, {output_ref}, {input_ref});
    EXPECT_EQ(read_vector<float>(output_ref), read_vector<float>(output_wrk));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_rows)
{
    // Sum a 3x2 matrix along axis 1, producing one value per row.
    const Shape in_shape{3, 2};
    const Shape out_shape{3};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{1}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3, 4, 5, 6});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{3, 7, 11}), read_vector<float>(output));

    // The reduction must not clobber its input tensor.
    EXPECT_EQ((vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(input));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_rows_zero)
{
    // Summing along axis 1 of a 3x0 matrix: each (empty) row reduces to the
    // additive identity, 0.
    const Shape in_shape{3, 0};
    const Shape out_shape{3};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{1}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{});
    auto output = backend->create_tensor(element::f32, out_shape);
    // Pre-fill the output with non-zero garbage to prove it gets overwritten.
    copy_data(output, vector<float>({3, 3, 3}));

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{0, 0, 0}), read_vector<float>(output));

    // The reduction must not clobber its input tensor.
    EXPECT_EQ((vector<float>{}), read_vector<float>(input));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_cols_zero)
{
    // Summing along axis 0 of a 0x2 matrix: each (empty) column reduces to the
    // additive identity, 0.
    const Shape in_shape{0, 2};
    const Shape out_shape{2};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{0}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{});
    auto output = backend->create_tensor(element::f32, out_shape);
    // Pre-fill the output with non-zero garbage to prove it gets overwritten.
    copy_data(output, vector<float>({3, 3}));

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{0, 0}), read_vector<float>(output));

    // The reduction must not clobber its input tensor.
    EXPECT_EQ((vector<float>{}), read_vector<float>(input));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_vector_zero)
{
    // Summing an empty vector to a scalar yields the additive identity, 0.
    const Shape in_shape{0};
    const Shape out_shape{};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{0}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{});
    auto output = backend->create_tensor(element::f32, out_shape);
    // Pre-fill the output with non-zero garbage to prove it gets overwritten.
    copy_data(output, vector<float>({3}));

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{0}), read_vector<float>(output));

    // The reduction must not clobber its input tensor.
    EXPECT_EQ((vector<float>{}), read_vector<float>(input));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_to_scalar_zero_by_zero)
{
    // Summing a 0x0 matrix over both axes yields the additive identity, 0.
    const Shape in_shape{0, 0};
    const Shape out_shape{};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{0, 1}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{});
    auto output = backend->create_tensor(element::f32, out_shape);
    // Pre-fill the output with non-zero garbage to prove it gets overwritten.
    copy_data(output, vector<float>({3}));

    backend->call_with_validate(func, {output}, {input});
    EXPECT_EQ((vector<float>{0}), read_vector<float>(output));

    // The reduction must not clobber its input tensor.
    EXPECT_EQ((vector<float>{}), read_vector<float>(input));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_matrix_most_sig)
{
    // Sum a 3x3x3 tensor along its most-significant axis (axis 0).
    const Shape in_shape{3, 3, 3};
    const Shape out_shape{3, 3};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{0}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                   15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    // Each output element sums the three values stacked along axis 0.
    EXPECT_EQ((vector<float>{1 + 10 + 19,
                             2 + 11 + 20,
                             3 + 12 + 21,
                             4 + 13 + 22,
                             5 + 14 + 23,
                             6 + 15 + 24,
                             7 + 16 + 25,
                             8 + 17 + 26,
                             9 + 18 + 27}),
              read_vector<float>(output));
}
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_matrix_least_sig)
{
    // Sum a 3x3x3 tensor along its least-significant axis (axis 2).
    const Shape in_shape{3, 3, 3};
    const Shape out_shape{3, 3};
    auto param = make_shared<op::Parameter>(element::f32, in_shape);
    auto func =
        make_shared<Function>(make_shared<op::Sum>(param, AxisSet{2}), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto input = backend->create_tensor(element::f32, in_shape);
    copy_data(input, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                   15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
    auto output = backend->create_tensor(element::f32, out_shape);

    backend->call_with_validate(func, {output}, {input});
    // Each output element sums three consecutive input values along axis 2.
    EXPECT_EQ((vector<float>{1 + 2 + 3,
                             4 + 5 + 6,
                             7 + 8 + 9,
                             10 + 11 + 12,
                             13 + 14 + 15,
                             16 + 17 + 18,
                             19 + 20 + 21,
                             22 + 23 + 24,
                             25 + 26 + 27}),
              read_vector<float>(output));
}
// Sum a 3x3x3 tensor over axes 0 and 1 together, leaving a length-3 vector
// indexed by the surviving axis 2.
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_vector)
{
    Shape shape_a{3, 3, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_rt{3};
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1}), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    // Row-major values 1..27.
    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
    auto result = backend->create_tensor(element::f32, shape_rt);

    backend->call_with_validate(f, {result}, {a});
    // Each output element sums the nine inputs sharing its axis-2 index.
    EXPECT_EQ((vector<float>{1 + 10 + 19 + 4 + 13 + 22 + 7 + 16 + 25,
                             2 + 11 + 20 + 5 + 14 + 23 + 8 + 17 + 26,
                             3 + 12 + 21 + 6 + 15 + 24 + 9 + 18 + 27}),
              read_vector<float>(result));
}
// Sum a 3x3x3 tensor over all three axes, reducing to a single scalar.
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_scalar)
{
    Shape shape_a{3, 3, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_rt{};
    auto f =
        make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1, 2}), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    // Row-major values 1..27; the full sum is 378.
    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
    auto result = backend->create_tensor(element::f32, shape_rt);

    backend->call_with_validate(f, {result}, {a});
    // The terms are spelled out in the same visiting order as the sibling tests.
    EXPECT_EQ((vector<float>{1 + 10 + 19 + 4 + 13 + 22 + 7 + 16 + 25 + 2 + 11 + 20 + 5 + 14 + 23 +
                             8 + 17 + 26 + 3 + 12 + 21 + 6 + 15 + 24 + 9 + 18 + 27}),
              read_vector<float>(result));
}
// Sum over a zero-length axis: reducing the size-0 axis 1 of a 3x0x2 tensor
// must produce a 3x2 matrix of zeros (the sum of an empty set of terms).
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_eliminate_zero_dim)
{
    Shape shape_a{3, 0, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_rt{3, 2};
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1}), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{});
    auto result = backend->create_tensor(element::f32, shape_rt);

    // Overwrite the initial result vector to make sure we're not just coincidentally getting the right value.
    copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});

    backend->call_with_validate(f, {result}, {a});
    EXPECT_EQ((vector<float>{0, 0, 0, 0, 0, 0}), read_vector<float>(result));
}
// Sum every axis of a 3^5 tensor of ones down to a scalar; the result is the
// element count (243), which is exactly representable in f32, so exact
// equality is safe.
NGRAPH_TEST(${BACKEND_NAME}, sum_5d_to_scalar)
{
    Shape shape_a{3, 3, 3, 3, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_rt{};
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1, 2, 3, 4}),
                                   op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    // Use the exact integer element count of the shape rather than
    // std::pow(3, 5): pow returns a double that is implicitly converted to the
    // vector size, and its result is not guaranteed exact for integer
    // arguments on every platform.
    copy_data(a, std::vector<float>(shape_size(shape_a), 1));
    auto result = backend->create_tensor(element::f32, shape_rt);

    backend->call_with_validate(f, {result}, {a});
    EXPECT_EQ(std::vector<float>{243.}, read_vector<float>(result));
}
This source diff could not be displayed because it is too large. You can view the blob instead.
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// Elementwise absolute value on a 2x2 f32 tensor. All inputs and outputs are
// exactly representable, so exact equality is used.
NGRAPH_TEST(${BACKEND_NAME}, abs)
{
    Shape shape{2, 2};
    auto param = make_shared<op::Parameter>(element::f32, shape);
    auto abs_node = make_shared<op::Abs>(param);
    auto f = make_shared<Function>(abs_node, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Mixed positive, negative, zero, and fractional inputs.
    auto arg = backend->create_tensor(element::f32, shape);
    copy_data(arg, vector<float>{1, -2, 0, -4.75f});
    auto out = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {out}, {arg});
    EXPECT_EQ((vector<float>{1, 2, 0, 4.75f}), read_vector<float>(out));
}
// Elementwise arccosine sampled across the domain [-1, 1]; expected values are
// precomputed reference constants checked with a float-tolerant comparison.
NGRAPH_TEST(${BACKEND_NAME}, acos)
{
    Shape shape{11};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Acos>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    vector<float> input{-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f};
    copy_data(a, input);
    auto result = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {result}, {a});
    // acos(-1) == pi down to acos(1) == 0, monotonically decreasing.
    EXPECT_TRUE(test::all_close_f(vector<float>{3.14159265f,
                                                2.41885841f,
                                                2.09439510f,
                                                1.82347658f,
                                                1.69612416f,
                                                1.57079633f,
                                                1.44546850f,
                                                1.31811607f,
                                                1.04719755f,
                                                0.72273425f,
                                                0.00000000f},
                                  read_vector<float>(result)));
}
// Elementwise arcsine sampled across the domain [-1, 1]; expected values are
// precomputed reference constants checked with a float-tolerant comparison.
NGRAPH_TEST(${BACKEND_NAME}, asin)
{
    Shape shape{11};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Asin>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    vector<float> input{-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f};
    copy_data(a, input);
    auto result = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {result}, {a});
    // Odd function: asin(-x) == -asin(x), with asin(+-1) == +-pi/2.
    EXPECT_TRUE(test::all_close_f(vector<float>{-1.57079633f,
                                                -0.84806208f,
                                                -0.52359878f,
                                                -0.25268026f,
                                                -0.12532783f,
                                                0.00000000f,
                                                0.12532783f,
                                                0.25268026f,
                                                0.52359878f,
                                                0.84806208f,
                                                1.57079633f},
                                  read_vector<float>(result)));
}
// Elementwise arctangent over a symmetric range of inputs; expected values are
// precomputed reference constants checked with a float-tolerant comparison.
NGRAPH_TEST(${BACKEND_NAME}, atan)
{
    Shape shape{11};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Atan>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    vector<float> input{-4.f, -2.f, -1.f, -0.5f, -0.25f, 0.f, 0.25f, 0.5f, 1.f, 2.f, 4.f};
    copy_data(a, input);
    auto result = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {result}, {a});
    // Odd function: atan(-x) == -atan(x), with atan(1) == pi/4.
    EXPECT_TRUE(test::all_close_f(vector<float>{-1.32581766f,
                                                -1.10714872f,
                                                -0.78539816f,
                                                -0.46364761f,
                                                -0.24497866f,
                                                0.00000000f,
                                                0.24497866f,
                                                0.46364761f,
                                                0.78539816f,
                                                1.10714872f,
                                                1.32581766f},
                                  read_vector<float>(result)));
}
// Elementwise ceiling on a 2x2 f32 tensor; covers a negative fraction, an
// exact negative integer, and positive fractions.
NGRAPH_TEST(${BACKEND_NAME}, ceiling)
{
    Shape shape{2, 2};
    auto param = make_shared<op::Parameter>(element::f32, shape);
    auto ceil_node = make_shared<op::Ceiling>(param);
    auto f = make_shared<Function>(ceil_node, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto arg = backend->create_tensor(element::f32, shape);
    copy_data(arg, vector<float>{-2.5f, -2.0f, 0.3f, 4.8f});
    auto out = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {out}, {arg});
    // ceil rounds toward +infinity; -2.0 is already integral and is unchanged.
    EXPECT_EQ((vector<float>{-2.0f, -2.0f, 1.0f, 5.0f}), read_vector<float>(out));
}
// Elementwise cosine over a symmetric range of inputs; expected values are
// precomputed reference constants checked with a float-tolerant comparison.
NGRAPH_TEST(${BACKEND_NAME}, cos)
{
    Shape shape{11};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Cos>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    vector<float> input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f};
    copy_data(a, input);
    auto result = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {result}, {a});
    // Even function: cos(-x) == cos(x), so expected values come in equal pairs.
    EXPECT_TRUE(test::all_close_f(vector<float>{1.00000000f,
                                                0.96891242f,
                                                0.96891242f,
                                                0.87758256f,
                                                0.87758256f,
                                                0.54030231f,
                                                0.54030231f,
                                                -0.41614684f,
                                                -0.41614684f,
                                                -0.65364362f,
                                                -0.65364362f},
                                  read_vector<float>(result)));
}
// Elementwise hyperbolic cosine; the expected values are computed on the host
// with coshf and compared with a float-tolerant check.
NGRAPH_TEST(${BACKEND_NAME}, cosh)
{
    Shape shape{6};
    auto param = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Cosh>(param), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    vector<float> input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f};
    auto arg = backend->create_tensor(element::f32, shape);
    copy_data(arg, input);
    auto out = backend->create_tensor(element::f32, shape);

    // Host-side reference results.
    vector<float> expected;
    for (float x : input)
    {
        expected.push_back(coshf(x));
    }

    backend->call_with_validate(f, {out}, {arg});
    EXPECT_TRUE(test::all_close_f(expected, read_vector<float>(out)));
}
// Elementwise exponential; the expected values are computed on the host with
// expf and compared with a float-tolerant check.
NGRAPH_TEST(${BACKEND_NAME}, exp)
{
    Shape shape{8};
    auto param = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Exp>(param), op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    vector<float> input{-4, -3, -2, -1, 0, 1, 2, 3};
    auto arg = backend->create_tensor(element::f32, shape);
    copy_data(arg, input);
    auto out = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {out}, {arg});

    // Host-side reference results.
    vector<float> expected;
    for (float x : input)
    {
        expected.push_back(expf(x));
    }
    EXPECT_TRUE(test::all_close_f(expected, read_vector<float>(out)));
}
// Elementwise floor on a 2x2 f32 tensor; covers a negative fraction, an exact
// negative integer, and positive fractions.
NGRAPH_TEST(${BACKEND_NAME}, floor)
{
    Shape shape{2, 2};
    auto param = make_shared<op::Parameter>(element::f32, shape);
    auto floor_node = make_shared<op::Floor>(param);
    auto f = make_shared<Function>(floor_node, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto arg = backend->create_tensor(element::f32, shape);
    copy_data(arg, vector<float>{-2.5f, -2.0f, 0.3f, 4.8f});
    auto out = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {out}, {arg});
    // floor rounds toward -infinity; -2.0 is already integral and is unchanged.
    EXPECT_EQ((vector<float>{-3.0f, -2.0f, 0.0f, 4.0f}), read_vector<float>(out));
}
// Elementwise natural logarithm over powers of two from 2^-3 to 2^4; the
// expected values are therefore integer multiples of ln(2).
NGRAPH_TEST(${BACKEND_NAME}, log)
{
    Shape shape{2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Log>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{0.125f, 0.25f, 0.5f, 1.f, 2.f, 4.f, 8.f, 16.f});
    // Precomputed reference values: -3*ln2 .. 4*ln2.
    vector<float> loga{-2.07944154f,
                       -1.38629436f,
                       -0.69314718f,
                       0.00000000f,
                       0.69314718f,
                       1.38629436f,
                       2.07944154f,
                       2.77258872f};
    auto result = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {result}, {a});
    EXPECT_TRUE(test::all_close_f(loga, read_vector<float>(result)));
}
// Elementwise negation on a 2x3 f32 tensor; all values are exactly
// representable, so exact equality is used.
NGRAPH_TEST(${BACKEND_NAME}, negative)
{
    Shape shape{2, 3};
    auto param = make_shared<op::Parameter>(element::f32, shape);
    auto neg_node = make_shared<op::Negative>(param);
    auto f = make_shared<Function>(neg_node, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto arg = backend->create_tensor(element::f32, shape);
    copy_data(arg, vector<float>{1, -2, 0, -4.75f, 8.75f, -8.75f});
    auto out = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {out}, {arg});
    EXPECT_EQ((vector<float>{-1, 2, 0, 4.75f, -8.75f, 8.75f}), read_vector<float>(out));
}
// Elementwise logical NOT on a boolean tensor (stored as char).
NGRAPH_TEST(${BACKEND_NAME}, not)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::boolean, shape);
    auto f = make_shared<Function>(make_shared<op::Not>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::boolean, shape);
    // The 2 exercises that any nonzero value is treated as true.
    copy_data(a, vector<char>{1, 0, 2, 0});
    auto result = backend->create_tensor(element::boolean, shape);

    backend->call_with_validate(f, {result}, {a});
    EXPECT_EQ((vector<char>{0, 1, 0, 1}), read_vector<char>(result));
}
// Elementwise sign: -1 for negative, 0 for zero, +1 for positive.
NGRAPH_TEST(${BACKEND_NAME}, sign)
{
    Shape shape{2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Sign>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    // -0.0f exercises that negative zero maps to 0, not -1.
    copy_data(a, vector<float>{1, -2, 0, -4.8f, 4.8f, -0.0f});
    auto result = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {result}, {a});
    EXPECT_EQ((vector<float>{1, -1, 0, -1, 1, 0}), read_vector<float>(result));
}
// Elementwise sine over a symmetric range of inputs; expected values are
// precomputed reference constants checked with a float-tolerant comparison.
NGRAPH_TEST(${BACKEND_NAME}, sin)
{
    Shape shape{11};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Sin>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    vector<float> input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f};
    copy_data(a, input);
    auto result = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {result}, {a});
    // Odd function: sin(-x) == -sin(x), so expected values come in +/- pairs.
    EXPECT_TRUE(test::all_close_f(vector<float>{0.00000000f,
                                                0.24740396f,
                                                -0.24740396f,
                                                0.47942554f,
                                                -0.47942554f,
                                                0.84147098f,
                                                -0.84147098f,
                                                0.90929743f,
                                                -0.90929743f,
                                                -0.75680250f,
                                                0.75680250f},
                                  read_vector<float>(result)));
}
// Elementwise hyperbolic sine; reference values are computed on the host with
// sinhf and compared with a float-tolerant check.
NGRAPH_TEST(${BACKEND_NAME}, sinh)
{
    Shape shape{6};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Sinh>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    vector<float> input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f};
    copy_data(a, input);
    auto result = backend->create_tensor(element::f32, shape);

    // After the tensor has been filled, reuse `input` to hold the host-side
    // reference results by mapping sinhf over it in place.
    std::transform(
        input.begin(), input.end(), input.begin(), [](float x) -> float { return sinhf(x); });

    backend->call_with_validate(f, {result}, {a});
    EXPECT_TRUE(test::all_close_f(input, read_vector<float>(result)));
}
// Elementwise square root on a 2x3 f32 tensor of perfect squares; every
// expected value is exactly representable, so exact equality is used.
NGRAPH_TEST(${BACKEND_NAME}, sqrt)
{
    Shape shape{2, 3};
    auto param = make_shared<op::Parameter>(element::f32, shape);
    auto sqrt_node = make_shared<op::Sqrt>(param);
    auto f = make_shared<Function>(sqrt_node, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto arg = backend->create_tensor(element::f32, shape);
    copy_data(arg, vector<float>{16, 4, 81, 100, 10000, 0});
    auto out = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {out}, {arg});
    EXPECT_EQ((vector<float>{4, 2, 9, 10, 100, 0}), read_vector<float>(out));
}
// Elementwise tangent over a symmetric range of inputs; expected values are
// precomputed reference constants checked with a float-tolerant comparison.
NGRAPH_TEST(${BACKEND_NAME}, tan)
{
    Shape shape{11};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Tan>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    vector<float> input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f};
    copy_data(a, input);
    auto result = backend->create_tensor(element::f32, shape);

    backend->call_with_validate(f, {result}, {a});
    // Odd function: tan(-x) == -tan(x); note the sign flip past pi/2 (x = 2, 4).
    EXPECT_TRUE(test::all_close_f(vector<float>{0.00000000f,
                                                0.25534192f,
                                                -0.25534192f,
                                                0.54630249f,
                                                -0.54630249f,
                                                1.55740772f,
                                                -1.55740772f,
                                                -2.18503986f,
                                                2.18503986f,
                                                1.15782128f,
                                                -1.15782128f},
                                  read_vector<float>(result)));
}
// Elementwise hyperbolic tangent; reference values are computed on the host
// with tanhf and compared with a float-tolerant check.
NGRAPH_TEST(${BACKEND_NAME}, tanh)
{
    Shape shape{6};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Tanh>(A), op::ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    vector<float> input{1.0f, 0.0f, -0.0f, -1.0f, 0.5f, -0.5f};
    copy_data(a, input);
    auto result = backend->create_tensor(element::f32, shape);

    // After the tensor has been filled, reuse `input` to hold the host-side
    // reference results by mapping tanhf over it in place.
    std::transform(
        input.begin(), input.end(), input.begin(), [](float x) -> float { return tanhf(x); });

    backend->call_with_validate(f, {result}, {a});
    EXPECT_TRUE(test::all_close_f(input, read_vector<float>(result)));
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment