Commit 818c0bcb authored by Adam Procter, committed by Scott Cyphers

Finish moving unit tests into test/backend (#3293)

* Finish moving things into test/backend

* Update CODEOWNERS for moved file

* Tweak GPU manifest for renamed test
parent a095c587
@@ -61,7 +61,7 @@ project/doc-contributor-README.rst @indie
/src/ngraph/type/ @diyessi
/src/ngraph/serializer.*pp @rkimballn1
-/test/distributed.in.cpp @wenzhe-nrv @diyessi @jianyinglang
+/test/backend/distributed.in.cpp @wenzhe-nrv @diyessi @jianyinglang
# Putting this last so it's not overridden by directory rules
CMakeLists.txt @rkimballn1 @silee2
@@ -98,7 +98,7 @@ all_2x2x3_eliminate_dims_0_1
all_2x2x3_eliminate_dims_0_2
all_2x2x3_eliminate_dims_1_2
all_2x2x3_eliminate_dims_0_1_2
-dynamic_GPU.all
+all_dynamic
# GPU backend uses floats to implement these ops for int32
floor_int32
@@ -207,20 +207,20 @@ add_subdirectory(util)
# backend specific test files must meet the following requirements:
# 1) They must be named <name>.in.cpp
-# 2) They must be in the test directory
+# 2) They must be in the `test/backend` directory
# 3) Include "util/test_control.hpp" in your cpp file
# 4) add the line `static string s_manifest = "${MANIFEST}";` to your cpp file
# 5) Use the `NGRAPH_TEST` macro in place of `TEST`.
# All such files are configured via cmake which replaces all instances of cmake variables
# such as ${BACKEND_NAME} with their values, such as CPU, GPU, or INTERPRETER.
set(MULTI_TEST_SRC
-    autodiff.in.cpp
     backend/abc.in.cpp
     backend/aliased_output.in.cpp
     backend/all.in.cpp
     backend/any.in.cpp
     backend/api.in.cpp
     backend/arg_reduce.in.cpp
+    backend/autodiff.in.cpp
     backend/batch_mat_mul.in.cpp
     backend/batch_norm.in.cpp
     backend/binary_elementwise.in.cpp
@@ -231,7 +231,13 @@ set(MULTI_TEST_SRC
     backend/constant.in.cpp
     backend/convert.in.cpp
     backend/convolution.in.cpp
+    backend/convolution_reference.in.cpp
     backend/dot.in.cpp
+    backend/dyn_broadcast.in.cpp
+    backend/dyn_replace_slice_reference.in.cpp
+    backend/dyn_reshape.in.cpp
+    backend/dyn_slice_reference.in.cpp
+    backend/dynamic.in.cpp
     backend/embedding_lookup.in.cpp
     backend/function_name.in.cpp
     backend/fused_op.in.cpp
@@ -253,6 +259,7 @@ set(MULTI_TEST_SRC
     backend/product.in.cpp
     backend/quantize_dequantize.in.cpp
     backend/quantized_convolution.in.cpp
+    backend/range.in.cpp
     backend/relu.in.cpp
     backend/replace_slice.in.cpp
     backend/reshape.in.cpp
@@ -267,14 +274,11 @@ set(MULTI_TEST_SRC
     backend/sum.in.cpp
     backend/tensorview_custom_mem.in.cpp
     backend/topk.in.cpp
+    backend/transpose.in.cpp
     backend/unhandled_op.in.cpp
     backend/unary_elementwise.in.cpp
     backend/validate_call.in.cpp
     backend/zero_sized.in.cpp
-    convolution_test.in.cpp
-    dyn_replace_slice_test.in.cpp
-    dyn_slice_test.in.cpp
-    dynamic.in.cpp
)

if (NGRAPH_MLIR_ENABLE)
@@ -282,11 +286,11 @@ if (NGRAPH_MLIR_ENABLE)
endif()
if(NGRAPH_DISTRIBUTED_ENABLE)
-    list(APPEND MULTI_TEST_SRC distributed.in.cpp)
+    list(APPEND MULTI_TEST_SRC backend/distributed.in.cpp)
endif()
if (NGRAPH_CPU_ENABLE)
-    list(APPEND MULTI_TEST_SRC backend_graph_comparison.in.cpp)
+    list(APPEND MULTI_TEST_SRC backend/graph_comparison.in.cpp)
endif()
if (NGRAPH_ONNX_IMPORT_ENABLE)
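A minimal sketch of what rules 1-5 above produce in practice. The file name `example.in.cpp` and the test body are hypothetical placeholders, not files added by this commit; the pattern itself is exactly the one every `backend/*.in.cpp` further down in this diff follows:

```cpp
// Hypothetical test/backend/example.in.cpp -- a sketch, not part of this commit.
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp" // rule 3: provides the NGRAPH_TEST machinery
#include "util/test_tools.hpp"

using namespace std;
using namespace ngraph;

// Rule 4: cmake substitutes ${MANIFEST} with the per-backend manifest path;
// tests listed in that manifest (e.g. "all_dynamic" in the GPU manifest above)
// are skipped for that backend.
static string s_manifest = "${MANIFEST}";

// Rule 5: NGRAPH_TEST instead of TEST. ${BACKEND_NAME} is replaced by cmake
// with CPU, GPU, INTERPRETER, etc., so one source file is compiled once per
// configured backend.
NGRAPH_TEST(${BACKEND_NAME}, example_abc)
{
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    ASSERT_NE(backend, nullptr);
}
```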
@@ -316,3 +316,53 @@ NGRAPH_TEST(${BACKEND_NAME}, all_change_axis)
    handle->call_with_validate({result}, {a});
    EXPECT_EQ((vector<char>{1, 0, 1}), read_vector<char>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, all_dynamic)
{
    // Create a graph for f(x,axes:int32) = All(x,Convert<int64>(axes)).
    auto x = make_shared<op::Parameter>(element::boolean, PartialShape::dynamic());
    auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
    auto axes_i64 = make_shared<op::Convert>(axes, element::i64);

    auto all = make_shared<op::All>(x, axes_i64);
    ASSERT_TRUE(all->get_output_partial_shape(0).rank().is_dynamic());

    auto f = make_shared<Function>(NodeVector{all}, ParameterVector{x, axes});

    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
    auto ex = backend->compile(f);

    auto t_r = backend->create_dynamic_tensor(element::boolean, PartialShape::dynamic());

    std::vector<Shape> x_shapes{
        Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}};
    std::vector<std::vector<int32_t>> axeses{{}, {0}, {1}, {0, 1}, {}, {0}};
    std::vector<std::vector<char>> inputs{{1, 0, 1, 0, 1, 0},
                                          {1, 0, 1, 0, 0, 1},
                                          {1, 0, 1, 1, 1, 1},
                                          {1, 0, 1, 0, 1, 0},
                                          {1, 0, 1, 0, 1},
                                          {1, 0, 1, 0, 1}};
    std::vector<Shape> expected_result_shapes{
        Shape{2, 3}, Shape{3}, Shape{2}, Shape{}, Shape{5}, Shape{}};
    std::vector<std::vector<char>> expected_results{
        {1, 0, 1, 0, 1, 0}, {0, 0, 1}, {0, 1}, {0}, {1, 0, 1, 0, 1}, {0}};

    for (size_t i = 0; i < x_shapes.size(); i++)
    {
        auto t_x = backend->create_tensor(element::boolean, x_shapes[i]);
        auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});

        copy_data(t_x, inputs[i]);
        copy_data(t_axes, axeses[i]);

        ex->call_with_validate({t_r}, {t_x, t_axes});

        ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);

        auto results = read_vector<char>(t_r);

        ASSERT_EQ(results, expected_results[i]);
    }
}
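As a side note on what the expected shapes above encode: reducing with All removes each reduced axis from the input shape, and an empty axis list leaves the shape untouched. The same rule governs the `sum_dynamic` test further down. A standalone sketch of that rule, with a hypothetical helper name (this is not nGraph code):

```cpp
#include <cstddef>
#include <set>
#include <vector>

// Shape that a reduction such as All (or Sum) should produce: every axis
// named in `axes` is removed, the remaining dimensions survive in order.
std::vector<size_t> reduced_shape(const std::vector<size_t>& in_shape,
                                  const std::set<size_t>& axes)
{
    std::vector<size_t> out;
    for (size_t i = 0; i < in_shape.size(); i++)
    {
        if (axes.count(i) == 0)
        {
            out.push_back(in_shape[i]);
        }
    }
    return out;
}

// reduced_shape({2, 3}, {0})    -> {3}
// reduced_shape({2, 3}, {0, 1}) -> {}  (a scalar, as in the fourth case above)
// reduced_shape({5},    {})     -> {5} (empty axis list: no-op)
```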
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close_f.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, dyn_broadcast)
{
    // Create a graph for
    // f(x,shape:i32,axes:i32) = Broadcast(x,Convert<i64>(shape),Convert<i64>(axes)).
    auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto shape = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
    auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
    auto shape_i64 = make_shared<op::Convert>(shape, element::i64);
    auto axes_i64 = make_shared<op::Convert>(axes, element::i64);

    auto bc = make_shared<op::DynBroadcast>(x, shape_i64, axes_i64);

    auto f = make_shared<Function>(NodeVector{bc}, ParameterVector{x, shape, axes});

    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
    auto ex = backend->compile(f);

    auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    std::vector<Shape> x_shapes{Shape{}, Shape{}, Shape{2}, Shape{2}};
    std::vector<std::vector<int32_t>> shapes{{2, 2}, {2, 2, 2}, {3, 2}, {2, 3}};
    std::vector<std::vector<int32_t>> axeses{{0, 1}, {0, 1, 2}, {0}, {1}};
    std::vector<std::vector<float>> inputs{{6}, {7}, {10, 11}, {10, 11}};
    std::vector<Shape> expected_result_shapes{
        Shape{2, 2}, Shape{2, 2, 2}, Shape{3, 2}, Shape{2, 3}};
    std::vector<std::vector<float>> expected_results{
        {6, 6, 6, 6}, {7, 7, 7, 7, 7, 7, 7, 7}, {10, 11, 10, 11, 10, 11}, {10, 10, 10, 11, 11, 11}};

    for (size_t i = 0; i < x_shapes.size(); i++)
    {
        auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
        auto t_shape = backend->create_tensor(element::i32, Shape{shapes[i].size()});
        auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});

        copy_data(t_x, inputs[i]);
        copy_data(t_shape, shapes[i]);
        copy_data(t_axes, axeses[i]);

        ex->call_with_validate({t_r}, {t_x, t_shape, t_axes});

        ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);

        auto results = read_vector<float>(t_r);

        ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
    }
}
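The expected_results above follow a simple rule: each broadcast axis names an output dimension that does not exist in the input, and every output element copies the input element obtained by deleting those coordinates. A standalone sketch of that rule, as a hypothetical reference helper (not nGraph's DynBroadcast kernel):

```cpp
#include <cstddef>
#include <set>
#include <vector>

std::vector<float> broadcast_ref(const std::vector<float>& in,
                                 const std::vector<size_t>& in_shape,
                                 const std::vector<size_t>& out_shape,
                                 const std::set<size_t>& axes)
{
    size_t out_size = 1;
    for (size_t d : out_shape)
    {
        out_size *= d;
    }
    std::vector<float> out(out_size);
    for (size_t flat = 0; flat < out_size; flat++)
    {
        // Decode the flat output index into per-dimension coordinates.
        std::vector<size_t> coord(out_shape.size());
        size_t rem = flat;
        for (size_t d = out_shape.size(); d-- > 0;)
        {
            coord[d] = rem % out_shape[d];
            rem /= out_shape[d];
        }
        // Deleting the broadcast axes yields the source coordinate.
        size_t src = 0;
        size_t k = 0;
        for (size_t d = 0; d < out_shape.size(); d++)
        {
            if (axes.count(d) == 0)
            {
                src = src * in_shape[k++] + coord[d];
            }
        }
        out[flat] = in[src];
    }
    return out;
}

// broadcast_ref({10, 11}, {2}, {2, 3}, {1}) -> {10, 10, 10, 11, 11, 11},
// matching the last test case above.
```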
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close_f.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, dyn_reshape)
{
    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);

    auto build_graph = [&backend](bool zero_flag) {
        // Create a graph for f(x,shape) = DynReshape(x,shape,zero_flag=zero_flag).
        auto x = make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
        auto shape = make_shared<op::Parameter>(element::i64, PartialShape::dynamic(1));

        auto dyn_reshape = make_shared<op::DynReshape>(x, shape, zero_flag);
        EXPECT_TRUE(dyn_reshape->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));

        auto f = make_shared<Function>(NodeVector{dyn_reshape}, ParameterVector{x, shape});

        auto ex = backend->compile(f);

        return ex;
    };

    auto t_r = backend->create_dynamic_tensor(element::i32, PartialShape::dynamic());

    auto ex_flag_off = build_graph(false);
    auto ex_flag_on = build_graph(true);

    std::vector<std::tuple<bool, Shape, std::vector<int32_t>, std::vector<int64_t>, Shape>> tests;

    tests.emplace_back(make_tuple(
        false, Shape{2, 3}, vector<int32_t>{1, 2, 3, 4, 5, 6}, vector<int64_t>{6}, Shape{6}));
    tests.emplace_back(make_tuple(
        true, Shape{2, 3}, vector<int32_t>{1, 2, 3, 4, 5, 6}, vector<int64_t>{6}, Shape{6}));
    tests.emplace_back(make_tuple(
        false, Shape{2, 3}, vector<int32_t>{1, 2, 3, 4, 5, 6}, vector<int64_t>{-1}, Shape{6}));
    tests.emplace_back(make_tuple(false,
                                  Shape{2, 3},
                                  vector<int32_t>{1, 2, 3, 4, 5, 6},
                                  vector<int64_t>{2, -1},
                                  Shape{2, 3}));
    tests.emplace_back(make_tuple(false,
                                  Shape{2, 3},
                                  vector<int32_t>{1, 2, 3, 4, 5, 6},
                                  vector<int64_t>{3, -1},
                                  Shape{3, 2}));
    tests.emplace_back(make_tuple(false,
                                  Shape{2, 3},
                                  vector<int32_t>{1, 2, 3, 4, 5, 6},
                                  vector<int64_t>{3, 2, -1},
                                  Shape{3, 2, 1}));
    tests.emplace_back(make_tuple(true,
                                  Shape{2, 3},
                                  vector<int32_t>{1, 2, 3, 4, 5, 6},
                                  vector<int64_t>{3, 2, -1},
                                  Shape{3, 2, 1}));
    tests.emplace_back(make_tuple(true,
                                  Shape{2, 3},
                                  vector<int32_t>{1, 2, 3, 4, 5, 6},
                                  vector<int64_t>{0, 0, -1},
                                  Shape{2, 3, 1}));
    tests.emplace_back(make_tuple(true,
                                  Shape{2, 3},
                                  vector<int32_t>{1, 2, 3, 4, 5, 6},
                                  vector<int64_t>{2, 0, -1},
                                  Shape{2, 3, 1}));
    tests.emplace_back(make_tuple(
        true, Shape{0, 3, 4}, vector<int32_t>{}, vector<int64_t>{3, -1, 2}, Shape{3, 0, 2}));

    for (auto& test : tests)
    {
        bool zero_flag = get<0>(test);
        const Shape& in_shape = get<1>(test);
        const std::vector<int32_t>& data = get<2>(test);
        const std::vector<int64_t>& dims = get<3>(test);
        const Shape& out_shape = get<4>(test);

        auto t_x = backend->create_tensor(element::i32, in_shape);
        auto t_shape = backend->create_tensor(element::i64, Shape{dims.size()});

        copy_data(t_x, data);
        copy_data(t_shape, dims);

        auto ex = zero_flag ? ex_flag_on : ex_flag_off;

        ex->call_with_validate({t_r}, {t_x, t_shape});

        ASSERT_EQ(t_r->get_element_type(), element::i32);
        ASSERT_EQ(t_r->get_shape(), out_shape);

        auto results = read_vector<int32_t>(t_r);

        ASSERT_EQ(results, data);
    }
}
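The tuples above exercise two conventions in the target shape: a single -1 dimension is inferred so that the total element count is preserved, and, when zero_flag is set, a 0 copies the corresponding input dimension. A standalone sketch of that inference, with a hypothetical helper name (not nGraph's DynReshape validation code):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<size_t> infer_reshape(const std::vector<size_t>& in_shape,
                                  const std::vector<int64_t>& dims,
                                  bool zero_flag)
{
    size_t in_elems = 1;
    for (size_t d : in_shape)
    {
        in_elems *= d;
    }
    std::vector<size_t> out(dims.size());
    size_t known = 1;
    int64_t infer_at = -1;
    for (size_t i = 0; i < dims.size(); i++)
    {
        int64_t d = dims[i];
        if (zero_flag && d == 0)
        {
            d = static_cast<int64_t>(in_shape[i]); // 0 copies input dimension i
        }
        if (d == -1)
        {
            infer_at = static_cast<int64_t>(i); // fill in after the scan
        }
        else
        {
            out[i] = static_cast<size_t>(d);
            known *= out[i];
        }
    }
    if (infer_at >= 0)
    {
        // The -1 dimension absorbs whatever is left of the element count.
        out[static_cast<size_t>(infer_at)] = known == 0 ? 0 : in_elems / known;
    }
    return out;
}

// infer_reshape({2, 3},    {0, 0, -1}, true)  -> {2, 3, 1}
// infer_reshape({2, 3},    {3, -1},    false) -> {3, 2}
// infer_reshape({0, 3, 4}, {3, -1, 2}, true)  -> {3, 0, 2} (last case above)
```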
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close_f.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
template <typename T>
struct RangeTest
{
    T start;
    T stop;
    T step;
    Shape expected_result_shape;
    std::vector<T> expected_result;
};

// TODO(amprocte): We should test this with more than just int32, but there is a bug in the
// handling of element type-changing that is currently blocking doing that easily.
NGRAPH_TEST(${BACKEND_NAME}, range)
{
    // Create a graph for f(start,stop,step) = Range(start,stop,step).
    auto start = make_shared<op::Parameter>(element::i32, Shape{});
    auto stop = make_shared<op::Parameter>(element::i32, Shape{});
    auto step = make_shared<op::Parameter>(element::i32, Shape{});

    auto range = make_shared<op::Range>(start, stop, step);
    ASSERT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1)));

    auto f = make_shared<Function>(NodeVector{range}, ParameterVector{start, stop, step});

    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
    auto ex = backend->compile(f);

    auto t_r = backend->create_dynamic_tensor(element::i32, PartialShape::dynamic());

    std::vector<RangeTest<int32_t>> int32_tests = {
        RangeTest<int32_t>{0, 10, 1, Shape{10}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
        RangeTest<int32_t>{-5, 6, 3, Shape{4}, {-5, -2, 1, 4}},
        RangeTest<int32_t>{10, 0, 1, Shape{0}, {}},
        RangeTest<int32_t>{10, 5, -3, Shape{2}, {10, 7}}};

    for (auto& test : int32_tests)
    {
        auto t_start = backend->create_tensor(element::i32, Shape{});
        auto t_stop = backend->create_tensor(element::i32, Shape{});
        auto t_step = backend->create_tensor(element::i32, Shape{});

        copy_data(t_start, std::vector<int32_t>{test.start});
        copy_data(t_stop, std::vector<int32_t>{test.stop});
        copy_data(t_step, std::vector<int32_t>{test.step});

        ex->call_with_validate({t_r}, {t_start, t_stop, t_step});

        ASSERT_EQ(t_r->get_element_type(), element::i32);
        ASSERT_EQ(t_r->get_shape(), test.expected_result_shape);

        auto results = read_vector<int32_t>(t_r);

        ASSERT_EQ(results, test.expected_result);
    }
}
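The expected shapes above follow the usual range-length arithmetic: the output is one-dimensional with max(ceil((stop - start) / step), 0) elements. A standalone sketch, with a hypothetical helper name, checked against the four RangeTest cases:

```cpp
#include <cmath>
#include <cstddef>
#include <cstdint>

size_t range_size(int32_t start, int32_t stop, int32_t step)
{
    // ceil((stop - start) / step), clamped at zero for empty ranges.
    double n = std::ceil((static_cast<double>(stop) - static_cast<double>(start)) /
                         static_cast<double>(step));
    return n < 0 ? 0 : static_cast<size_t>(n);
}

// range_size(0, 10, 1)  -> 10
// range_size(-5, 6, 3)  -> 4  (ceil(11 / 3))
// range_size(10, 0, 1)  -> 0  (stop < start with a positive step: empty)
// range_size(10, 5, -3) -> 2  (ceil(-5 / -3))
```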
@@ -690,3 +690,53 @@ NGRAPH_TEST(${BACKEND_NAME}, sum_stable_simple_double)
#endif
#endif
NGRAPH_TEST(${BACKEND_NAME}, sum_dynamic)
{
    // Create a graph for f(x,axes:int32) = Sum(x,Convert<int64>(axes)).
    auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
    auto axes_i64 = make_shared<op::Convert>(axes, element::i64);

    auto sum = make_shared<op::Sum>(x, axes_i64);
    ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());

    auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x, axes});

    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
    auto ex = backend->compile(f);

    auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    std::vector<Shape> x_shapes{
        Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}};
    std::vector<std::vector<int32_t>> axeses{{}, {0}, {1}, {0, 1}, {}, {0}};
    std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6},
                                           {1, 2, 3, 4, 5, 6},
                                           {1, 2, 3, 4, 5, 6},
                                           {1, 2, 3, 4, 5, 6},
                                           {1, 2, 3, 4, 5},
                                           {1, 2, 3, 4, 5}};
    std::vector<Shape> expected_result_shapes{
        Shape{2, 3}, Shape{3}, Shape{2}, Shape{}, Shape{5}, Shape{}};
    std::vector<std::vector<float>> expected_results{
        {1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}};

    for (size_t i = 0; i < x_shapes.size(); i++)
    {
        auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
        auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});

        copy_data(t_x, inputs[i]);
        copy_data(t_axes, axeses[i]);

        ex->call_with_validate({t_r}, {t_x, t_axes});

        ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);

        auto results = read_vector<float>(t_r);

        ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
    }
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close_f.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(${BACKEND_NAME}, transpose)
{
    //
    // Create a graph for f(x,perm) = Transpose(x,Convert<i64>(perm)). We'll do the permutation in
    // i32 and cast it to i64, just for fun (and to mirror the TensorFlow test I am porting here).
    //
    auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto perm = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
    auto perm_i64 = make_shared<op::Convert>(perm, element::i64);

    auto x_transpose = make_shared<op::Transpose>(x, perm_i64);

    auto f = make_shared<Function>(NodeVector{x_transpose}, ParameterVector{x, perm});

    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
    auto ex = backend->compile(f);

    auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    std::vector<Shape> x_shapes{Shape{2, 3}, Shape{2, 3}, Shape{2, 2, 3}};
    std::vector<std::vector<int32_t>> perms{{0, 1}, {1, 0}, {2, 1, 0}};
    std::vector<std::vector<float>> inputs{
        {1, 2, 3, 4, 5, 6}, {1, 2, 3, 4, 5, 6}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}};
    std::vector<Shape> expected_result_shapes{Shape{2, 3}, Shape{3, 2}, Shape{3, 2, 2}};

    // Generated with numpy, so don't worry. :)
    std::vector<std::vector<float>> expected_results{
        {1, 2, 3, 4, 5, 6}, {1, 4, 2, 5, 3, 6}, {1, 7, 4, 10, 2, 8, 5, 11, 3, 9, 6, 12}};

    for (size_t i = 0; i < x_shapes.size(); i++)
    {
        auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
        auto t_perm = backend->create_tensor(element::i32, Shape{perms[i].size()});

        copy_data(t_x, inputs[i]);
        copy_data(t_perm, perms[i]);

        ex->call_with_validate({t_r}, {t_x, t_perm});

        ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);

        auto results = read_vector<float>(t_r);

        ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
    }
}
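For reference, the "generated with numpy" expected_results above encode plain permutation semantics: output dimension i is input dimension perm[i], and output coordinate c reads the input at the coordinate obtained by scattering c through perm. A standalone sketch of that mapping, with a hypothetical helper name (not nGraph's Transpose kernel):

```cpp
#include <cstddef>
#include <vector>

std::vector<float> transpose_ref(const std::vector<float>& in,
                                 const std::vector<size_t>& in_shape,
                                 const std::vector<size_t>& perm)
{
    size_t rank = in_shape.size();
    std::vector<size_t> out_shape(rank);
    for (size_t i = 0; i < rank; i++)
    {
        out_shape[i] = in_shape[perm[i]]; // output dim i comes from input dim perm[i]
    }
    std::vector<float> out(in.size());
    for (size_t flat = 0; flat < out.size(); flat++)
    {
        // Decode the flat output index into coordinates.
        std::vector<size_t> oc(rank);
        size_t rem = flat;
        for (size_t d = rank; d-- > 0;)
        {
            oc[d] = rem % out_shape[d];
            rem /= out_shape[d];
        }
        // Scatter into input coordinates and re-flatten row-major.
        std::vector<size_t> ic(rank);
        for (size_t i = 0; i < rank; i++)
        {
            ic[perm[i]] = oc[i];
        }
        size_t src = 0;
        for (size_t d = 0; d < rank; d++)
        {
            src = src * in_shape[d] + ic[d];
        }
        out[flat] = in[src];
    }
    return out;
}

// transpose_ref({1, 2, 3, 4, 5, 6}, {2, 3}, {1, 0}) -> {1, 4, 2, 5, 3, 6},
// matching the second expected result above.
```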
@@ -15,4 +15,4 @@
# limitations under the License.
# ******************************************************************************
declare THIS_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-python ${THIS_SCRIPT_DIR}/ref_generators/generate_convolution_ref.py ${THIS_SCRIPT_DIR}/convolution_test.in.cpp
+python ${THIS_SCRIPT_DIR}/ref_generators/generate_convolution_ref.py ${THIS_SCRIPT_DIR}/backend/convolution_reference.in.cpp
@@ -15,4 +15,4 @@
# limitations under the License.
# ******************************************************************************
declare THIS_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-python ${THIS_SCRIPT_DIR}/ref_generators/generate_dyn_replace_slice_ref.py ${THIS_SCRIPT_DIR}/dyn_replace_slice_test.in.cpp
+python ${THIS_SCRIPT_DIR}/ref_generators/generate_dyn_replace_slice_ref.py ${THIS_SCRIPT_DIR}/backend/dyn_replace_slice_reference.in.cpp
@@ -15,4 +15,4 @@
# limitations under the License.
# ******************************************************************************
declare THIS_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-python ${THIS_SCRIPT_DIR}/ref_generators/generate_dyn_slice_ref.py ${THIS_SCRIPT_DIR}/dyn_slice_test.in.cpp
+python ${THIS_SCRIPT_DIR}/ref_generators/generate_dyn_slice_ref.py ${THIS_SCRIPT_DIR}/backend/dyn_slice_reference.in.cpp