Commit da7bc635 authored by Leona C's avatar Leona C

Be sure test file is latest from master branch

parent 2d6f90ab
......@@ -30,6 +30,7 @@
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/dequantize.hpp"
#include "ngraph/op/experimental/compiled_kernel.hpp"
#include "ngraph/op/experimental/generate_mask.hpp"
#include "ngraph/op/experimental/quantized_concat.hpp"
#include "ngraph/op/experimental/quantized_conv.hpp"
......@@ -69,7 +70,6 @@
#include "ngraph/runtime/cpu/op/dropout.hpp"
#include "ngraph/runtime/cpu/op/group_conv_bias.hpp"
#include "ngraph/runtime/cpu/op/leaky_relu.hpp"
#include "ngraph/runtime/cpu/op/loop_kernel.hpp"
#include "ngraph/runtime/cpu/op/lstm.hpp"
#include "ngraph/runtime/cpu/op/matmul_bias.hpp"
#include "ngraph/runtime/cpu/op/rnn.hpp"
......@@ -77,7 +77,6 @@
#include "ngraph/runtime/cpu/op/sigmoid_mul.hpp"
#include "ngraph/runtime/cpu/op/update_slice.hpp"
#include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
#include "ngraph/runtime/cpu/pass/cpu_loop_kernel_fusion.hpp"
#include "ngraph/runtime/cpu/pass/cpu_mat_fusion.hpp"
#include "ngraph/runtime/cpu/pass/cpu_post_layout_optimizations.hpp"
#include "ngraph/runtime/cpu/pass/cpu_rnn_fusion.hpp"
......@@ -1416,15 +1415,15 @@ TEST(cpu_fusion, backwards_maxpool_with_indices_n4_c1_hw4_2x2_max)
#if defined(NGRAPH_HALIDE)
TEST(cpu_fusion, loop_kernel_one_input_one_output_halide)
TEST(cpu_fusion, compiled_kernel_one_input_one_output_halide)
{
Shape shapeA{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shapeA);
auto relu_a = make_shared<op::Relu>(A);
auto relu_relu_a = make_shared<op::Relu>(relu_a);
auto lk = make_shared<runtime::cpu::op::LoopKernel>(
auto ck = make_shared<op::CompiledKernel>(
NodeVector{relu_a, relu_relu_a}, NodeVector{relu_relu_a}, NodeVector{A});
auto f = make_shared<Function>(NodeVector{lk}, ParameterVector{A});
auto f = make_shared<Function>(NodeVector{ck}, ParameterVector{A});
auto backend = runtime::Backend::create("CPU");
shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shapeA);
......@@ -1440,7 +1439,7 @@ TEST(cpu_fusion, loop_kernel_one_input_one_output_halide)
EXPECT_TRUE(test::all_close(read_vector<float>(result), expected));
}
TEST(cpu_fusion, loop_kernel_two_input_two_output_halide)
TEST(cpu_fusion, compiled_kernel_two_input_two_output_halide)
{
Shape shapeA{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shapeA);
......@@ -1448,11 +1447,11 @@ TEST(cpu_fusion, loop_kernel_two_input_two_output_halide)
auto relu_a = make_shared<op::Relu>(A);
auto add_ab = make_shared<op::Add>(relu_a, B);
auto lk = make_shared<runtime::cpu::op::LoopKernel>(
auto ck = make_shared<op::CompiledKernel>(
NodeVector{relu_a, add_ab}, NodeVector{relu_a, add_ab}, NodeVector{A, B});
auto goe1 = make_shared<op::GetOutputElement>(lk, 0);
auto goe2 = make_shared<op::GetOutputElement>(lk, 1);
auto goe1 = make_shared<op::GetOutputElement>(ck, 0);
auto goe2 = make_shared<op::GetOutputElement>(ck, 1);
auto f = make_shared<Function>(NodeVector{goe1, goe2}, ParameterVector{A, B});
auto backend = runtime::Backend::create("CPU");
......@@ -1474,7 +1473,7 @@ TEST(cpu_fusion, loop_kernel_two_input_two_output_halide)
EXPECT_TRUE(test::all_close(read_vector<float>(result_relu), expected_relu));
}
TEST(cpu_fusion, loop_kernel_embedded_graph_halide)
TEST(cpu_fusion, compiled_kernel_embedded_graph_halide)
{
Shape shapeA{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shapeA);
......@@ -1482,9 +1481,9 @@ TEST(cpu_fusion, loop_kernel_embedded_graph_halide)
auto neg_a = make_shared<op::Negative>(A);
auto neg_b = make_shared<op::Negative>(B);
auto add = neg_a + neg_b;
auto lk = make_shared<runtime::cpu::op::LoopKernel>(
NodeVector{add}, NodeVector{add}, NodeVector{neg_a, neg_b});
auto f = make_shared<Function>(NodeVector{lk}, ParameterVector{A, B});
auto ck =
make_shared<op::CompiledKernel>(NodeVector{add}, NodeVector{add}, NodeVector{neg_a, neg_b});
auto f = make_shared<Function>(NodeVector{ck}, ParameterVector{A, B});
auto backend = runtime::Backend::create("CPU");
shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shapeA);
......@@ -1501,15 +1500,14 @@ TEST(cpu_fusion, loop_kernel_embedded_graph_halide)
EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
}
TEST(cpu_fusion, loop_kernel_two_inputs_one_output_halide)
TEST(cpu_fusion, compiled_kernel_two_inputs_one_output_halide)
{
Shape shapeA{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shapeA);
auto B = make_shared<op::Parameter>(element::f32, shapeA);
auto add = A + B;
auto lk = make_shared<runtime::cpu::op::LoopKernel>(
NodeVector{add}, NodeVector{add}, NodeVector{A, B});
auto f = make_shared<Function>(NodeVector{lk}, ParameterVector{A, B});
auto ck = make_shared<op::CompiledKernel>(NodeVector{add}, NodeVector{add}, NodeVector{A, B});
auto f = make_shared<Function>(NodeVector{ck}, ParameterVector{A, B});
auto backend = runtime::Backend::create("CPU");
shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shapeA);
......@@ -1528,7 +1526,7 @@ TEST(cpu_fusion, loop_kernel_two_inputs_one_output_halide)
EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
}
TEST(cpu_fusion, loop_kernel_multiple_outputs_halide)
TEST(cpu_fusion, compiled_kernel_multiple_outputs_halide)
{
Shape shapeA{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shapeA);
......@@ -1545,13 +1543,13 @@ TEST(cpu_fusion, loop_kernel_multiple_outputs_halide)
auto add_aab = add_ab_abs + A;
auto add_cdd = add_cd_abs + D;
auto lk = make_shared<runtime::cpu::op::LoopKernel>(
auto ck = make_shared<op::CompiledKernel>(
NodeVector{neg_a, neg_b, add_ab, add_cd, add_cd_abs, add_ab_abs, add_aab, add_cdd},
NodeVector{add_aab, add_cdd, neg_b},
NodeVector{A, B, C, D});
auto add_aab_goe = std::make_shared<op::GetOutputElement>(lk, 0);
auto add_cdd_goe = std::make_shared<op::GetOutputElement>(lk, 1);
auto neg_b_goe = std::make_shared<op::GetOutputElement>(lk, 2);
auto add_aab_goe = std::make_shared<op::GetOutputElement>(ck, 0);
auto add_cdd_goe = std::make_shared<op::GetOutputElement>(ck, 1);
auto neg_b_goe = std::make_shared<op::GetOutputElement>(ck, 2);
auto f = make_shared<Function>(NodeVector{add_aab_goe, add_cdd_goe, neg_b_goe},
ParameterVector{A, B, C, D});
......@@ -1586,7 +1584,7 @@ TEST(cpu_fusion, loop_kernel_multiple_outputs_halide)
EXPECT_TRUE(test::all_close_f(read_vector<float>(r3), expected3, MIN_FLOAT_TOLERANCE_BITS));
}
TEST(cpu_fusion, loop_kernel_copy_with_new_args)
TEST(cpu_fusion, compiled_kernel_copy_with_new_args)
{
Shape shapeA{2, 2};
auto A = make_shared<op::Parameter>(element::i32, shapeA);
......@@ -1603,13 +1601,13 @@ TEST(cpu_fusion, loop_kernel_copy_with_new_args)
auto add_aab = add_ab_abs + A;
auto add_cdd = add_cd_abs + D;
auto lk = make_shared<runtime::cpu::op::LoopKernel>(
auto ck = make_shared<op::CompiledKernel>(
NodeVector{neg_a, neg_b, add_ab, add_cd, add_cd_abs, add_ab_abs, add_aab, add_cdd},
NodeVector{add_aab, add_cdd, neg_b},
NodeVector{A, B, C, D});
auto add_aab_goe = std::make_shared<op::GetOutputElement>(lk, 0);
auto add_cdd_goe = std::make_shared<op::GetOutputElement>(lk, 1);
auto neg_b_goe = std::make_shared<op::GetOutputElement>(lk, 2);
auto add_aab_goe = std::make_shared<op::GetOutputElement>(ck, 0);
auto add_cdd_goe = std::make_shared<op::GetOutputElement>(ck, 1);
auto neg_b_goe = std::make_shared<op::GetOutputElement>(ck, 2);
auto f = make_shared<Function>(NodeVector{add_aab_goe, add_cdd_goe, neg_b_goe},
ParameterVector{A, B, C, D});
......@@ -2170,7 +2168,7 @@ TEST(cpu_fusion, rnn_fprop_1_lstm_cell)
#if 0
TEST(cpu_fusion, loop_kernel_fusion_multiple_groups_pruned)
TEST(cpu_fusion, compiled_kernel_fusion_multiple_groups_pruned)
{
auto make_function = []() -> std::shared_ptr<Function> {
Shape shape{};
......@@ -2195,15 +2193,15 @@ TEST(cpu_fusion, loop_kernel_fusion_multiple_groups_pruned)
};
pass::Manager pass_manager;
pass_manager.register_pass<runtime::cpu::pass::CPULoopKernelFusion>(3);
pass_manager.register_pass<runtime::cpu::pass::CPUCompiledKernelFusion>(3);
auto cpu_f = make_function();
auto int_f = make_function();
pass_manager.run_passes(cpu_f);
test::Uniform<float> rng(-100.0f, 100.0f);
vector<vector<float>> args;
size_t lkn = count_ops_of_type<runtime::cpu::op::LoopKernel>(cpu_f);
ASSERT_GT(lkn, 0);
size_t ckn = count_ops_of_type<op::CompiledKernel>(cpu_f);
ASSERT_GT(ckn, 0);
for (shared_ptr<op::Parameter> param : cpu_f->get_parameters())
{
......@@ -2219,7 +2217,7 @@ TEST(cpu_fusion, loop_kernel_fusion_multiple_groups_pruned)
}
}
TEST(cpu_fusion, loop_kernel_fusion_bounded_relu)
TEST(cpu_fusion, compiled_kernel_fusion_bounded_relu)
{
auto make_function = []() -> std::shared_ptr<Function> {
Shape shape{};
......@@ -2238,7 +2236,7 @@ TEST(cpu_fusion, loop_kernel_fusion_bounded_relu)
pass::Manager pass_manager;
pass_manager.register_pass<pass::VisualizeTree>("before_relu_fusion.png");
pass_manager.register_pass<runtime::cpu::pass::CPULoopKernelFusion>(3);
pass_manager.register_pass<runtime::cpu::pass::CPUCompiledKernelFusion>(3);
pass_manager.register_pass<pass::VisualizeTree>("after_relu_fusion.png");
auto cpu_f = make_function();
auto int_f = make_function();
......@@ -2246,8 +2244,8 @@ TEST(cpu_fusion, loop_kernel_fusion_bounded_relu)
test::Uniform<float> rng(-100.0f, 100.0f);
vector<vector<float>> args;
size_t lkn = count_ops_of_type<runtime::cpu::op::LoopKernel>(cpu_f);
ASSERT_GT(lkn, 0);
size_t ckn = count_ops_of_type<op::CompiledKernel>(cpu_f);
ASSERT_GT(ckn, 0);
for (shared_ptr<op::Parameter> param : cpu_f->get_parameters())
{
......@@ -2263,7 +2261,7 @@ TEST(cpu_fusion, loop_kernel_fusion_bounded_relu)
}
}
TEST(cpu_fusion, loop_kernel_fusion_multiple_groups)
TEST(cpu_fusion, compiled_kernel_fusion_multiple_groups)
{
auto make_function = []() -> std::shared_ptr<Function> {
Shape shape{};
......@@ -2288,15 +2286,15 @@ TEST(cpu_fusion, loop_kernel_fusion_multiple_groups)
};
pass::Manager pass_manager;
pass_manager.register_pass<runtime::cpu::pass::CPULoopKernelFusion>(2);
pass_manager.register_pass<runtime::cpu::pass::CPUCompiledKernelFusion>(2);
auto cpu_f = make_function();
auto int_f = make_function();
pass_manager.run_passes(cpu_f);
test::Uniform<float> rng(-100.0f, 100.0f);
vector<vector<float>> args;
size_t lkn = count_ops_of_type<runtime::cpu::op::LoopKernel>(cpu_f);
ASSERT_GT(lkn, 0);
size_t ckn = count_ops_of_type<op::CompiledKernel>(cpu_f);
ASSERT_GT(ckn, 0);
for (shared_ptr<op::Parameter> param : cpu_f->get_parameters())
{
......@@ -2312,7 +2310,7 @@ TEST(cpu_fusion, loop_kernel_fusion_multiple_groups)
}
}
TEST(cpu_fusion, loop_kernel_fusion_one_group)
TEST(cpu_fusion, compiled_kernel_fusion_one_group)
{
auto make_function = []() -> std::shared_ptr<Function> {
Shape shape{};
......@@ -2338,15 +2336,15 @@ TEST(cpu_fusion, loop_kernel_fusion_one_group)
};
pass::Manager pass_manager;
pass_manager.register_pass<runtime::cpu::pass::CPULoopKernelFusion>(2);
pass_manager.register_pass<runtime::cpu::pass::CPUCompiledKernelFusion>(2);
auto cpu_f = make_function();
auto int_f = make_function();
pass_manager.run_passes(cpu_f);
test::Uniform<float> rng(-100.0f, 100.0f);
vector<vector<float>> args;
size_t lkn = count_ops_of_type<runtime::cpu::op::LoopKernel>(cpu_f);
ASSERT_GT(lkn, 0);
size_t ckn = count_ops_of_type<op::CompiledKernel>(cpu_f);
ASSERT_GT(ckn, 0);
for (shared_ptr<op::Parameter> param : cpu_f->get_parameters())
{
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment