ngraph — commit da7bc635
Authored Jul 01, 2019 by Leona C
Be sure test file is latest from master branch

Parent: 2d6f90ab
Showing 1 changed file with 41 additions and 43 deletions.
test/cpu_fusion.cpp (+41, -43) @ da7bc635
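The change is mechanical: every test that built a CPU-backend-specific runtime::cpu::op::LoopKernel now builds an op::CompiledKernel (declared in the newly included ngraph/op/experimental/compiled_kernel.hpp), the local variables are renamed from lk to ck, and the test names and fusion-pass references are renamed to match. The sketch below condenses the pattern from the first test in the diff; it is a minimal illustration only, assuming the usual nGraph umbrella header and namespaces used by test/cpu_fusion.cpp, and the helper name build_compiled_kernel_example is mine, not part of the test file.

// Minimal sketch of the replacement pattern (illustrative, assumes the
// nGraph umbrella header provides op::Parameter, op::Relu, Function, etc.).
#include "ngraph/ngraph.hpp"
#include "ngraph/op/experimental/compiled_kernel.hpp"

using namespace ngraph;
using namespace std;

shared_ptr<Function> build_compiled_kernel_example()
{
    Shape shapeA{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shapeA);
    auto relu_a = make_shared<op::Relu>(A);
    auto relu_relu_a = make_shared<op::Relu>(relu_a);

    // Before this commit: make_shared<runtime::cpu::op::LoopKernel>(...)
    // After this commit:  make_shared<op::CompiledKernel>(...)
    // Both take {nodes to fuse}, {output nodes}, {argument nodes}.
    auto ck = make_shared<op::CompiledKernel>(
        NodeVector{relu_a, relu_relu_a}, NodeVector{relu_relu_a}, NodeVector{A});

    return make_shared<Function>(NodeVector{ck}, ParameterVector{A});
}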
@@ -30,6 +30,7 @@
 #include "ngraph/op/batch_norm.hpp"
 #include "ngraph/op/concat.hpp"
 #include "ngraph/op/dequantize.hpp"
+#include "ngraph/op/experimental/compiled_kernel.hpp"
 #include "ngraph/op/experimental/generate_mask.hpp"
 #include "ngraph/op/experimental/quantized_concat.hpp"
 #include "ngraph/op/experimental/quantized_conv.hpp"
@@ -69,7 +70,6 @@
 #include "ngraph/runtime/cpu/op/dropout.hpp"
 #include "ngraph/runtime/cpu/op/group_conv_bias.hpp"
 #include "ngraph/runtime/cpu/op/leaky_relu.hpp"
-#include "ngraph/runtime/cpu/op/loop_kernel.hpp"
 #include "ngraph/runtime/cpu/op/lstm.hpp"
 #include "ngraph/runtime/cpu/op/matmul_bias.hpp"
 #include "ngraph/runtime/cpu/op/rnn.hpp"
@@ -77,7 +77,6 @@
 #include "ngraph/runtime/cpu/op/sigmoid_mul.hpp"
 #include "ngraph/runtime/cpu/op/update_slice.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
-#include "ngraph/runtime/cpu/pass/cpu_loop_kernel_fusion.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_mat_fusion.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_post_layout_optimizations.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_rnn_fusion.hpp"
@@ -1416,15 +1415,15 @@ TEST(cpu_fusion, backwards_maxpool_with_indices_n4_c1_hw4_2x2_max)
 #if defined(NGRAPH_HALIDE)

-TEST(cpu_fusion, loop_kernel_one_input_one_output_halide)
+TEST(cpu_fusion, compiled_kernel_one_input_one_output_halide)
 {
     Shape shapeA{2, 2};
     auto A = make_shared<op::Parameter>(element::f32, shapeA);
     auto relu_a = make_shared<op::Relu>(A);
     auto relu_relu_a = make_shared<op::Relu>(relu_a);
-    auto lk = make_shared<runtime::cpu::op::LoopKernel>(
+    auto ck = make_shared<op::CompiledKernel>(
         NodeVector{relu_a, relu_relu_a}, NodeVector{relu_relu_a}, NodeVector{A});
-    auto f = make_shared<Function>(NodeVector{lk}, ParameterVector{A});
+    auto f = make_shared<Function>(NodeVector{ck}, ParameterVector{A});
     auto backend = runtime::Backend::create("CPU");

     shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shapeA);
@@ -1440,7 +1439,7 @@ TEST(cpu_fusion, loop_kernel_one_input_one_output_halide)
     EXPECT_TRUE(test::all_close(read_vector<float>(result), expected));
 }

-TEST(cpu_fusion, loop_kernel_two_input_two_output_halide)
+TEST(cpu_fusion, compiled_kernel_two_input_two_output_halide)
 {
     Shape shapeA{2, 2};
     auto A = make_shared<op::Parameter>(element::f32, shapeA);
@@ -1448,11 +1447,11 @@ TEST(cpu_fusion, loop_kernel_two_input_two_output_halide)
     auto relu_a = make_shared<op::Relu>(A);
     auto add_ab = make_shared<op::Add>(relu_a, B);

-    auto lk = make_shared<runtime::cpu::op::LoopKernel>(
+    auto ck = make_shared<op::CompiledKernel>(
         NodeVector{relu_a, add_ab}, NodeVector{relu_a, add_ab}, NodeVector{A, B});
-    auto goe1 = make_shared<op::GetOutputElement>(lk, 0);
-    auto goe2 = make_shared<op::GetOutputElement>(lk, 1);
+    auto goe1 = make_shared<op::GetOutputElement>(ck, 0);
+    auto goe2 = make_shared<op::GetOutputElement>(ck, 1);
     auto f = make_shared<Function>(NodeVector{goe1, goe2}, ParameterVector{A, B});

     auto backend = runtime::Backend::create("CPU");
@@ -1474,7 +1473,7 @@ TEST(cpu_fusion, loop_kernel_two_input_two_output_halide)
     EXPECT_TRUE(test::all_close(read_vector<float>(result_relu), expected_relu));
 }

-TEST(cpu_fusion, loop_kernel_embedded_graph_halide)
+TEST(cpu_fusion, compiled_kernel_embedded_graph_halide)
 {
     Shape shapeA{2, 2};
     auto A = make_shared<op::Parameter>(element::f32, shapeA);
@@ -1482,9 +1481,9 @@ TEST(cpu_fusion, loop_kernel_embedded_graph_halide)
     auto neg_a = make_shared<op::Negative>(A);
     auto neg_b = make_shared<op::Negative>(B);
     auto add = neg_a + neg_b;
-    auto lk = make_shared<runtime::cpu::op::LoopKernel>(
-        NodeVector{add}, NodeVector{add}, NodeVector{neg_a, neg_b});
-    auto f = make_shared<Function>(NodeVector{lk}, ParameterVector{A, B});
+    auto ck = make_shared<op::CompiledKernel>(
+        NodeVector{add}, NodeVector{add}, NodeVector{neg_a, neg_b});
+    auto f = make_shared<Function>(NodeVector{ck}, ParameterVector{A, B});
     auto backend = runtime::Backend::create("CPU");

     shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shapeA);
@@ -1501,15 +1500,14 @@ TEST(cpu_fusion, loop_kernel_embedded_graph_halide)
     EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
 }

-TEST(cpu_fusion, loop_kernel_two_inputs_one_output_halide)
+TEST(cpu_fusion, compiled_kernel_two_inputs_one_output_halide)
 {
     Shape shapeA{2, 2};
     auto A = make_shared<op::Parameter>(element::f32, shapeA);
     auto B = make_shared<op::Parameter>(element::f32, shapeA);
     auto add = A + B;
-    auto lk = make_shared<runtime::cpu::op::LoopKernel>(
-        NodeVector{add}, NodeVector{add}, NodeVector{A, B});
-    auto f = make_shared<Function>(NodeVector{lk}, ParameterVector{A, B});
+    auto ck = make_shared<op::CompiledKernel>(NodeVector{add}, NodeVector{add}, NodeVector{A, B});
+    auto f = make_shared<Function>(NodeVector{ck}, ParameterVector{A, B});
     auto backend = runtime::Backend::create("CPU");

     shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shapeA);
@@ -1528,7 +1526,7 @@ TEST(cpu_fusion, loop_kernel_two_inputs_one_output_halide)
     EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
 }

-TEST(cpu_fusion, loop_kernel_multiple_outputs_halide)
+TEST(cpu_fusion, compiled_kernel_multiple_outputs_halide)
 {
     Shape shapeA{2, 2};
     auto A = make_shared<op::Parameter>(element::f32, shapeA);
@@ -1545,13 +1543,13 @@ TEST(cpu_fusion, loop_kernel_multiple_outputs_halide)
     auto add_aab = add_ab_abs + A;
     auto add_cdd = add_cd_abs + D;

-    auto lk = make_shared<runtime::cpu::op::LoopKernel>(
+    auto ck = make_shared<op::CompiledKernel>(
         NodeVector{neg_a, neg_b, add_ab, add_cd, add_cd_abs, add_ab_abs, add_aab, add_cdd},
         NodeVector{add_aab, add_cdd, neg_b},
         NodeVector{A, B, C, D});
-    auto add_aab_goe = std::make_shared<op::GetOutputElement>(lk, 0);
-    auto add_cdd_goe = std::make_shared<op::GetOutputElement>(lk, 1);
-    auto neg_b_goe = std::make_shared<op::GetOutputElement>(lk, 2);
+    auto add_aab_goe = std::make_shared<op::GetOutputElement>(ck, 0);
+    auto add_cdd_goe = std::make_shared<op::GetOutputElement>(ck, 1);
+    auto neg_b_goe = std::make_shared<op::GetOutputElement>(ck, 2);

     auto f = make_shared<Function>(NodeVector{add_aab_goe, add_cdd_goe, neg_b_goe},
                                    ParameterVector{A, B, C, D});
@@ -1586,7 +1584,7 @@ TEST(cpu_fusion, loop_kernel_multiple_outputs_halide)
     EXPECT_TRUE(test::all_close_f(read_vector<float>(r3), expected3, MIN_FLOAT_TOLERANCE_BITS));
 }

-TEST(cpu_fusion, loop_kernel_copy_with_new_args)
+TEST(cpu_fusion, compiled_kernel_copy_with_new_args)
 {
     Shape shapeA{2, 2};
     auto A = make_shared<op::Parameter>(element::i32, shapeA);
@@ -1603,13 +1601,13 @@ TEST(cpu_fusion, loop_kernel_copy_with_new_args)
     auto add_aab = add_ab_abs + A;
     auto add_cdd = add_cd_abs + D;

-    auto lk = make_shared<runtime::cpu::op::LoopKernel>(
+    auto ck = make_shared<op::CompiledKernel>(
         NodeVector{neg_a, neg_b, add_ab, add_cd, add_cd_abs, add_ab_abs, add_aab, add_cdd},
         NodeVector{add_aab, add_cdd, neg_b},
         NodeVector{A, B, C, D});
-    auto add_aab_goe = std::make_shared<op::GetOutputElement>(lk, 0);
-    auto add_cdd_goe = std::make_shared<op::GetOutputElement>(lk, 1);
-    auto neg_b_goe = std::make_shared<op::GetOutputElement>(lk, 2);
+    auto add_aab_goe = std::make_shared<op::GetOutputElement>(ck, 0);
+    auto add_cdd_goe = std::make_shared<op::GetOutputElement>(ck, 1);
+    auto neg_b_goe = std::make_shared<op::GetOutputElement>(ck, 2);

     auto f = make_shared<Function>(NodeVector{add_aab_goe, add_cdd_goe, neg_b_goe},
                                    ParameterVector{A, B, C, D});
@@ -2170,7 +2168,7 @@ TEST(cpu_fusion, rnn_fprop_1_lstm_cell)
 #if 0

-TEST(cpu_fusion, loop_kernel_fusion_multiple_groups_pruned)
+TEST(cpu_fusion, compiled_kernel_fusion_multiple_groups_pruned)
 {
     auto make_function = []() -> std::shared_ptr<Function> {
         Shape shape{};
@@ -2195,15 +2193,15 @@ TEST(cpu_fusion, loop_kernel_fusion_multiple_groups_pruned)
     };

     pass::Manager pass_manager;
-    pass_manager.register_pass<runtime::cpu::pass::CPULoopKernelFusion>(3);
+    pass_manager.register_pass<runtime::cpu::pass::CPUCompiledKernelFusion>(3);
     auto cpu_f = make_function();
     auto int_f = make_function();
     pass_manager.run_passes(cpu_f);

     test::Uniform<float> rng(-100.0f, 100.0f);
     vector<vector<float>> args;

-    size_t lkn = count_ops_of_type<runtime::cpu::op::LoopKernel>(cpu_f);
-    ASSERT_GT(lkn, 0);
+    size_t ckn = count_ops_of_type<op::CompiledKernel>(cpu_f);
+    ASSERT_GT(ckn, 0);
     for (shared_ptr<op::Parameter> param : cpu_f->get_parameters())
     {
@@ -2219,7 +2217,7 @@ TEST(cpu_fusion, loop_kernel_fusion_multiple_groups_pruned)
     }
 }

-TEST(cpu_fusion, loop_kernel_fusion_bounded_relu)
+TEST(cpu_fusion, compiled_kernel_fusion_bounded_relu)
 {
     auto make_function = []() -> std::shared_ptr<Function> {
         Shape shape{};
@@ -2238,7 +2236,7 @@ TEST(cpu_fusion, loop_kernel_fusion_bounded_relu)
     pass::Manager pass_manager;
     pass_manager.register_pass<pass::VisualizeTree>("before_relu_fusion.png");
-    pass_manager.register_pass<runtime::cpu::pass::CPULoopKernelFusion>(3);
+    pass_manager.register_pass<runtime::cpu::pass::CPUCompiledKernelFusion>(3);
     pass_manager.register_pass<pass::VisualizeTree>("after_relu_fusion.png");
     auto cpu_f = make_function();
     auto int_f = make_function();
@@ -2246,8 +2244,8 @@ TEST(cpu_fusion, loop_kernel_fusion_bounded_relu)
     test::Uniform<float> rng(-100.0f, 100.0f);
     vector<vector<float>> args;

-    size_t lkn = count_ops_of_type<runtime::cpu::op::LoopKernel>(cpu_f);
-    ASSERT_GT(lkn, 0);
+    size_t ckn = count_ops_of_type<op::CompiledKernel>(cpu_f);
+    ASSERT_GT(ckn, 0);

     for (shared_ptr<op::Parameter> param : cpu_f->get_parameters())
     {
@@ -2263,7 +2261,7 @@ TEST(cpu_fusion, loop_kernel_fusion_bounded_relu)
     }
 }

-TEST(cpu_fusion, loop_kernel_fusion_multiple_groups)
+TEST(cpu_fusion, compiled_kernel_fusion_multiple_groups)
 {
     auto make_function = []() -> std::shared_ptr<Function> {
         Shape shape{};
@@ -2288,15 +2286,15 @@ TEST(cpu_fusion, loop_kernel_fusion_multiple_groups)
     };

     pass::Manager pass_manager;
-    pass_manager.register_pass<runtime::cpu::pass::CPULoopKernelFusion>(2);
+    pass_manager.register_pass<runtime::cpu::pass::CPUCompiledKernelFusion>(2);
    auto cpu_f = make_function();
     auto int_f = make_function();
     pass_manager.run_passes(cpu_f);

     test::Uniform<float> rng(-100.0f, 100.0f);
     vector<vector<float>> args;

-    size_t lkn = count_ops_of_type<runtime::cpu::op::LoopKernel>(cpu_f);
-    ASSERT_GT(lkn, 0);
+    size_t ckn = count_ops_of_type<op::CompiledKernel>(cpu_f);
+    ASSERT_GT(ckn, 0);
     for (shared_ptr<op::Parameter> param : cpu_f->get_parameters())
     {
@@ -2312,7 +2310,7 @@ TEST(cpu_fusion, loop_kernel_fusion_multiple_groups)
     }
 }

-TEST(cpu_fusion, loop_kernel_fusion_one_group)
+TEST(cpu_fusion, compiled_kernel_fusion_one_group)
 {
     auto make_function = []() -> std::shared_ptr<Function> {
         Shape shape{};
@@ -2338,15 +2336,15 @@ TEST(cpu_fusion, loop_kernel_fusion_one_group)
     };

     pass::Manager pass_manager;
-    pass_manager.register_pass<runtime::cpu::pass::CPULoopKernelFusion>(2);
+    pass_manager.register_pass<runtime::cpu::pass::CPUCompiledKernelFusion>(2);
     auto cpu_f = make_function();
     auto int_f = make_function();
     pass_manager.run_passes(cpu_f);

     test::Uniform<float> rng(-100.0f, 100.0f);
     vector<vector<float>> args;

-    size_t lkn = count_ops_of_type<runtime::cpu::op::LoopKernel>(cpu_f);
-    ASSERT_GT(lkn, 0);
+    size_t ckn = count_ops_of_type<op::CompiledKernel>(cpu_f);
+    ASSERT_GT(ckn, 0);
     for (shared_ptr<op::Parameter> param : cpu_f->get_parameters())
     {