Commit 5bbd199b authored by Diego Caballero, committed by Scott Cyphers

[MLIR] Disable CPU fusion + debug tracer tests in MLIR (#3442)

CPU fusion is disabled in MLIR since fused ops are not supported in the
nGraph dialect. The CPU debug tracer test doesn't expect the
CompiledKernel ops generated for MLIR.
parent 861db743
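For context: the change relies on googletest's convention that a test whose name begins with DISABLED_ is still compiled and registered but skipped by default at runtime. The MLIR_DISABLE_TEST macro added below token-pastes that prefix onto the test name whenever NGRAPH_MLIR_ENABLE is defined. A minimal self-contained sketch of the mechanism (the "example" test is hypothetical, not part of this commit):

#include <gtest/gtest.h>

#ifdef NGRAPH_MLIR_ENABLE
// Token-paste gtest's "skip" prefix onto the test name.
#define MLIR_DISABLE_TEST(name) DISABLED_##name
#else
#define MLIR_DISABLE_TEST(name) name
#endif

// With NGRAPH_MLIR_ENABLE defined, this expands to
// TEST(cpu_fusion, DISABLED_example): gtest lists it but skips it.
// Without the flag, the test runs as usual.
TEST(cpu_fusion, MLIR_DISABLE_TEST(example))
{
    EXPECT_EQ(2 + 2, 4);
}

Skipped tests remain buildable and can still be forced to run with googletest's --gtest_also_run_disabled_tests flag.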
@@ -57,7 +57,7 @@ static void open_logs(ifstream& meta, ifstream& bin, const string& trace_log, co
     ASSERT_TRUE(bin.is_open());
 }
 
-TEST(cpu_debug_tracer, check_flow_with_external_function)
+TEST(cpu_debug_tracer, MLIR_DISABLE_TEST(check_flow_with_external_function))
 {
     Shape shape{2, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape);
...
@@ -88,7 +88,6 @@
 #include "util/autodiff/numeric_compare.hpp"
 #include "util/matcher.hpp"
 #include "util/random.hpp"
-#include "util/random.hpp"
 #include "util/test_tools.hpp"
 
 using namespace ngraph;
@@ -621,7 +620,7 @@ static void test_batchnorm_multiply_add_relu(Shape input_shape)
     ASSERT_EQ(bn_relu, 1);
 }
 
-TEST(cpu_fusion, batchnorm_multiply_add_relu)
+TEST(cpu_fusion, MLIR_DISABLE_TEST(batchnorm_multiply_add_relu))
 {
     test_batchnorm_multiply_add_relu(Shape{1, 3, 2, 2});
     test_batchnorm_multiply_add_relu(Shape{1, 2, 2, 2, 2});
...@@ -2503,14 +2502,14 @@ static void check_bounded_relu(Shape param_shape, float constant_val) ...@@ -2503,14 +2502,14 @@ static void check_bounded_relu(Shape param_shape, float constant_val)
EXPECT_TRUE(test::all_close(cpu_results.at(0), int_results.at(0), 1.0e-4f, 1.0e-4f)); EXPECT_TRUE(test::all_close(cpu_results.at(0), int_results.at(0), 1.0e-4f, 1.0e-4f));
} }
TEST(cpu_fusion, fuse_bounded_relu_inter_vs_cpu) TEST(cpu_fusion, MLIR_DISABLE_TEST(fuse_bounded_relu_inter_vs_cpu))
{ {
check_bounded_relu(Shape{4, 3, 2, 2}, 6.0f); check_bounded_relu(Shape{4, 3, 2, 2}, 6.0f);
check_bounded_relu(Shape{4, 3}, 4.0f); check_bounded_relu(Shape{4, 3}, 4.0f);
check_bounded_relu(Shape{4, 3, 2}, 2.0f); check_bounded_relu(Shape{4, 3, 2}, 2.0f);
} }
TEST(cpu_fusion, fuse_dropout) TEST(cpu_fusion, MLIR_DISABLE_TEST(fuse_dropout))
{ {
auto make_function = [](Shape input_shape, auto make_function = [](Shape input_shape,
const uint32_t seed_val, const uint32_t seed_val,
...@@ -2538,7 +2537,6 @@ TEST(cpu_fusion, fuse_dropout) ...@@ -2538,7 +2537,6 @@ TEST(cpu_fusion, fuse_dropout)
auto f = make_shared<Function>(NodeVector{pdivide, gen_mask}, ParameterVector{input}); auto f = make_shared<Function>(NodeVector{pdivide, gen_mask}, ParameterVector{input});
return f; return f;
}; };
uint32_t seed = rand(); uint32_t seed = rand();
...@@ -2583,7 +2581,7 @@ TEST(cpu_fusion, fuse_dropout) ...@@ -2583,7 +2581,7 @@ TEST(cpu_fusion, fuse_dropout)
} }
} }
TEST(cpu_fusion, fuse_leaky_relu) TEST(cpu_fusion, MLIR_DISABLE_TEST(fuse_leaky_relu))
{ {
auto make_function = [](Shape input_shape, vector<float> alpha_val) { auto make_function = [](Shape input_shape, vector<float> alpha_val) {
auto input = std::make_shared<op::Parameter>(element::f32, input_shape); auto input = std::make_shared<op::Parameter>(element::f32, input_shape);
...@@ -2854,7 +2852,6 @@ static std::shared_ptr<Function> ...@@ -2854,7 +2852,6 @@ static std::shared_ptr<Function>
auto bias = std::make_shared<op::Parameter>(element::f32, Shape{400}); auto bias = std::make_shared<op::Parameter>(element::f32, Shape{400});
ParameterVector params{W, bias}; ParameterVector params{W, bias};
auto create_graph = [&]() -> std::shared_ptr<Node> { auto create_graph = [&]() -> std::shared_ptr<Node> {
auto data_param = (data_is_4d) auto data_param = (data_is_4d)
? std::make_shared<op::Parameter>(element::f32, Shape{2, 5, 1, 50}) ? std::make_shared<op::Parameter>(element::f32, Shape{2, 5, 1, 50})
: std::make_shared<op::Parameter>(element::f32, Shape{10, 1, 50}); : std::make_shared<op::Parameter>(element::f32, Shape{10, 1, 50});
...@@ -2867,7 +2864,6 @@ static std::shared_ptr<Function> ...@@ -2867,7 +2864,6 @@ static std::shared_ptr<Function>
auto bias_broadcast = make_shared<op::Broadcast>(bias, dot->get_shape(), AxisSet{0}); auto bias_broadcast = make_shared<op::Broadcast>(bias, dot->get_shape(), AxisSet{0});
auto add_bias = std::make_shared<op::Add>(dot, bias_broadcast); auto add_bias = std::make_shared<op::Add>(dot, bias_broadcast);
return move(add_bias); return move(add_bias);
}; };
NodeVector graph_nodes; NodeVector graph_nodes;
......
@@ -34,6 +34,12 @@
 #include "ngraph/runtime/tensor.hpp"
 #include "ngraph/serializer.hpp"
 
+#ifdef NGRAPH_MLIR_ENABLE
+#define MLIR_DISABLE_TEST(name) DISABLED_##name
+#else
+#define MLIR_DISABLE_TEST(name) name
+#endif
+
 namespace ngraph
 {
     class Node;
...
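With NGRAPH_MLIR_ENABLE defined, the preprocessor rewrites each wrapped test name in the hunks above, for instance:

TEST(cpu_fusion, MLIR_DISABLE_TEST(fuse_dropout))
// expands to
TEST(cpu_fusion, DISABLED_fuse_dropout)

so the fusion and debug tracer tests drop out of the default test run of an MLIR-enabled build without any test code being deleted.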