Commit a99d29a0 authored by Denise Kutnick's avatar Denise Kutnick Committed by Scott Cyphers

Modify check_inputs for unit tests with PlaidML backend (#3095)

* make check_inputs check for input_count >= expected_input_count

* scrub unit test manifest after check_inputs change
parent 563af715
...@@ -76,11 +76,11 @@ namespace ngraph ...@@ -76,11 +76,11 @@ namespace ngraph
// input count. // input count.
void check_inputs(std::size_t expected_input_count) const void check_inputs(std::size_t expected_input_count) const
{ {
if (op().get_input_size() != expected_input_count) if (op().get_input_size() < expected_input_count)
{ {
std::ostringstream os; std::ostringstream os;
os << "The PlaidML nGraph backend only supports " << op().description() os << "The PlaidML nGraph backend only supports " << op().description()
<< " operations with an input count == " << expected_input_count << " operations with an input count >= " << expected_input_count
<< " (got " << op().get_input_size() << " inputs)"; << " (got " << op().get_input_size() << " inputs)";
throw std::runtime_error{os.str()}; throw std::runtime_error{os.str()};
} }
......
...@@ -261,92 +261,25 @@ batch_mat_mul_forward ...@@ -261,92 +261,25 @@ batch_mat_mul_forward
dot_matrix_2x0_0x2 dot_matrix_2x0_0x2
# dgkutnic ww24.5: these tests are to be triaged by the PlaidML team # dgkutnic ww24.5: these tests are to be triaged by the PlaidML team
# ww25.2: re-scrubbed this list of tests after fixing check_inputs
convolution_3d_1item_large_5o3i_padded_uneven_filter_uneven_data_dilation_data_dilated # initial debug points to some of these failing due to precision issues
select sqrt
product_trivial batch_norm_inference_0eps_f32
product_trivial_5d batch_norm_inference_f32
product_to_scalar batch_norm_training_0eps_f32
product_matrix_columns argmin_trivial
product_matrix_rows argmax_trivial
product_3d_to_matrix_most_sig argmin_trivial_in_i32
product_3d_to_matrix_least_sig
product_3d_to_vector
product_3d_to_scalar
product_2d_to_scalar_int32
product_to_scalar_int32
product_to_scalar_int8
max_trivial
max_trivial_5d
max_to_scalar
max_to_scalar_int8
max_matrix_columns
max_matrix_rows
max_matrix_rows_int32
max_3d_to_matrix_most_sig
max_3d_to_matrix_least_sig
max_3d_to_vector
max_3d_to_scalar
max_3d_to_scalar_int32
min_trivial
min_trivial_5d
min_trivial_5d_int32
min_to_scalar
min_to_scalar_int8
min_matrix_columns
min_matrix_rows
min_matrix_rows_int32
min_3d_to_matrix_most_sig
min_3d_to_matrix_least_sig
min_3d_to_vector
min_3d_to_scalar
min_3d_to_scalar_int32
sum_to_scalar
sum_large_1d_to_scalar sum_large_1d_to_scalar
sum_matrix_columns
sum_matrix_6d
sum_matrix_rows
sum_3d_to_matrix_most_sig
sum_3d_to_matrix_least_sig
sum_3d_to_vector
sum_3d_to_scalar
sum_3d_to_scalar_int32
sum_5d_to_scalar
sum_5d_to_scalar_int32
sum_2d_to_scalar_int8
sum_stable_acc sum_stable_acc
sum_stable_simple_float one_hot_scalar_2_in_3
one_hot_scalar_1_in_3
one_hot_scalar_0_in_3
lstm_cell_no_bias_no_peepholes
lstm_cell_bias_peepholes
lstm_cell_bias_peepholes_clip_input_forget
lstm_cell_activaction_functions
group_conv_transpose
group_conv_transpose_output_shape
divide_python_rounding_int32 divide_python_rounding_int32
any_2x2_to_scalar_true
any_2x2_to_scalar_false
any_2x3_eliminate_col_dim
any_2x3_eliminate_row_dim
any_2x2x3_eliminate_dim_1
any_2x2x3_eliminate_dim_2
any_2x2x3_eliminate_dims_0_1
any_2x2x3_eliminate_dims_0_2
any_2x2x3_eliminate_dims_1_2
any_2x2x3_eliminate_dims_0_1_2
all_trivial
all_2x2_to_scalar_false
all_2x2_to_scalar_true
all_2x3_eliminate_col_dim
all_2x3_eliminate_row_dim
all_2x2x3_eliminate_dim_0
all_2x2x3_eliminate_dim_1
all_2x2x3_eliminate_dim_2
all_2x2x3_eliminate_dims_0_1
all_2x2x3_eliminate_dims_0_2
all_2x2x3_eliminate_dims_1_2
all_2x2x3_eliminate_dims_0_1_2
all_dynamic_axis
all_change_axis
backwards_broadcast0
backwards_broadcast1
backwards_select
backwards_select_nested
backwards_sum_v2s
backwards_sum_m2s
backwards_sum_m2v_0
backwards_sum_m2v_1
backwards_batchmatmul_tensor2_tensor2 backwards_batchmatmul_tensor2_tensor2
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment