Commit 4954b8d5 authored by Denise Kutnick, committed by Scott Cyphers

Clean up and triage unit tests (#3546)

* clean up and triage unit tests

* add back in onnx and closeness fails

* Update unit_test.manifest
parent f6425cbd
unit_test.manifest

@@ -39,88 +39,79 @@ topk_2d_min_one # No plans to implement TopK
 topk_int64 # No plans to implement TopK
 topk_5d_max_partial # No plans to implement TopK
 topk_1d_i32_max_all # No plans to implement TopK
-# Tests that PlaidML might be able to run at some point.
-backwards_maxpool_n2_c1_hw5_3x3_str2_max_pad1x2_2x3
-backwards_maxpool_n4c1h4w4_kh2kw2_sh1sw1
-backwards_maxpool_n2c1h5w5_kh3kw3_sh2sw2
-backwards_maxpool_n4_c1_hw4_2x2_max
-backwards_maxpool_n2_c1_hw5_3x3_str2_max
-backwards_slice
-batchnorm_fprop_bprop # To debug
-batchnorm_fprop_bprop_2step # To debug
-batch_norm_inference_0eps_f64
-batch_norm_inference_f64
-batch_norm_training_0eps_f64
-softmax_axis_3d_double # To debug
-replace_slice_matrix_inplace
-max_pool_2d_1channel_1image_overpadded
-max_pool_3d
-maxpool_bprop_larger_than_cache
-generate_mask
-generate_mask2
-avg_pool_3d
-avg_pool_3d_uneven_strided_padded_include_in_computation
-quantize_clamp_int32 # Requires fp64 inputs, which won't work on GPUs
-numeric_float_nan
-numeric_double_nan
-shape_of_scalar
-shape_of_vector
-shape_of_matrix
-shape_of_5d
-sum_stable_acc_double # To debug: precision errors
+topk_2d_max_one_with_equal_values # No plans to implement TopK
+model_top_k # No plans to implement TopK
+
+# unsupported op: `Erf`
+erf
+gelu_f32
+gelu_f64
+gelu_backprop_factor_f32
+gelu_backprop_factor_f64
+backwards_gelu_f32
+backwards_gelu_f64
+model_erf
+model_erf_int32
+
+# unsupported ops: `BroadcastDistributed`
+broadcastdistributed
+
+# unsupported ops: 'QuantizedConvolution', 'QuantizedDot', 'EmbeddingLookup'
+model_quant_conv_linear
+model_conv_integer_no_zero_point
+model_matmul_integer_no_zero_point
+model_matmul_integer_4d_no_zero_point
+model_qlinear_matmul
+model_qlinear_matmul_3d
+model_matmul_integer
+model_matmul_integer_zero_point_zero
+model_matmul_integer_scalar
+model_matmul_integer_4d
+model_matmul_integer_4d_zero_point
+model_hardmax
+quantized_convolution
+quantized_conv_int32_output
+quantized_dot_u8u8
+quantized_dot_int32_output
 embedding_lookup_4x5_reverse
 embedding_lookup_10x1_arbitrary
 embedding_lookup_10x1_arbitrary_index_type_int
 embedding_lookup_10x1_arbitrary_index_type_int64
-floor_int32
-gather_nd_scalar_from_2d
-gather_nd_1d_from_2d
-gather_nd_scalar_from_3d
-gather_nd_1d_from_3d
-gather_nd_2d_from_3d
-gather_nd_batch_scalar_from_2d
-gather_nd_batch_1d_from_2d
-gather_nd_batch_scalar_from_3d
-gather_nd_batch_1d_from_3d
-gather_nd_batch_2d_from_3d
-gather_nd_single_indices
-gather_4d_indices_no_axis_uint8
-gather_scalar_indices_axis_1_2d_input
-gather_1d_indices_axis_2_4d_input
-gather_2d_indices_axis_1_2d_input
-gather_scalar_indices_no_axis_2d_input
-gather_1d_indices_no_axis_1d_input
-gather_2d_indices_no_axis_2d_input
-gather_3d_indices_no_axis_2d_input
-gather_4d_indices_no_axis_2d_input
-scatter_add_4d_indices
-scatter_add_3d_indices
-scatter_add_2d_indices
-scatter_add_1d_indices
-scatter_add_scalar_indices
-scatter_nd_add_batch_2d_to_3d
-scatter_nd_add_2d_to_3d
-# To be triaged -- bad kernels, numerical accuracy, edge conditions,
-# unimplemented functionality, &c
-cos
-erf
-sin
-tan
-not
-abc_int64
-concat_matrix_int64
-select_double
+# unsupported op: `ReverseSequence`
+model_lstm_bdir_short_input_seq
+model_lstm_mixed_seq_reverse
+model_reverse_sequence_0_batch_1
+model_reverse_sequence_1_batch_0
+
+# result mismatch
+model_dequantize_linear_scalar_zero_scale_int8
+model_softmax
+avg_pool_3d_uneven_strided_padded
+rnn_cell_activation_function
+gru_cell_bias_clip
+gru_cell_linear_before_reset
+softmax_axis_3d
+relu_2Dfprop_i32
+avg_pool_uint8
+avg_pool_int8
+one_hot_vector_many_categories
+conv_bias_1d
+conv_bias_2d
+conv_bias_3d
+conv_bias_add_2d
+normalize_across_hw_4d
+divide_python_rounding_int32
 convert_int32_bool
 convert_float32_bool
-tensor_constant_int64
-constant_equality_bool
-numeric_float_inf
-numeric_double_inf
-computation_reuse
-pad_negative_exterior_1d_check_limits
+batch_norm_inference_0eps_f64
+batch_norm_inference_f64
+batch_norm_training_0eps_f64
+batch_norm_inference_parameters_duplication
+batch_norm_fprop_b1c2h2w2
+batch_norm_fprop_b2c2h2w1
+batch_norm_fprop_b2c2d2h1w1
+batch_norm_fprop_inference_b2c2h2w1
 pad_edge_1d
 pad_edge_1d_top_neg
 pad_edge_1d_top_neg_bigger_than_tensor
@@ -136,29 +127,84 @@ pad_reflect_1d_bottom_neg_bigger_than_tensor
 pad_reflect_1d_multi_reflect
 pad_reflect_2d
 pad_reflect_2d_with_neg
-pad_negative_exterior_2d
-pad_negative_exterior_2d_all_negative
-pad_negative_exterior_4d
 pad_symmetric
-max_trivial_int8
-max_trivial_5d_int32
+# No double precision FP support in PlaidML
+sum_trivial_in_double
+sum_stable_acc_double
+sum_stable_simple_double
+softmax_axis_3d_double
+select_double
+numeric_double_nan
+numeric_double_inf
 max_3d_to_scalar_double
-softmax_axis_3d
-logical_and
-logical_or
-batch_norm_inference_parameters_duplication
-batch_norm_fprop_b1c2h2w2
-batch_norm_fprop_b2c2h2w1
-batch_norm_fprop_b2c2d2h1w1
-batch_norm_fprop_inference_b2c2h2w1
+argmin_trivial_in_double
+
+# unsupported op: `ShapeOf`
+shape_of_vector
+shape_of_matrix
+shape_of_5d
+
+# unsupported ops: `ScatterAdd` `ScatterNDAdd`
+scatter_add_3d_indices
+scatter_add_2d_indices
+scatter_add_1d_indices
+scatter_add_scalar_indices
+scatter_nd_add_batch_2d_to_3d
+scatter_nd_add_2d_to_3d
+
+# c++ runtime exception
+replace_slice_matrix_inplace
+quantize_clamp_int32
+pad_negative_exterior_1d_check_limits
+backwards_slice
+backwards_softmax_3d
+argmin_3D_i32
+argmin_3D_i64
+argmax_3D_i32
+argmax_3D_i64
 argmax_3D_axis_0
 argmax_3D_axis_1
 argmax_3D_axis_2
-argmin_trivial_in_double
-topk_2d_max_one_with_equal_values
-sum_trivial_in_double
-sum_stable_simple_double
-one_hot_vector_many_categories
+argmin_4D_i64
+argmax_4D_i64
+
+# PlaidML doesn't support over-padded MaxPool
+max_pool_3d
+backwards_maxpool_n2_c1_hw5_3x3_str2_max_pad1x2_2x3
+
+# c++ compilation failure
+max_trivial_int8
+max_trivial_5d_int32
+floor_int32
+any_trivial
+any_2x2x3_eliminate_dim_0
+
+# unsupported op: `GenerateMask`
+generate_mask
+generate_mask2
+
+# unsupported op: `Gather`
+gather_4d_indices_no_axis_uint8
+gather_4d_indices_no_axis_2d_input
+gather_3d_indices_no_axis_2d_input
+gather_2d_indices_no_axis_2d_input
+gather_1d_indices_no_axis_1d_input
+gather_scalar_indices_no_axis_2d_input
+gather_2d_indices_axis_1_2d_input
+gather_1d_indices_axis_2_4d_input
+gather_scalar_indices_axis_1_2d_input
+gather_nd_single_indices
+gather_nd_scalar_from_2d
+gather_nd_1d_from_2d
+gather_nd_scalar_from_3d
+gather_nd_1d_from_3d
+gather_nd_2d_from_3d
+gather_nd_batch_scalar_from_2d
+gather_nd_batch_1d_from_2d
+gather_nd_batch_scalar_from_3d
+gather_nd_batch_1d_from_3d
+gather_nd_batch_2d_from_3d
 gather_no_axis_int8
 gather_no_axis_int16
 gather_no_axis_int32
@@ -168,93 +214,24 @@ gather_no_axis_uint16
 gather_no_axis_uint32
 gather_no_axis_uint64
 gather_no_axis_bool
-elu
-elu_negative_alpha
-prelu
-hardsigmoid
-prelu_shared_slope
-prelu_negative_slope
-relu_2Dfprop_i32
-conv_bias_1d
-conv_bias_2d
-conv_bias_3d
-conv_bias_bprop_2d
-conv_bias_add_2d
-space_to_depth
-depth_to_space
-normalize_across_chw_4d
-normalize_across_chw_4d_max_bias
-normalize_across_chw_3d
-normalize_across_chw_2d
-normalize_across_hw_4d
-gemm
-fused_clamp
-mvn_mean_normalization
-mvn_mean_normalization_split_channels
-mvn_mean_variance_normalization
-mvn_mean_variance_normalization_split_channels
-grn_4d
-grn_2d_with_bias
-scale_shift_no_broadcast
-scale_shift
-shuffle_channels_simple
-shuffle_channels_negative_axis
-shuffle_channels_float
-squeeze
-squeeze_default_axes
-squared_difference
-squared_difference_broadcast
-fake_quantize
-fake_quantize_with_clip
-fake_quantize_with_clip_across_channels
-dot_0_0
-dot_2x0_0
-equal
-notequal
-greater
-greater_int64
-greatereq
-less
-lesseq
-lesseq_int32
-lesseq_bool
-broadcast_vector_rowwise_int64
-minimum_int64
-maximum_int64
-auto_bcast_binary_elementwise
-any_trivial
-any_2x2x3_eliminate_dim_0
-backwards_acos
-backwards_asin
-backwards_atan
-backwards_softmax_all
-backwards_softmax_axis
-backwards_softmax_underflow
-backwards_softmax_3d
-batch_mat_mul_forward
-dot_matrix_2x0_0x2
-max_pool_uint8
-max_pool_int8
-avg_pool_uint8
-avg_pool_int8
-# Need erf
-gelu_f32
-gelu_f64
-gelu_backprop_factor_f32
-gelu_backprop_factor_f64
-backwards_gelu_f32
-backwards_gelu_f64
-# From onnx tests
+# unsupported op: `BatchMatMul`
+batch_mat_mul_forward
+backwards_batchmatmul_tensor2_tensor2
+
+# onnx tests
 model_quant_conv_linear_2d
 model_quant_conv_linear_3d
 model_conv_integer
 model_conv_integer_zero_point_zero
 model_conv_integer_pads
+model_lstm_fwd_hardsigmoid_activation
+model_lstm_fwd_with_clip
+model_lstm_fwd_mixed_seq
 model_lstm_fwd_large_batch_no_clip
 model_global_lp_pool_p3
 model_argmin_no_keepdims
+model_reduce_log_sum_exp
 model_elu
 model_selu
 model_sigmoid
@@ -264,81 +241,18 @@ model_argmin_int32
 model_lp_norm_default
 model_instance_normalization
-# failings on plaidml_nGPU
-argmin_4D_i64
-argmax_4D_i64
-# dgkutnic ww24.5: these tests are to be triaged by the PlaidML team
-# ww25.2: re-scrubbed this list of tests after fixing check_inputs
-# initial debug points to some of these failing due to precision issues
-sqrt
-batch_norm_inference_0eps_f32
-batch_norm_inference_f32
-batch_norm_training_0eps_f32
-argmin_trivial
-argmax_trivial
-argmin_trivial_in_i32
-argmin_3D_i32
-argmin_3D_i64
-argmax_3D_i32
-argmax_3D_i64
-sum_large_1d_to_scalar
-sum_stable_acc
-one_hot_scalar_2_in_3
-one_hot_scalar_1_in_3
-one_hot_scalar_0_in_3
+# passing locally, fails closeness checks in CI which may be too strict
+elu
+elu_negative_alpha
 lstm_cell_no_bias_no_peepholes
 lstm_cell_bias_peepholes
 lstm_cell_bias_peepholes_clip_input_forget
 lstm_cell_activaction_functions
-divide_python_rounding_int32
-backwards_batchmatmul_tensor2_tensor2
-# unsupported ops: `BroadcastDistributed`
-broadcastdistributed
-# unsupported ops: 'QuantizedConvolution', 'QuantizedDot', 'TopK', 'Erf', 'EmbeddingLookup'
-model_quant_conv_linear
-model_conv_integer_no_zero_point
-model_matmul_integer_no_zero_point
-model_matmul_integer_4d_no_zero_point
-model_qlinear_matmul
-model_qlinear_matmul_3d
-model_matmul_integer
-model_matmul_integer_zero_point_zero
-model_matmul_integer_scalar
-model_matmul_integer_4d
-model_matmul_integer_4d_zero_point
-model_top_k
-model_erf
-model_erf_int32
-model_hardmax
-quantized_convolution
-quantized_conv_int32_output
-quantized_dot_u8u8
-quantized_dot_int32_output
-# unsupported op: `ReverseSequence`
-model_lstm_bdir_short_input_seq
-model_lstm_mixed_seq_reverse
-model_reverse_sequence_0_batch_1
-model_reverse_sequence_1_batch_0
-# node validation error: "Argument shapes are inconsistent."
-model_lstm_fwd_with_clip
-model_lstm_fwd_mixed_seq
-model_lstm_fwd_hardsigmoid_activation
-model_reduce_log_sum
-model_reduce_log_sum_exp
-model_reduce_mean
-# result mismatch
-model_dequantize_linear_scalar_zero_scale_int8
-model_softmax
-avg_pool_3d_uneven_strided_padded
-rnn_cell_activation_function
-gru_cell_bias_clip
-gru_cell_linear_before_reset
+dot_0_0
+dot_matrix_2x0_0x2
+dot_2x0_0
+auto_bcast_binary_elementwise
+max_pool_2d_1channel_1image_overpadded
 # axes input param not supported
 lrn_across_h
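For context on how a manifest like this is consumed: an nGraph backend reads its unit_test.manifest to decide which registered unit tests to disable. The sketch below is a minimal, hypothetical parser for the format visible in the diff (one test name per line, `#` starting a reason comment, blank lines ignored); it is an assumption for illustration, not the project's actual loader.

```cpp
// Illustrative sketch only -- not nGraph's actual manifest loader.
// Parses a unit_test.manifest-style file into a set of skipped test names.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <unordered_set>

std::unordered_set<std::string> load_manifest(const std::string& path)
{
    std::unordered_set<std::string> skipped;
    std::ifstream in(path);
    std::string line;
    while (std::getline(in, line))
    {
        // Drop everything after a '#' comment marker, e.g. "# No plans to implement TopK".
        auto hash = line.find('#');
        if (hash != std::string::npos)
            line.erase(hash);
        // Trim surrounding whitespace; keep only non-empty names.
        std::istringstream trimmer(line);
        std::string name;
        if (trimmer >> name)
            skipped.insert(name);
    }
    return skipped;
}

int main()
{
    auto skipped = load_manifest("unit_test.manifest");
    std::cout << (skipped.count("erf") ? "skip erf\n" : "run erf\n");
}
```

Keeping the reason comment on the same line as the test name is what makes the `#`-stripping step necessary; it also means a test can be re-enabled simply by deleting its line, which is most of what this commit does.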
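Several of the re-added entries are skipped for numerical rather than functional reasons ("passing locally, fails closeness checks in CI which may be too strict"). A closeness check typically passes when each element differs by no more than a combined absolute and relative tolerance. The helper below is a sketch under that assumption, with a hypothetical name and default tolerances, not the project's actual test utility:

```cpp
// Illustrative elementwise closeness check with absolute (atol) and
// relative (rtol) tolerances; names and defaults are assumptions.
#include <cmath>
#include <cstddef>
#include <vector>

bool all_close(const std::vector<float>& actual,
               const std::vector<float>& expected,
               float rtol = 1e-5f,
               float atol = 1e-8f)
{
    if (actual.size() != expected.size())
        return false;
    for (std::size_t i = 0; i < actual.size(); ++i)
    {
        // Pass when |actual - expected| <= atol + rtol * |expected|,
        // mirroring the common numpy-style tolerance formula.
        if (std::fabs(actual[i] - expected[i]) >
            atol + rtol * std::fabs(expected[i]))
            return false;
    }
    return true;
}
```

Loosening rtol/atol, or comparing against a higher-precision reference, is the usual way to decide whether such CI-only failures reflect real kernel bugs or an overly strict tolerance.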