diff --git a/iree_tests/configs/config_onnx_cpu_llvm_sync.json b/iree_tests/configs/config_onnx_cpu_llvm_sync.json
index 3fd1ff69f..537472a69 100644
--- a/iree_tests/configs/config_onnx_cpu_llvm_sync.json
+++ b/iree_tests/configs/config_onnx_cpu_llvm_sync.json
@@ -12,8 +12,6 @@
   ],
   "skip_run_tests": [],
   "expected_compile_failures": [
-    "test_acosh",
-    "test_acosh_example",
     "test_adagrad",
     "test_adagrad_multiple",
     "test_adam",
@@ -31,10 +29,6 @@
     "test_ai_onnx_ml_label_encoder_string_int",
     "test_ai_onnx_ml_label_encoder_string_int_no_default",
     "test_ai_onnx_ml_label_encoder_tensor_value_only_mapping",
-    "test_asinh",
-    "test_asinh_example",
-    "test_atanh",
-    "test_atanh_example",
     "test_averagepool_2d_dilations",
     "test_averagepool_2d_pads",
     "test_averagepool_2d_precomputed_pads",
@@ -127,8 +121,6 @@
     "test_convtranspose_autopad_same",
     "test_convtranspose_kernel_shape",
     "test_convtranspose_output_shape",
-    "test_cosh",
-    "test_cosh_example",
     "test_cumsum_1d",
     "test_cumsum_1d_exclusive",
     "test_cumsum_1d_reverse",
@@ -141,6 +133,7 @@
     "test_dequantizelinear_axis",
     "test_dequantizelinear_blocked",
     "test_dequantizelinear_e4m3fn",
+    "test_dequantizelinear_e4m3fn_float16",
     "test_dequantizelinear_e4m3fn_zero_point",
     "test_dequantizelinear_e5m2",
     "test_dequantizelinear_int16",
@@ -161,15 +154,10 @@
     "test_equal_string",
     "test_equal_string_broadcast",
     "test_gathernd_example_int32_batch_dim1",
-    "test_globalmaxpool",
-    "test_globalmaxpool_precomputed",
     "test_gridsample_bicubic",
     "test_gridsample_bicubic_align_corners_0_additional_1",
     "test_gridsample_bicubic_align_corners_1_additional_1",
     "test_gridsample_border_padding",
-    "test_gridsample_nearest",
-    "test_gridsample_nearest_align_corners_0_additional_1",
-    "test_gridsample_nearest_align_corners_1_additional_1",
     "test_gridsample_reflection_padding",
     "test_gridsample_volumetric_bilinear_align_corners_0",
     "test_gridsample_volumetric_bilinear_align_corners_1",
@@ -215,18 +203,15 @@
     "test_lstm_defaults",
     "test_lstm_with_initial_bias",
     "test_lstm_with_peepholes",
-    "test_max_one_input",
     "test_maxpool_1d_default",
     "test_maxpool_2d_precomputed_same_upper",
     "test_maxpool_2d_same_lower",
     "test_maxpool_2d_same_upper",
     "test_maxpool_2d_uint8",
-    "test_maxpool_with_argmax_2d_precomputed_pads",
     "test_maxpool_with_argmax_2d_precomputed_strides",
     "test_maxunpool_export_with_output_shape",
     "test_maxunpool_export_without_output_shape",
     "test_melweightmatrix",
-    "test_min_one_input",
     "test_mod_mixed_sign_float16",
     "test_mod_mixed_sign_float32",
     "test_mod_mixed_sign_float64",
@@ -262,10 +247,8 @@
     "test_nonmaxsuppression_two_batches",
     "test_nonmaxsuppression_two_classes",
     "test_nonzero_example",
-    "test_onehot_negative_indices",
     "test_onehot_with_axis",
     "test_onehot_with_negative_axis",
-    "test_onehot_without_axis",
     "test_optional_get_element_tensor",
     "test_optional_has_element_empty_no_input_name_optional_input",
     "test_optional_has_element_empty_no_input_name_tensor_input",
@@ -318,9 +301,7 @@
     "test_reduce_log_sum_desc_axes_expanded",
     "test_reduce_log_sum_empty_set",
     "test_reduce_log_sum_empty_set_expanded",
-    "test_reduce_log_sum_exp_default_axes_keepdims_example",
     "test_reduce_log_sum_exp_default_axes_keepdims_example_expanded",
-    "test_reduce_log_sum_exp_default_axes_keepdims_random",
     "test_reduce_log_sum_exp_default_axes_keepdims_random_expanded",
     "test_reduce_log_sum_exp_do_not_keepdims_example",
     "test_reduce_log_sum_exp_do_not_keepdims_example_expanded",
@@ -393,15 +374,11 @@
     "test_resize_downsample_scales_cubic_A_n0p5_exclude_outside",
     "test_resize_downsample_scales_cubic_align_corners",
     "test_resize_downsample_scales_cubic_antialias",
-    "test_resize_downsample_scales_linear",
-    "test_resize_downsample_scales_linear_align_corners",
     "test_resize_downsample_scales_linear_antialias",
-    "test_resize_downsample_scales_linear_half_pixel_symmetric",
     "test_resize_downsample_scales_nearest",
     "test_resize_downsample_sizes_cubic",
     "test_resize_downsample_sizes_cubic_antialias",
     "test_resize_downsample_sizes_linear_antialias",
-    "test_resize_downsample_sizes_linear_pytorch_half_pixel",
     "test_resize_downsample_sizes_nearest",
     "test_resize_downsample_sizes_nearest_not_larger",
     "test_resize_downsample_sizes_nearest_not_smaller",
@@ -412,9 +389,6 @@
     "test_resize_upsample_scales_cubic_A_n0p5_exclude_outside",
     "test_resize_upsample_scales_cubic_align_corners",
     "test_resize_upsample_scales_cubic_asymmetric",
-    "test_resize_upsample_scales_linear",
-    "test_resize_upsample_scales_linear_align_corners",
-    "test_resize_upsample_scales_linear_half_pixel_symmetric",
     "test_resize_upsample_scales_nearest",
     "test_resize_upsample_scales_nearest_axes_2_3",
     "test_resize_upsample_scales_nearest_axes_3_2",
@@ -423,7 +397,6 @@
     "test_resize_upsample_sizes_nearest_axes_2_3",
     "test_resize_upsample_sizes_nearest_axes_3_2",
     "test_resize_upsample_sizes_nearest_ceil_half_pixel",
-    "test_resize_upsample_sizes_nearest_floor_align_corners",
     "test_resize_upsample_sizes_nearest_not_larger",
     "test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric",
     "test_reversesequence_batch",
@@ -507,8 +480,6 @@
     "test_simple_rnn_batchwise",
     "test_simple_rnn_defaults",
     "test_simple_rnn_with_initial_bias",
-    "test_sinh",
-    "test_sinh_example",
     "test_slice",
     "test_slice_default_steps",
     "test_slice_end_out_of_bounds",
@@ -563,15 +534,6 @@
     "test_training_dropout_mask",
     "test_training_dropout_zero_ratio",
     "test_training_dropout_zero_ratio_mask",
-    "test_triu",
-    "test_triu_neg",
-    "test_triu_one_row",
-    "test_triu_out_neg_out",
-    "test_triu_out_pos",
-    "test_triu_pos",
-    "test_triu_square",
-    "test_triu_square_neg",
-    "test_triu_zero",
     "test_unique_not_sorted_without_axis",
     "test_unique_sorted_with_axis",
     "test_unique_sorted_with_axis_3d",
@@ -615,11 +577,15 @@
     "test_elu_default",
     "test_eyelike_with_dtype",
     "test_gather_elements_negative_indices",
+    "test_gridsample_nearest",
+    "test_gridsample_nearest_align_corners_0_additional_1",
+    "test_gridsample_nearest_align_corners_1_additional_1",
     "test_hardsigmoid",
     "test_hardsigmoid_default",
     "test_hardsigmoid_example",
     "test_hardswish_expanded",
     "test_max_float64",
+    "test_maxpool_2d_ceil_output_size_reduce_by_one",
     "test_min_float64",
     "test_mod_mixed_sign_int16",
     "test_mod_mixed_sign_int32",
@@ -638,8 +604,16 @@
     "test_qlinearmatmul_3D_int8_float32",
     "test_qlinearmatmul_3D_uint8_float16",
     "test_qlinearmatmul_3D_uint8_float32",
+    "test_reduce_log_sum_exp_default_axes_keepdims_example",
+    "test_reduce_log_sum_exp_default_axes_keepdims_random",
     "test_reduce_min_empty_set",
     "test_reduce_sum_empty_set_non_reduced_axis_zero",
+    "test_resize_downsample_scales_linear",
+    "test_resize_downsample_scales_linear_half_pixel_symmetric",
+    "test_resize_downsample_sizes_linear_pytorch_half_pixel",
+    "test_resize_upsample_scales_linear",
+    "test_resize_upsample_scales_linear_half_pixel_symmetric",
+    "test_resize_upsample_sizes_nearest_floor_align_corners",
     "test_scatter_elements_with_negative_indices",
     "test_sce_mean_no_weight_ii",
     "test_sce_mean_no_weight_ii_log_prob",
@@ -652,6 +626,7 @@
     "test_size_example",
     "test_split_zero_size_splits_opset13",
"test_split_zero_size_splits_opset18", - "test_tril_zero" + "test_tril_zero", + "test_triu_zero" ] } diff --git a/iree_tests/configs/config_pytorch_models_cpu_llvm_task.json b/iree_tests/configs/config_pytorch_models_cpu_llvm_task.json index acc861820..c0f21b08a 100644 --- a/iree_tests/configs/config_pytorch_models_cpu_llvm_task.json +++ b/iree_tests/configs/config_pytorch_models_cpu_llvm_task.json @@ -15,5 +15,7 @@ "opt-125M", // TODO(#17344): need to regenerate .mlirbc "resnet50", // TODO(#17344): need to regenerate .mlirbc ], - "expected_run_failures": [] + "expected_run_failures": [ + "sdxl-vae-decode-tank", + ] } diff --git a/iree_tests/configs/config_sdxl_scheduled_unet_cpu_llvm_task.json b/iree_tests/configs/config_sdxl_scheduled_unet_cpu_llvm_task.json index 31ffd338a..ec69c48ce 100644 --- a/iree_tests/configs/config_sdxl_scheduled_unet_cpu_llvm_task.json +++ b/iree_tests/configs/config_sdxl_scheduled_unet_cpu_llvm_task.json @@ -18,5 +18,7 @@ "skip_compile_tests": [], "skip_run_tests": [], "expected_compile_failures": [], - "expected_run_failures": [] + "expected_run_failures": [ + "sdxl-scheduled-unet-3-tank", + ] } diff --git a/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/input_0.npy b/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/input_0.npy new file mode 100644 index 000000000..5b5fb425b Binary files /dev/null and b/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/input_0.npy differ diff --git a/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/input_1.npy b/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/input_1.npy new file mode 100644 index 000000000..b2f097179 Binary files /dev/null and b/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/input_1.npy differ diff --git a/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/model.mlir b/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/model.mlir new file mode 100644 index 000000000..b374e7e54 --- /dev/null +++ b/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/model.mlir @@ -0,0 +1,8 @@ +module { + func.func @test_dequantizelinear_e4m3fn_float16(%arg0: !torch.vtensor<[5],f8E4M3FN>, %arg1: !torch.vtensor<[],f16>) -> !torch.vtensor<[5],f16> attributes {torch.onnx_meta.ir_version = 10 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} { + %none = torch.constant.none + %0 = torch.operator "onnx.DequantizeLinear"(%arg0, %arg1) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[5],f8E4M3FN>, !torch.vtensor<[],f16>) -> !torch.vtensor<[5],f16> + return %0 : !torch.vtensor<[5],f16> + } +} + diff --git a/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/output_0.npy b/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/output_0.npy new file mode 100644 index 000000000..3b4489613 Binary files /dev/null and b/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/output_0.npy differ diff --git a/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/test_data_flags.txt b/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/test_data_flags.txt new file mode 100644 index 000000000..37bc3f8a2 --- /dev/null +++ b/iree_tests/onnx/node/generated/test_dequantizelinear_e4m3fn_float16/test_data_flags.txt @@ -0,0 +1,3 @@ +--input=5xf32=@input_0.bin +--input=xf16=@input_1.bin +--expected_output=5xf16=@output_0.bin diff 
--git a/iree_tests/onnx/node/generated/test_group_normalization_epsilon_expanded/model.mlir b/iree_tests/onnx/node/generated/test_group_normalization_epsilon_expanded/model.mlir index 2e788b874..5d68211f9 100644 --- a/iree_tests/onnx/node/generated/test_group_normalization_epsilon_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_group_normalization_epsilon_expanded/model.mlir @@ -24,17 +24,17 @@ module { %20 = torch.operator "onnx.Sqrt"(%19) : (!torch.vtensor<[?,?,1],f32>) -> !torch.vtensor<[?,?,1],f32> %21 = torch.operator "onnx.Sub"(%12, %14) : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?,1],f32>) -> !torch.vtensor<[?,?,?],f32> %22 = torch.operator "onnx.Div"(%21, %20) : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?,1],f32>) -> !torch.vtensor<[?,?,?],f32> - %23 = torch.operator "onnx.Reshape"(%22, %3) : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32> - %24 = torch.operator "onnx.Reshape"(%23, %11) : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,?,?],f32> - %25 = torch.operator "onnx.Cast"(%24) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> + %23 = torch.operator "onnx.Reshape"(%22, %3) : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[3,4,2,2],f32> + %24 = torch.operator "onnx.Reshape"(%23, %11) : (!torch.vtensor<[3,4,2,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3,4,4],f32> + %25 = torch.operator "onnx.Cast"(%24) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[3,4,4],f32>) -> !torch.vtensor<[3,4,4],f32> %26 = torch.operator "onnx.Constant"() {torch.onnx.value_ints = [1 : si64, -1 : si64, 1 : si64]} : () -> !torch.vtensor<[3],si64> %27 = torch.operator "onnx.Cast"(%arg1) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[4],f32>) -> !torch.vtensor<[4],f32> %28 = torch.operator "onnx.Cast"(%arg2) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[4],f32>) -> !torch.vtensor<[4],f32> %29 = torch.operator "onnx.Reshape"(%27, %26) : (!torch.vtensor<[4],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[1,4,1],f32> %30 = torch.operator "onnx.Reshape"(%28, %26) : (!torch.vtensor<[4],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[1,4,1],f32> - %31 = torch.operator "onnx.Mul"(%29, %25) : (!torch.vtensor<[1,4,1],f32>, !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,4,?],f32> - %32 = torch.operator "onnx.Add"(%31, %30) : (!torch.vtensor<[?,4,?],f32>, !torch.vtensor<[1,4,1],f32>) -> !torch.vtensor<[?,4,?],f32> - %33 = torch.operator "onnx.Reshape"(%32, %3) : (!torch.vtensor<[?,4,?],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[3,4,2,2],f32> + %31 = torch.operator "onnx.Mul"(%29, %25) : (!torch.vtensor<[1,4,1],f32>, !torch.vtensor<[3,4,4],f32>) -> !torch.vtensor<[3,4,4],f32> + %32 = torch.operator "onnx.Add"(%31, %30) : (!torch.vtensor<[3,4,4],f32>, !torch.vtensor<[1,4,1],f32>) -> !torch.vtensor<[3,4,4],f32> + %33 = torch.operator "onnx.Reshape"(%32, %3) : (!torch.vtensor<[3,4,4],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[3,4,2,2],f32> return %33 : !torch.vtensor<[3,4,2,2],f32> } } diff --git a/iree_tests/onnx/node/generated/test_group_normalization_example_expanded/model.mlir b/iree_tests/onnx/node/generated/test_group_normalization_example_expanded/model.mlir index 1718ecab6..e560e6162 100644 --- a/iree_tests/onnx/node/generated/test_group_normalization_example_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_group_normalization_example_expanded/model.mlir @@ -24,17 +24,17 @@ module { %20 = torch.operator 
"onnx.Sqrt"(%19) : (!torch.vtensor<[?,?,1],f32>) -> !torch.vtensor<[?,?,1],f32> %21 = torch.operator "onnx.Sub"(%12, %14) : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?,1],f32>) -> !torch.vtensor<[?,?,?],f32> %22 = torch.operator "onnx.Div"(%21, %20) : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?,1],f32>) -> !torch.vtensor<[?,?,?],f32> - %23 = torch.operator "onnx.Reshape"(%22, %3) : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32> - %24 = torch.operator "onnx.Reshape"(%23, %11) : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,?,?],f32> - %25 = torch.operator "onnx.Cast"(%24) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> + %23 = torch.operator "onnx.Reshape"(%22, %3) : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[3,4,2,2],f32> + %24 = torch.operator "onnx.Reshape"(%23, %11) : (!torch.vtensor<[3,4,2,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3,4,4],f32> + %25 = torch.operator "onnx.Cast"(%24) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[3,4,4],f32>) -> !torch.vtensor<[3,4,4],f32> %26 = torch.operator "onnx.Constant"() {torch.onnx.value_ints = [1 : si64, -1 : si64, 1 : si64]} : () -> !torch.vtensor<[3],si64> %27 = torch.operator "onnx.Cast"(%arg1) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[4],f32>) -> !torch.vtensor<[4],f32> %28 = torch.operator "onnx.Cast"(%arg2) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[4],f32>) -> !torch.vtensor<[4],f32> %29 = torch.operator "onnx.Reshape"(%27, %26) : (!torch.vtensor<[4],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[1,4,1],f32> %30 = torch.operator "onnx.Reshape"(%28, %26) : (!torch.vtensor<[4],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[1,4,1],f32> - %31 = torch.operator "onnx.Mul"(%29, %25) : (!torch.vtensor<[1,4,1],f32>, !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,4,?],f32> - %32 = torch.operator "onnx.Add"(%31, %30) : (!torch.vtensor<[?,4,?],f32>, !torch.vtensor<[1,4,1],f32>) -> !torch.vtensor<[?,4,?],f32> - %33 = torch.operator "onnx.Reshape"(%32, %3) : (!torch.vtensor<[?,4,?],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[3,4,2,2],f32> + %31 = torch.operator "onnx.Mul"(%29, %25) : (!torch.vtensor<[1,4,1],f32>, !torch.vtensor<[3,4,4],f32>) -> !torch.vtensor<[3,4,4],f32> + %32 = torch.operator "onnx.Add"(%31, %30) : (!torch.vtensor<[3,4,4],f32>, !torch.vtensor<[1,4,1],f32>) -> !torch.vtensor<[3,4,4],f32> + %33 = torch.operator "onnx.Reshape"(%32, %3) : (!torch.vtensor<[3,4,4],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[3,4,2,2],f32> return %33 : !torch.vtensor<[3,4,2,2],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis0_expanded/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis0_expanded/model.mlir index d4d09357b..d6647792d 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis0_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis0_expanded/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[2],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[0],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = 
dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[2],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[3,4],f32>) -> !torch.vtensor<[1,12],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[1,12],f32>) -> !torch.vtensor<[1,12],f32> %12 = torch.operator "onnx.ReduceMean"(%11) {torch.onnx.axes = [1 : si64]} : (!torch.vtensor<[1,12],f32>) -> !torch.vtensor<[1,1],f32> @@ -29,8 +29,8 @@ module { %25 = torch.operator "onnx.Add"(%23, %24) : (!torch.vtensor<[1,12],f32>, !torch.vtensor<[1,12],f32>) -> !torch.vtensor<[1,12],f32> %26 = torch.operator "onnx.Reshape"(%25, %2) : (!torch.vtensor<[1,12],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,4],f32> %27 = torch.operator "onnx.Reciprocal"(%18) : (!torch.vtensor<[1,1],f32>) -> !torch.vtensor<[1,1],f32> - %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1],f32> - %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1],f32> + %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[1,1],f32> + %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[1,1],f32> return %26, %28, %29 : !torch.vtensor<[3,4],f32>, !torch.vtensor<[1,1],f32>, !torch.vtensor<[1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis0_expanded_ver18/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis0_expanded_ver18/model.mlir index 5123153e9..c841817b3 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis0_expanded_ver18/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis0_expanded_ver18/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[2],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[0],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[2],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[3,4],f32>) -> 
!torch.vtensor<[1,12],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[1,12],f32>) -> !torch.vtensor<[1,12],f32> %12 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> @@ -30,8 +30,8 @@ module { %26 = torch.operator "onnx.Add"(%24, %25) : (!torch.vtensor<[1,12],f32>, !torch.vtensor<[1,12],f32>) -> !torch.vtensor<[1,12],f32> %27 = torch.operator "onnx.Reshape"(%26, %2) : (!torch.vtensor<[1,12],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,4],f32> %28 = torch.operator "onnx.Reciprocal"(%19) : (!torch.vtensor<[1,1],f32>) -> !torch.vtensor<[1,1],f32> - %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1],f32> - %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1],f32> + %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[1,1],f32> + %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[1,1],f32> return %27, %29, %30 : !torch.vtensor<[3,4],f32>, !torch.vtensor<[1,1],f32>, !torch.vtensor<[1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis1_expanded/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis1_expanded/model.mlir index 76134be00..85a4bd7a3 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis1_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis1_expanded/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[2],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[3,4],f32>) -> !torch.vtensor<[3,4],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[3,4],f32>) -> !torch.vtensor<[3,4],f32> %12 = torch.operator "onnx.ReduceMean"(%11) {torch.onnx.axes = [1 : si64]} : (!torch.vtensor<[3,4],f32>) -> !torch.vtensor<[3,1],f32> @@ -29,8 +29,8 @@ module { %25 = torch.operator "onnx.Add"(%23, %24) : (!torch.vtensor<[3,4],f32>, !torch.vtensor<[1,4],f32>) -> !torch.vtensor<[3,4],f32> %26 = torch.operator "onnx.Reshape"(%25, %2) : (!torch.vtensor<[3,4],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,4],f32> %27 = torch.operator "onnx.Reciprocal"(%18) : (!torch.vtensor<[3,1],f32>) -> !torch.vtensor<[3,1],f32> - %28 = torch.operator 
"onnx.Reshape"(%12, %9) : (!torch.vtensor<[3,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[3,1],f32> - %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[3,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[3,1],f32> + %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[3,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,1],f32> + %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[3,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,1],f32> return %26, %28, %29 : !torch.vtensor<[3,4],f32>, !torch.vtensor<[3,1],f32>, !torch.vtensor<[3,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis1_expanded_ver18/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis1_expanded_ver18/model.mlir index 2503c2c5e..593f397ab 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis1_expanded_ver18/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_2d_axis1_expanded_ver18/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[2],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[3,4],f32>) -> !torch.vtensor<[3,4],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[3,4],f32>) -> !torch.vtensor<[3,4],f32> %12 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> @@ -30,8 +30,8 @@ module { %26 = torch.operator "onnx.Add"(%24, %25) : (!torch.vtensor<[3,4],f32>, !torch.vtensor<[1,4],f32>) -> !torch.vtensor<[3,4],f32> %27 = torch.operator "onnx.Reshape"(%26, %2) : (!torch.vtensor<[3,4],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,4],f32> %28 = torch.operator "onnx.Reciprocal"(%19) : (!torch.vtensor<[3,1],f32>) -> !torch.vtensor<[3,1],f32> - %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[3,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[3,1],f32> - %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[3,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[3,1],f32> + %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[3,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,1],f32> + %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[3,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,1],f32> return %27, %29, %30 : !torch.vtensor<[3,4],f32>, !torch.vtensor<[3,1],f32>, !torch.vtensor<[3,1],f32> } } diff --git 
a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis0_epsilon_expanded/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis0_epsilon_expanded/model.mlir index f2316cd6d..16d3f9928 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis0_epsilon_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis0_epsilon_expanded/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[3],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[0],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[3],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2,3,5],f32>) -> !torch.vtensor<[1,30],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[1,30],f32>) -> !torch.vtensor<[1,30],f32> %12 = torch.operator "onnx.ReduceMean"(%11) {torch.onnx.axes = [1 : si64]} : (!torch.vtensor<[1,30],f32>) -> !torch.vtensor<[1,1],f32> @@ -29,8 +29,8 @@ module { %25 = torch.operator "onnx.Add"(%23, %24) : (!torch.vtensor<[1,30],f32>, !torch.vtensor<[1,30],f32>) -> !torch.vtensor<[1,30],f32> %26 = torch.operator "onnx.Reshape"(%25, %2) : (!torch.vtensor<[1,30],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,3,5],f32> %27 = torch.operator "onnx.Reciprocal"(%18) : (!torch.vtensor<[1,1],f32>) -> !torch.vtensor<[1,1],f32> - %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1,1],f32> - %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1,1],f32> + %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[1,1,1],f32> + %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[1,1,1],f32> return %26, %28, %29 : !torch.vtensor<[2,3,5],f32>, !torch.vtensor<[1,1,1],f32>, !torch.vtensor<[1,1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis0_epsilon_expanded_ver18/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis0_epsilon_expanded_ver18/model.mlir index fc8c92eed..42d897c36 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis0_epsilon_expanded_ver18/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis0_epsilon_expanded_ver18/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : 
(!torch.vtensor<[3],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[0],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[3],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2,3,5],f32>) -> !torch.vtensor<[1,30],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[1,30],f32>) -> !torch.vtensor<[1,30],f32> %12 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> @@ -30,8 +30,8 @@ module { %26 = torch.operator "onnx.Add"(%24, %25) : (!torch.vtensor<[1,30],f32>, !torch.vtensor<[1,30],f32>) -> !torch.vtensor<[1,30],f32> %27 = torch.operator "onnx.Reshape"(%26, %2) : (!torch.vtensor<[1,30],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,3,5],f32> %28 = torch.operator "onnx.Reciprocal"(%19) : (!torch.vtensor<[1,1],f32>) -> !torch.vtensor<[1,1],f32> - %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1,1],f32> - %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1,1],f32> + %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[1,1,1],f32> + %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[1,1,1],f32> return %27, %29, %30 : !torch.vtensor<[2,3,5],f32>, !torch.vtensor<[1,1,1],f32>, !torch.vtensor<[1,1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis1_epsilon_expanded/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis1_epsilon_expanded/model.mlir index 0513dd123..9fcf2c79c 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis1_epsilon_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis1_epsilon_expanded/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[3],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) 
-> !torch.vtensor<[2],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[2,3,5],f32>) -> !torch.vtensor<[2,15],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[2,15],f32>) -> !torch.vtensor<[2,15],f32> %12 = torch.operator "onnx.ReduceMean"(%11) {torch.onnx.axes = [1 : si64]} : (!torch.vtensor<[2,15],f32>) -> !torch.vtensor<[2,1],f32> @@ -29,8 +29,8 @@ module { %25 = torch.operator "onnx.Add"(%23, %24) : (!torch.vtensor<[2,15],f32>, !torch.vtensor<[1,15],f32>) -> !torch.vtensor<[2,15],f32> %26 = torch.operator "onnx.Reshape"(%25, %2) : (!torch.vtensor<[2,15],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,3,5],f32> %27 = torch.operator "onnx.Reciprocal"(%18) : (!torch.vtensor<[2,1],f32>) -> !torch.vtensor<[2,1],f32> - %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,1,1],f32> - %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,1,1],f32> + %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,1,1],f32> + %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,1,1],f32> return %26, %28, %29 : !torch.vtensor<[2,3,5],f32>, !torch.vtensor<[2,1,1],f32>, !torch.vtensor<[2,1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis1_epsilon_expanded_ver18/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis1_epsilon_expanded_ver18/model.mlir index 35afd9b74..2e598232d 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis1_epsilon_expanded_ver18/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis1_epsilon_expanded_ver18/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[3],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[2,3,5],f32>) -> !torch.vtensor<[2,15],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[2,15],f32>) -> !torch.vtensor<[2,15],f32> %12 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> @@ -30,8 +30,8 @@ module { %26 = torch.operator 
"onnx.Add"(%24, %25) : (!torch.vtensor<[2,15],f32>, !torch.vtensor<[1,15],f32>) -> !torch.vtensor<[2,15],f32> %27 = torch.operator "onnx.Reshape"(%26, %2) : (!torch.vtensor<[2,15],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,3,5],f32> %28 = torch.operator "onnx.Reciprocal"(%19) : (!torch.vtensor<[2,1],f32>) -> !torch.vtensor<[2,1],f32> - %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,1,1],f32> - %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,1,1],f32> + %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,1,1],f32> + %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,1,1],f32> return %27, %29, %30 : !torch.vtensor<[2,3,5],f32>, !torch.vtensor<[2,1,1],f32>, !torch.vtensor<[2,1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis2_epsilon_expanded/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis2_epsilon_expanded/model.mlir index 96ea9a1c9..6c74fb6d2 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis2_epsilon_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis2_epsilon_expanded/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[3],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[3],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[2,3,5],f32>) -> !torch.vtensor<[6,5],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[6,5],f32>) -> !torch.vtensor<[6,5],f32> %12 = torch.operator "onnx.ReduceMean"(%11) {torch.onnx.axes = [1 : si64]} : (!torch.vtensor<[6,5],f32>) -> !torch.vtensor<[6,1],f32> @@ -29,8 +29,8 @@ module { %25 = torch.operator "onnx.Add"(%23, %24) : (!torch.vtensor<[6,5],f32>, !torch.vtensor<[1,5],f32>) -> !torch.vtensor<[6,5],f32> %26 = torch.operator "onnx.Reshape"(%25, %2) : (!torch.vtensor<[6,5],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,3,5],f32> %27 = torch.operator "onnx.Reciprocal"(%18) : (!torch.vtensor<[6,1],f32>) -> !torch.vtensor<[6,1],f32> - %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,1],f32> - %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,1],f32> + %28 = torch.operator 
"onnx.Reshape"(%12, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,3,1],f32> + %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,3,1],f32> return %26, %28, %29 : !torch.vtensor<[2,3,5],f32>, !torch.vtensor<[2,3,1],f32>, !torch.vtensor<[2,3,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis2_epsilon_expanded_ver18/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis2_epsilon_expanded_ver18/model.mlir index c17e7ea2d..fe5fdb7ed 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis2_epsilon_expanded_ver18/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_3d_axis2_epsilon_expanded_ver18/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[3],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[3],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[2,3,5],f32>) -> !torch.vtensor<[6,5],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[6,5],f32>) -> !torch.vtensor<[6,5],f32> %12 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> @@ -30,8 +30,8 @@ module { %26 = torch.operator "onnx.Add"(%24, %25) : (!torch.vtensor<[6,5],f32>, !torch.vtensor<[1,5],f32>) -> !torch.vtensor<[6,5],f32> %27 = torch.operator "onnx.Reshape"(%26, %2) : (!torch.vtensor<[6,5],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,3,5],f32> %28 = torch.operator "onnx.Reciprocal"(%19) : (!torch.vtensor<[6,1],f32>) -> !torch.vtensor<[6,1],f32> - %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,1],f32> - %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,1],f32> + %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,3,1],f32> + %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[2,3,1],f32> return %27, %29, %30 : !torch.vtensor<[2,3,5],f32>, !torch.vtensor<[2,3,1],f32>, !torch.vtensor<[2,3,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis0_expanded/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis0_expanded/model.mlir index b0b775369..b826aa53f 100644 --- 
a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis0_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis0_expanded/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[4],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[0],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[4],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[4],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2,3,4,5],f32>) -> !torch.vtensor<[1,120],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[1,120],f32>) -> !torch.vtensor<[1,120],f32> %12 = torch.operator "onnx.ReduceMean"(%11) {torch.onnx.axes = [1 : si64]} : (!torch.vtensor<[1,120],f32>) -> !torch.vtensor<[1,1],f32> @@ -29,8 +29,8 @@ module { %25 = torch.operator "onnx.Add"(%23, %24) : (!torch.vtensor<[1,120],f32>, !torch.vtensor<[1,120],f32>) -> !torch.vtensor<[1,120],f32> %26 = torch.operator "onnx.Reshape"(%25, %2) : (!torch.vtensor<[1,120],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,5],f32> %27 = torch.operator "onnx.Reciprocal"(%18) : (!torch.vtensor<[1,1],f32>) -> !torch.vtensor<[1,1],f32> - %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1,1,1],f32> - %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1,1,1],f32> + %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[1,1,1,1],f32> + %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[1,1,1,1],f32> return %26, %28, %29 : !torch.vtensor<[2,3,4,5],f32>, !torch.vtensor<[1,1,1,1],f32>, !torch.vtensor<[1,1,1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis0_expanded_ver18/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis0_expanded_ver18/model.mlir index 77fca035f..e8384112d 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis0_expanded_ver18/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis0_expanded_ver18/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[4],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[0],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator 
"onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[4],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[0],si64>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[4],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2,3,4,5],f32>) -> !torch.vtensor<[1,120],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[1,120],f32>) -> !torch.vtensor<[1,120],f32> %12 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> @@ -30,8 +30,8 @@ module { %26 = torch.operator "onnx.Add"(%24, %25) : (!torch.vtensor<[1,120],f32>, !torch.vtensor<[1,120],f32>) -> !torch.vtensor<[1,120],f32> %27 = torch.operator "onnx.Reshape"(%26, %2) : (!torch.vtensor<[1,120],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,5],f32> %28 = torch.operator "onnx.Reciprocal"(%19) : (!torch.vtensor<[1,1],f32>) -> !torch.vtensor<[1,1],f32> - %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1,1,1],f32> - %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[1,1,1,1],f32> + %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[1,1,1,1],f32> + %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[1,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[1,1,1,1],f32> return %27, %29, %30 : !torch.vtensor<[2,3,4,5],f32>, !torch.vtensor<[1,1,1,1],f32>, !torch.vtensor<[1,1,1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis1_expanded/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis1_expanded/model.mlir index 45c5d829c..f83c9ecb3 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis1_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis1_expanded/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[4],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[3],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[4],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 1 : si64} : 
(!torch.vtensor<[2,3,4,5],f32>) -> !torch.vtensor<[2,60],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[2,60],f32>) -> !torch.vtensor<[2,60],f32> %12 = torch.operator "onnx.ReduceMean"(%11) {torch.onnx.axes = [1 : si64]} : (!torch.vtensor<[2,60],f32>) -> !torch.vtensor<[2,1],f32> @@ -29,8 +29,8 @@ module { %25 = torch.operator "onnx.Add"(%23, %24) : (!torch.vtensor<[2,60],f32>, !torch.vtensor<[1,60],f32>) -> !torch.vtensor<[2,60],f32> %26 = torch.operator "onnx.Reshape"(%25, %2) : (!torch.vtensor<[2,60],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,5],f32> %27 = torch.operator "onnx.Reciprocal"(%18) : (!torch.vtensor<[2,1],f32>) -> !torch.vtensor<[2,1],f32> - %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,1,1,1],f32> - %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,1,1,1],f32> + %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,1,1,1],f32> + %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,1,1,1],f32> return %26, %28, %29 : !torch.vtensor<[2,3,4,5],f32>, !torch.vtensor<[2,1,1,1],f32>, !torch.vtensor<[2,1,1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis1_expanded_ver18/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis1_expanded_ver18/model.mlir index d459454b1..8853825be 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis1_expanded_ver18/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis1_expanded_ver18/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[4],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[3],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[1],si64>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[4],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[2,3,4,5],f32>) -> !torch.vtensor<[2,60],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[2,60],f32>) -> !torch.vtensor<[2,60],f32> %12 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> @@ -30,8 +30,8 @@ module { %26 = torch.operator "onnx.Add"(%24, %25) : (!torch.vtensor<[2,60],f32>, !torch.vtensor<[1,60],f32>) -> !torch.vtensor<[2,60],f32> %27 = torch.operator "onnx.Reshape"(%26, %2) : (!torch.vtensor<[2,60],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,5],f32> %28 = torch.operator 
"onnx.Reciprocal"(%19) : (!torch.vtensor<[2,1],f32>) -> !torch.vtensor<[2,1],f32> - %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,1,1,1],f32> - %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,1,1,1],f32> + %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,1,1,1],f32> + %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[2,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,1,1,1],f32> return %27, %29, %30 : !torch.vtensor<[2,3,4,5],f32>, !torch.vtensor<[2,1,1,1],f32>, !torch.vtensor<[2,1,1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis2_expanded/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis2_expanded/model.mlir index 87e1865fc..3994c6d7c 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis2_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis2_expanded/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[4],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[4],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[2,3,4,5],f32>) -> !torch.vtensor<[6,20],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[6,20],f32>) -> !torch.vtensor<[6,20],f32> %12 = torch.operator "onnx.ReduceMean"(%11) {torch.onnx.axes = [1 : si64]} : (!torch.vtensor<[6,20],f32>) -> !torch.vtensor<[6,1],f32> @@ -29,8 +29,8 @@ module { %25 = torch.operator "onnx.Add"(%23, %24) : (!torch.vtensor<[6,20],f32>, !torch.vtensor<[1,20],f32>) -> !torch.vtensor<[6,20],f32> %26 = torch.operator "onnx.Reshape"(%25, %2) : (!torch.vtensor<[6,20],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,5],f32> %27 = torch.operator "onnx.Reciprocal"(%18) : (!torch.vtensor<[6,1],f32>) -> !torch.vtensor<[6,1],f32> - %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,1,1],f32> - %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,1,1],f32> + %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,1,1],f32> + %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,1,1],f32> return %26, %28, %29 : 
!torch.vtensor<[2,3,4,5],f32>, !torch.vtensor<[2,3,1,1],f32>, !torch.vtensor<[2,3,1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis2_expanded_ver18/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis2_expanded_ver18/model.mlir index 3bea71a68..9cf0ab084 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis2_expanded_ver18/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis2_expanded_ver18/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[4],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[2],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[4],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[2,3,4,5],f32>) -> !torch.vtensor<[6,20],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[6,20],f32>) -> !torch.vtensor<[6,20],f32> %12 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> @@ -30,8 +30,8 @@ module { %26 = torch.operator "onnx.Add"(%24, %25) : (!torch.vtensor<[6,20],f32>, !torch.vtensor<[1,20],f32>) -> !torch.vtensor<[6,20],f32> %27 = torch.operator "onnx.Reshape"(%26, %2) : (!torch.vtensor<[6,20],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,5],f32> %28 = torch.operator "onnx.Reciprocal"(%19) : (!torch.vtensor<[6,1],f32>) -> !torch.vtensor<[6,1],f32> - %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,1,1],f32> - %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,1,1],f32> + %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,1,1],f32> + %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[6,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,1,1],f32> return %27, %29, %30 : !torch.vtensor<[2,3,4,5],f32>, !torch.vtensor<[2,3,1,1],f32>, !torch.vtensor<[2,3,1,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis3_expanded/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis3_expanded/model.mlir index b7a1c04d6..b1319d340 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis3_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis3_expanded/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = 
torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[4],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[3],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[3],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[3],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[4],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 3 : si64} : (!torch.vtensor<[2,3,4,5],f32>) -> !torch.vtensor<[24,5],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[24,5],f32>) -> !torch.vtensor<[24,5],f32> %12 = torch.operator "onnx.ReduceMean"(%11) {torch.onnx.axes = [1 : si64]} : (!torch.vtensor<[24,5],f32>) -> !torch.vtensor<[24,1],f32> @@ -29,8 +29,8 @@ module { %25 = torch.operator "onnx.Add"(%23, %24) : (!torch.vtensor<[24,5],f32>, !torch.vtensor<[1,5],f32>) -> !torch.vtensor<[24,5],f32> %26 = torch.operator "onnx.Reshape"(%25, %2) : (!torch.vtensor<[24,5],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,5],f32> %27 = torch.operator "onnx.Reciprocal"(%18) : (!torch.vtensor<[24,1],f32>) -> !torch.vtensor<[24,1],f32> - %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[24,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,4,1],f32> - %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[24,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,4,1],f32> + %28 = torch.operator "onnx.Reshape"(%12, %9) : (!torch.vtensor<[24,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,1],f32> + %29 = torch.operator "onnx.Reshape"(%27, %9) : (!torch.vtensor<[24,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,1],f32> return %26, %28, %29 : !torch.vtensor<[2,3,4,5],f32>, !torch.vtensor<[2,3,4,1],f32>, !torch.vtensor<[2,3,4,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis3_expanded_ver18/model.mlir b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis3_expanded_ver18/model.mlir index 00a481537..59365c826 100644 --- a/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis3_expanded_ver18/model.mlir +++ b/iree_tests/onnx/node/generated/test_layer_normalization_4d_axis3_expanded_ver18/model.mlir @@ -9,8 +9,8 @@ module { %5 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> %6 = torch.operator "onnx.Slice"(%2, %4, %5) : (!torch.vtensor<[4],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[3],si64> %7 = torch.operator "onnx.Sub"(%3, %5) : (!torch.vtensor<[],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> - %8 = torch.operator "onnx.ConstantOfShape"(%7) {torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64> - %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[3],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64> + %8 = torch.operator "onnx.ConstantOfShape"(%7) 
{torch.onnx.value = dense<1> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64> + %9 = torch.operator "onnx.Concat"(%6, %8) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[3],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[4],si64> %10 = torch.operator "onnx.Flatten"(%arg0) {torch.onnx.axis = 3 : si64} : (!torch.vtensor<[2,3,4,5],f32>) -> !torch.vtensor<[24,5],f32> %11 = torch.operator "onnx.Cast"(%10) {torch.onnx.to = 1 : si64} : (!torch.vtensor<[24,5],f32>) -> !torch.vtensor<[24,5],f32> %12 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64> @@ -30,8 +30,8 @@ module { %26 = torch.operator "onnx.Add"(%24, %25) : (!torch.vtensor<[24,5],f32>, !torch.vtensor<[1,5],f32>) -> !torch.vtensor<[24,5],f32> %27 = torch.operator "onnx.Reshape"(%26, %2) : (!torch.vtensor<[24,5],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,5],f32> %28 = torch.operator "onnx.Reciprocal"(%19) : (!torch.vtensor<[24,1],f32>) -> !torch.vtensor<[24,1],f32> - %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[24,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,4,1],f32> - %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[24,1],f32>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[2,3,4,1],f32> + %29 = torch.operator "onnx.Reshape"(%13, %9) : (!torch.vtensor<[24,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,1],f32> + %30 = torch.operator "onnx.Reshape"(%28, %9) : (!torch.vtensor<[24,1],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[2,3,4,1],f32> return %27, %29, %30 : !torch.vtensor<[2,3,4,5],f32>, !torch.vtensor<[2,3,4,1],f32>, !torch.vtensor<[2,3,4,1],f32> } } diff --git a/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/input_0.npy b/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/input_0.npy new file mode 100644 index 000000000..12db14385 Binary files /dev/null and b/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/input_0.npy differ diff --git a/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/model.mlir b/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/model.mlir new file mode 100644 index 000000000..5409e9d45 --- /dev/null +++ b/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/model.mlir @@ -0,0 +1,8 @@ +module { + func.func @test_maxpool_2d_ceil_output_size_reduce_by_one(%arg0: !torch.vtensor<[1,1,2,2],f32>) -> !torch.vtensor<[1,1,1,1],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} { + %none = torch.constant.none + %0 = torch.operator "onnx.MaxPool"(%arg0) {torch.onnx.ceil_mode = 1 : si64, torch.onnx.kernel_shape = [1 : si64, 1 : si64], torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[1,1,2,2],f32>) -> !torch.vtensor<[1,1,1,1],f32> + return %0 : !torch.vtensor<[1,1,1,1],f32> + } +} + diff --git a/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/output_0.npy b/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/output_0.npy new file mode 100644 index 000000000..f57858a92 Binary files /dev/null and b/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/output_0.npy differ diff --git 
a/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/test_data_flags.txt b/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/test_data_flags.txt new file mode 100644 index 000000000..1eca09b71 --- /dev/null +++ b/iree_tests/onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one/test_data_flags.txt @@ -0,0 +1,2 @@ +--input=1x1x2x2xf32=@input_0.bin +--expected_output=1x1x1x1xf32=@output_0.bin diff --git a/iree_tests/onnx/node/generated/test_sce_NCd1_mean_weight_negative_ii_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_NCd1_mean_weight_negative_ii_expanded/model.mlir index 4b36e75ab..7de38a6f2 100644 --- a/iree_tests/onnx/node/generated/test_sce_NCd1_mean_weight_negative_ii_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_NCd1_mean_weight_negative_ii_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,6,5],f32>) -> !torch.vtensor<[3,6,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,6,5],f32>) -> !torch.vtensor<[3,5,6],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,6],f32>) -> !torch.vtensor<[3],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,6],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = -1 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[3,6],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,6],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3,5,6],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = -1 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,6],f32>, !torch.vtensor<[3,6],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded/model.mlir index ee90b011a..705ab4393 100644 --- a/iree_tests/onnx/node/generated/test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,6,5],f32>) -> !torch.vtensor<[3,6,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,6,5],f32>) -> !torch.vtensor<[3,5,6],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,6],f32>) -> !torch.vtensor<[3],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,6],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[3,5,6],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = -1 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[3,6],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,6],f32>, 
!torch.vtensor<[3],si64>) -> !torch.vtensor<[3,5,6],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5,6],f32>) -> !torch.vtensor<[3,5,6],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = -1 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,6],f32>, !torch.vtensor<[3,6],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5,6],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_none_no_weight_negative_ii_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_none_no_weight_negative_ii_expanded/model.mlir index c823f6073..bc31e5154 100644 --- a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_none_no_weight_negative_ii_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_none_no_weight_negative_ii_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,180,5],f32>) -> !torch.vtensor<[3,180,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,180,5],f32>) -> !torch.vtensor<[3,5,180],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,6,6,5],f32>) -> !torch.vtensor<[5],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,180],f32>, !torch.vtensor<[5],si64>) -> !torch.vtensor<[?,?,?,?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = -5 : si64, torch.onnx.reduction = "none"} : (!torch.vtensor<[?,?,?,?,?],f32>, !torch.vtensor<[3,6,6,5],si64>) -> !torch.vtensor<[3,6,6,5],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,180],f32>, !torch.vtensor<[5],si64>) -> !torch.vtensor<[3,5,6,6,5],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = -5 : si64, torch.onnx.reduction = "none"} : (!torch.vtensor<[3,5,6,6,5],f32>, !torch.vtensor<[3,6,6,5],si64>) -> !torch.vtensor<[3,6,6,5],f32> return %7 : !torch.vtensor<[3,6,6,5],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_expanded/model.mlir index d1ee8d7a1..a3050746b 100644 --- a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,180,5],f32>) -> !torch.vtensor<[3,180,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,180,5],f32>) -> !torch.vtensor<[3,5,180],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,6,6,5],f32>) -> !torch.vtensor<[5],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,180],f32>, !torch.vtensor<[5],si64>) -> !torch.vtensor<[?,?,?,?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?,?,?,?],f32>) -> !torch.vtensor<[3,5,6,6,5],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = -5 : si64, torch.onnx.reduction = "none"} : (!torch.vtensor<[?,?,?,?,?],f32>, !torch.vtensor<[3,6,6,5],si64>) -> !torch.vtensor<[3,6,6,5],f32> + 
%6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,180],f32>, !torch.vtensor<[5],si64>) -> !torch.vtensor<[3,5,6,6,5],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5,6,6,5],f32>) -> !torch.vtensor<[3,5,6,6,5],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = -5 : si64, torch.onnx.reduction = "none"} : (!torch.vtensor<[3,5,6,6,5],f32>, !torch.vtensor<[3,6,6,5],si64>) -> !torch.vtensor<[3,6,6,5],f32> return %8, %7 : !torch.vtensor<[3,6,6,5],f32>, !torch.vtensor<[3,5,6,6,5],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_sum_weight_high_ii_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_sum_weight_high_ii_expanded/model.mlir index 49e3e596e..23202545f 100644 --- a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_sum_weight_high_ii_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_sum_weight_high_ii_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 10 : si64, torch.onnx.reduction = "sum"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 10 : si64, torch.onnx.reduction = "sum"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_expanded/model.mlir index 3f4051e5c..11e7d39ef 100644 --- a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[3,5],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 10 : si64, torch.onnx.reduction = "sum"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = 
torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[3,5],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 10 : si64, torch.onnx.reduction = "sum"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_mean_weight_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_mean_weight_expanded/model.mlir index 2fbe9d1b9..39f49710b 100644 --- a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_mean_weight_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_mean_weight_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,2160,5],f32>) -> !torch.vtensor<[3,2160,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,2160,5],f32>) -> !torch.vtensor<[3,5,2160],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,6,6,5,3,4],f32>) -> !torch.vtensor<[7],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2160],f32>, !torch.vtensor<[7],si64>) -> !torch.vtensor<[?,?,?,?,?,?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?,?,?,?,?],f32>, !torch.vtensor<[3,6,6,5,3,4],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2160],f32>, !torch.vtensor<[7],si64>) -> !torch.vtensor<[3,5,6,6,5,3,4],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,6,6,5,3,4],f32>, !torch.vtensor<[3,6,6,5,3,4],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_mean_weight_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_mean_weight_log_prob_expanded/model.mlir index 416d8a00a..bdaa3abc3 100644 --- a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_mean_weight_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_mean_weight_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,2160,5],f32>) -> !torch.vtensor<[3,2160,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,2160,5],f32>) -> !torch.vtensor<[3,5,2160],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,6,6,5,3,4],f32>) -> !torch.vtensor<[7],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2160],f32>, !torch.vtensor<[7],si64>) -> !torch.vtensor<[?,?,?,?,?,?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?,?,?,?,?,?],f32>) -> !torch.vtensor<[3,5,6,6,5,3,4],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?,?,?,?,?],f32>, !torch.vtensor<[3,6,6,5,3,4],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = 
torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2160],f32>, !torch.vtensor<[7],si64>) -> !torch.vtensor<[3,5,6,6,5,3,4],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5,6,6,5,3,4],f32>) -> !torch.vtensor<[3,5,6,6,5,3,4],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,6,6,5,3,4],f32>, !torch.vtensor<[3,6,6,5,3,4],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5,6,6,5,3,4],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_none_no_weight_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_none_no_weight_expanded/model.mlir index b0901eb9a..749e6347d 100644 --- a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_none_no_weight_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_none_no_weight_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,2160,5],f32>) -> !torch.vtensor<[3,2160,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,2160,5],f32>) -> !torch.vtensor<[3,5,2160],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,6,6,5,3,4],f32>) -> !torch.vtensor<[7],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2160],f32>, !torch.vtensor<[7],si64>) -> !torch.vtensor<[?,?,?,?,?,?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "none"} : (!torch.vtensor<[?,?,?,?,?,?,?],f32>, !torch.vtensor<[3,6,6,5,3,4],si64>) -> !torch.vtensor<[3,6,6,5,3,4],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2160],f32>, !torch.vtensor<[7],si64>) -> !torch.vtensor<[3,5,6,6,5,3,4],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "none"} : (!torch.vtensor<[3,5,6,6,5,3,4],f32>, !torch.vtensor<[3,6,6,5,3,4],si64>) -> !torch.vtensor<[3,6,6,5,3,4],f32> return %7 : !torch.vtensor<[3,6,6,5,3,4],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_expanded/model.mlir index fd899d1d2..e87e26b32 100644 --- a/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,2160,5],f32>) -> !torch.vtensor<[3,2160,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,2160,5],f32>) -> !torch.vtensor<[3,5,2160],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,6,6,5,3,4],f32>) -> !torch.vtensor<[7],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2160],f32>, !torch.vtensor<[7],si64>) -> !torch.vtensor<[?,?,?,?,?,?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?,?,?,?,?,?],f32>) -> !torch.vtensor<[3,5,6,6,5,3,4],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "none"} : (!torch.vtensor<[?,?,?,?,?,?,?],f32>, !torch.vtensor<[3,6,6,5,3,4],si64>) -> !torch.vtensor<[3,6,6,5,3,4],f32> + %6 = 
torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2160],f32>, !torch.vtensor<[7],si64>) -> !torch.vtensor<[3,5,6,6,5,3,4],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5,6,6,5,3,4],f32>) -> !torch.vtensor<[3,5,6,6,5,3,4],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "none"} : (!torch.vtensor<[3,5,6,6,5,3,4],f32>, !torch.vtensor<[3,6,6,5,3,4],si64>) -> !torch.vtensor<[3,6,6,5,3,4],f32> return %8, %7 : !torch.vtensor<[3,6,6,5,3,4],f32>, !torch.vtensor<[3,5,6,6,5,3,4],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_3d_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_3d_expanded/model.mlir index 628ba1c0a..a05668fee 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_3d_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_3d_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,2,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,5,2],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,2],f32>) -> !torch.vtensor<[3],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[3,2],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3,5,2],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3,2],si64>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_3d_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_3d_log_prob_expanded/model.mlir index af4251bf8..ae68b7f9a 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_3d_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_3d_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,2,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,5,2],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,2],f32>) -> !torch.vtensor<[3],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[3,5,2],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[3,2],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3,5,2],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5,2],f32>) -> !torch.vtensor<[3,5,2],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "mean"} : 
(!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3,2],si64>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5,2],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_expanded/model.mlir index a83cc8f85..93567a507 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_log_prob_expanded/model.mlir index a6c244317..f0ffcdc75 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[3,5],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[3,5],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_3d_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_3d_expanded/model.mlir index c386761f1..406a38976 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_3d_expanded/model.mlir +++ 
b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_3d_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,2,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,5,2],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,2],f32>) -> !torch.vtensor<[3],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[3,2],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3,5,2],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3,2],si64>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_3d_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_3d_log_prob_expanded/model.mlir index 389d7ade9..29d90b6a1 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_3d_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_3d_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,2,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,5,2],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,2],f32>) -> !torch.vtensor<[3],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[3,5,2],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[3,2],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3,5,2],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5,2],f32>) -> !torch.vtensor<[3,5,2],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3,2],si64>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5,2],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_4d_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_4d_expanded/model.mlir index 41b3609aa..401c7849b 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_4d_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_4d_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : 
(!torch.vtensor<[3,14,5],f32>) -> !torch.vtensor<[3,14,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,14,5],f32>) -> !torch.vtensor<[3,5,14],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,2,7],f32>) -> !torch.vtensor<[4],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,14],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[3,2,7],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,14],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[3,5,2,7],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,2,7],f32>, !torch.vtensor<[3,2,7],si64>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_4d_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_4d_log_prob_expanded/model.mlir index 36777c352..64540aaf8 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_4d_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_4d_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,14,5],f32>) -> !torch.vtensor<[3,14,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,14,5],f32>) -> !torch.vtensor<[3,5,14],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,2,7],f32>) -> !torch.vtensor<[4],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,14],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[3,5,2,7],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[3,2,7],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,14],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[3,5,2,7],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5,2,7],f32>) -> !torch.vtensor<[3,5,2,7],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,2,7],f32>, !torch.vtensor<[3,2,7],si64>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5,2,7],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_expanded/model.mlir index 3941b9348..f85040dd9 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : 
si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_log_prob_expanded/model.mlir index 92deb6ae7..5964d3c35 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_no_weight_ii_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[3,5],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[3,5],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_weight_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_weight_expanded/model.mlir index a6a170e94..811c29cf6 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_weight_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_weight_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, 
!torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_3d_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_3d_expanded/model.mlir index 60d507596..3245b8d47 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_3d_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_3d_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,2,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,5,2],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,2],f32>) -> !torch.vtensor<[3],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 1 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[3,2],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3,5,2],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 1 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3,2],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_3d_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_3d_log_prob_expanded/model.mlir index ad3ca3dab..6c6a3dfbe 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_3d_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_3d_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,2,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,2,5],f32>) -> !torch.vtensor<[3,5,2],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,2],f32>) -> !torch.vtensor<[3],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[3,5,2],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 1 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[3,2],si64>, !torch.vtensor<[5],f32>) -> 
!torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3,5,2],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5,2],f32>) -> !torch.vtensor<[3,5,2],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 1 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,2],f32>, !torch.vtensor<[3,2],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5,2],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_4d_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_4d_expanded/model.mlir index a90d45982..277fe9a35 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_4d_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_4d_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,14,5],f32>) -> !torch.vtensor<[3,14,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,14,5],f32>) -> !torch.vtensor<[3,5,14],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,2,7],f32>) -> !torch.vtensor<[4],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,14],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[3,2,7],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,14],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[3,5,2,7],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,2,7],f32>, !torch.vtensor<[3,2,7],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_4d_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_4d_log_prob_expanded/model.mlir index 88fb63cc3..2943fd2f1 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_4d_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_4d_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,14,5],f32>) -> !torch.vtensor<[3,14,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,14,5],f32>) -> !torch.vtensor<[3,5,14],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5,2,7],f32>) -> !torch.vtensor<[4],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,14],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[3,5,2,7],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[3,2,7],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = 
torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,14],f32>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[3,5,2,7],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5,2,7],f32>) -> !torch.vtensor<[3,5,2,7],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 2 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5,2,7],f32>, !torch.vtensor<[3,2,7],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5,2,7],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_expanded/model.mlir index 5730c72d9..e04187d92 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 0 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 0 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_log_prob_expanded/model.mlir index e5fd9b2e1..f7e9a2196 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_weight_ii_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[3,5],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 0 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> 
!torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[3,5],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.ignore_index = 0 : si64, torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_mean_weight_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_mean_weight_log_prob_expanded/model.mlir index 08d6b4638..8aab8b1ac 100644 --- a/iree_tests/onnx/node/generated/test_sce_mean_weight_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_mean_weight_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[3,5],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[3,5],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "mean"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_none_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_none_expanded/model.mlir index 238f367ad..cc866c2ed 100644 --- a/iree_tests/onnx/node/generated/test_sce_none_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_none_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "none"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "none"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>) -> 
!torch.vtensor<[3],f32> return %7 : !torch.vtensor<[3],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_none_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_none_log_prob_expanded/model.mlir index 8ea5da4ed..570a7d08f 100644 --- a/iree_tests/onnx/node/generated/test_sce_none_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_none_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[3,5],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "none"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[3,5],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "none"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[3],f32> return %8, %7 : !torch.vtensor<[3],f32>, !torch.vtensor<[3,5],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_none_weights_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_none_weights_expanded/model.mlir index ce900ac8c..7692278f7 100644 --- a/iree_tests/onnx/node/generated/test_sce_none_weights_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_none_weights_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "none"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[3],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "none"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[3],f32> return %7 : !torch.vtensor<[3],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_none_weights_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_none_weights_log_prob_expanded/model.mlir index 8e726d747..3ebb3d0ea 100644 --- a/iree_tests/onnx/node/generated/test_sce_none_weights_log_prob_expanded/model.mlir +++ 
b/iree_tests/onnx/node/generated/test_sce_none_weights_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[3,5],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "none"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[3],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[3,5],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1, %arg2) {torch.onnx.reduction = "none"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>, !torch.vtensor<[5],f32>) -> !torch.vtensor<[3],f32> return %8, %7 : !torch.vtensor<[3],f32>, !torch.vtensor<[3,5],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_sum_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_sum_expanded/model.mlir index 7604ba4e0..b9a1cacda 100644 --- a/iree_tests/onnx/node/generated/test_sce_sum_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_sum_expanded/model.mlir @@ -7,8 +7,8 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator "onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "sum"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "sum"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> return %7 : !torch.vtensor<[],f32> } } diff --git a/iree_tests/onnx/node/generated/test_sce_sum_log_prob_expanded/model.mlir b/iree_tests/onnx/node/generated/test_sce_sum_log_prob_expanded/model.mlir index 23c547841..586cfacde 100644 --- a/iree_tests/onnx/node/generated/test_sce_sum_log_prob_expanded/model.mlir +++ b/iree_tests/onnx/node/generated/test_sce_sum_log_prob_expanded/model.mlir @@ -7,9 +7,9 @@ module { %3 = torch.operator "onnx.LogSoftmax"(%2) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,1,5],f32> %4 = torch.operator "onnx.Transpose"(%3) {torch.onnx.perm = [0 : si64, 2 : si64, 1 : si64]} : (!torch.vtensor<[3,1,5],f32>) -> !torch.vtensor<[3,5,1],f32> %5 = torch.operator 
"onnx.Shape"(%arg0) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[2],si64> - %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?],f32> - %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[3,5],f32> - %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "sum"} : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> + %6 = torch.operator "onnx.Reshape"(%4, %5) : (!torch.vtensor<[3,5,1],f32>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[3,5],f32> + %7 = torch.operator "onnx.Identity"(%6) : (!torch.vtensor<[3,5],f32>) -> !torch.vtensor<[3,5],f32> + %8 = torch.operator "onnx.NegativeLogLikelihoodLoss"(%6, %arg1) {torch.onnx.reduction = "sum"} : (!torch.vtensor<[3,5],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[],f32> return %8, %7 : !torch.vtensor<[],f32>, !torch.vtensor<[3,5],f32> } } diff --git a/iree_tests/onnx/node/import_failures.txt b/iree_tests/onnx/node/import_failures.txt index ddd750b5d..c1599c770 100644 --- a/iree_tests/onnx/node/import_failures.txt +++ b/iree_tests/onnx/node/import_failures.txt @@ -11,7 +11,6 @@ test_cast_INT4_to_INT8 test_cast_UINT4_to_FLOAT test_cast_UINT4_to_FLOAT16 test_cast_UINT4_to_UINT8 -test_dequantizelinear_e4m3fn_float16 test_dequantizelinear_int4 test_dequantizelinear_uint4 test_identity_opt @@ -20,7 +19,6 @@ test_if_opt test_if_seq test_loop13_seq test_loop16_seq_none -test_maxpool_2d_ceil_output_size_reduce_by_one test_optional_get_element_optional_sequence test_optional_get_element_optional_tensor test_optional_get_element_sequence diff --git a/iree_tests/onnx/node/import_successes.txt b/iree_tests/onnx/node/import_successes.txt index 4b6a8ab5f..d1bee9e2c 100644 --- a/iree_tests/onnx/node/import_successes.txt +++ b/iree_tests/onnx/node/import_successes.txt @@ -301,6 +301,7 @@ test_dequantizelinear test_dequantizelinear_axis test_dequantizelinear_blocked test_dequantizelinear_e4m3fn +test_dequantizelinear_e4m3fn_float16 test_dequantizelinear_e4m3fn_zero_point test_dequantizelinear_e5m2 test_dequantizelinear_int16 @@ -598,6 +599,7 @@ test_max_uint64 test_max_uint8 test_maxpool_1d_default test_maxpool_2d_ceil +test_maxpool_2d_ceil_output_size_reduce_by_one test_maxpool_2d_default test_maxpool_2d_dilations test_maxpool_2d_pads