From e95550f508b850c5144b7d1a63bad5680fe83f6d Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Fri, 1 Feb 2019 21:41:30 +0000 Subject: [PATCH] [X86][AVX] Add VMOVDDUP-VPBROADCASTQ execution domain mapping Noticed in D57514. Differential Revision: https://reviews.llvm.org/D57519 llvm-svn: 352922 --- llvm/lib/Target/X86/X86InstrInfo.cpp | 4 + .../CodeGen/X86/avx2-intrinsics-fast-isel.ll | 2 +- .../X86/avx2-intrinsics-x86-upgrade.ll | 2 +- llvm/test/CodeGen/X86/avx2-vbroadcast.ll | 20 +-- .../avx512-shuffles/broadcast-scalar-int.ll | 2 +- .../avx512-shuffles/broadcast-vector-int.ll | 4 +- .../X86/avx512-shuffles/partial_permute.ll | 6 +- .../X86/avx512vl-intrinsics-fast-isel.ll | 2 +- .../X86/avx512vl-intrinsics-upgrade.ll | 5 +- .../X86/broadcast-elm-cross-splat-vec.ll | 12 +- llvm/test/CodeGen/X86/insert-loaded-scalar.ll | 13 +- .../CodeGen/X86/insertelement-var-index.ll | 13 +- llvm/test/CodeGen/X86/oddshuffles.ll | 6 +- .../CodeGen/X86/sse2-intrinsics-fast-isel.ll | 16 +- llvm/test/CodeGen/X86/subvector-broadcast.ll | 18 +-- .../CodeGen/X86/vector-shift-lshr-sub128.ll | 16 +- .../test/CodeGen/X86/vector-shuffle-128-v2.ll | 26 +-- .../test/CodeGen/X86/vector-shuffle-128-v4.ll | 12 +- .../test/CodeGen/X86/vector-shuffle-256-v4.ll | 12 +- .../X86/vector-shuffle-combining-avx2.ll | 4 +- .../X86/vector-shuffle-combining-xop.ll | 4 +- .../CodeGen/X86/vector-shuffle-combining.ll | 10 +- .../CodeGen/X86/vector-trunc-math-widen.ll | 152 +++++++++--------- llvm/test/CodeGen/X86/vector-trunc-math.ll | 152 +++++++++--------- llvm/test/CodeGen/X86/vector-trunc-widen.ll | 8 +- llvm/test/CodeGen/X86/vector-trunc.ll | 8 +- llvm/test/CodeGen/X86/widened-broadcast.ll | 30 ++-- 27 files changed, 261 insertions(+), 298 deletions(-) diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 71288826656b70..3956bce26f5933 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -6040,6 +6040,8 @@ static const uint16_t ReplaceableInstrs[][3] = { { X86::VBROADCASTSSZ256m, X86::VBROADCASTSSZ256m, X86::VPBROADCASTDZ256m }, { X86::VBROADCASTSSZr, X86::VBROADCASTSSZr, X86::VPBROADCASTDZr }, { X86::VBROADCASTSSZm, X86::VBROADCASTSSZm, X86::VPBROADCASTDZm }, + { X86::VMOVDDUPZ128rr, X86::VMOVDDUPZ128rr, X86::VPBROADCASTQZ128r }, + { X86::VMOVDDUPZ128rm, X86::VMOVDDUPZ128rm, X86::VPBROADCASTQZ128m }, { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256r, X86::VPBROADCASTQZ256r }, { X86::VBROADCASTSDZ256m, X86::VBROADCASTSDZ256m, X86::VPBROADCASTQZ256m }, { X86::VBROADCASTSDZr, X86::VBROADCASTSDZr, X86::VPBROADCASTQZr }, @@ -6130,6 +6132,8 @@ static const uint16_t ReplaceableInstrsAVX2[][3] = { { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr }, { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm}, { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr}, + { X86::VMOVDDUPrm, X86::VMOVDDUPrm, X86::VPBROADCASTQrm}, + { X86::VMOVDDUPrr, X86::VMOVDDUPrr, X86::VPBROADCASTQrr}, { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr}, { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm}, { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr}, diff --git a/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll index d230ffe7073b23..8720c86edffff9 100644 --- a/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll @@ -319,7 +319,7 @@ define <4 x i64> @test_mm256_broadcastd_epi32(<4 x i64> 
%a0) { define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) { ; CHECK-LABEL: test_mm_broadcastq_epi64: ; CHECK: # %bb.0: -; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; CHECK-NEXT: ret{{[l|q]}} %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer ret <2 x i64> %res diff --git a/llvm/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll b/llvm/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll index 25d7d08842034b..b340a66def9652 100644 --- a/llvm/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll +++ b/llvm/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll @@ -234,7 +234,7 @@ declare <8 x i32> @llvm.x86.avx2.pbroadcastd.256(<4 x i32>) nounwind readonly define <2 x i64> @test_x86_avx2_pbroadcastq_128(<2 x i64> %a0) { ; CHECK-LABEL: test_x86_avx2_pbroadcastq_128: ; CHECK: ## %bb.0: -; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; CHECK-NEXT: ret{{[l|q]}} %res = call <2 x i64> @llvm.x86.avx2.pbroadcastq.128(<2 x i64> %a0) ret <2 x i64> %res diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll index 9d4cfcefd64b94..165f9071ffa0e2 100644 --- a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll +++ b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll @@ -189,12 +189,12 @@ define <2 x i64> @Q64(i64* %ptr) nounwind uwtable readnone ssp { ; X32-LABEL: Q64: ; X32: ## %bb.0: ## %entry ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: vpbroadcastq (%eax), %xmm0 +; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; X32-NEXT: retl ; ; X64-LABEL: Q64: ; X64: ## %bb.0: ## %entry -; X64-NEXT: vpbroadcastq (%rdi), %xmm0 +; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; X64-NEXT: retq entry: %q = load i64, i64* %ptr, align 4 @@ -233,7 +233,7 @@ define <8 x i16> @broadcast_mem_v4i16_v8i16(<4 x i16>* %ptr) { ; ; X64-LABEL: broadcast_mem_v4i16_v8i16: ; X64: ## %bb.0: -; X64-NEXT: vpbroadcastq (%rdi), %xmm0 +; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; X64-NEXT: retq %load = load <4 x i16>, <4 x i16>* %ptr %shuf = shufflevector <4 x i16> %load, <4 x i16> undef, <8 x i32> @@ -471,7 +471,7 @@ define <2 x i64> @load_splat_2i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable re ; ; X64-LABEL: load_splat_2i64_2i64_1111: ; X64: ## %bb.0: ## %entry -; X64-NEXT: vpbroadcastq 8(%rdi), %xmm0 +; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; X64-NEXT: retq entry: %ld = load <2 x i64>, <2 x i64>* %ptr @@ -865,12 +865,12 @@ define <4 x i64> @_inreg4xi64(<4 x i64> %a) { define <2 x i64> @_inreg2xi64(<2 x i64> %a) { ; X32-LABEL: _inreg2xi64: ; X32: ## %bb.0: -; X32-NEXT: vpbroadcastq %xmm0, %xmm0 +; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; X32-NEXT: retl ; ; X64-LABEL: _inreg2xi64: ; X64: ## %bb.0: -; X64-NEXT: vpbroadcastq %xmm0, %xmm0 +; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; X64-NEXT: retq %b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer ret <2 x i64> %b @@ -1327,9 +1327,9 @@ define void @isel_crash_2q(i64* %cV_R.addr) { ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; X32-NEXT: vmovaps %xmm0, (%esp) -; X32-NEXT: vpbroadcastq (%eax), %xmm1 +; X32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; X32-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp) -; X32-NEXT: vmovdqa %xmm1, {{[0-9]+}}(%esp) +; X32-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp) ; X32-NEXT: addl $60, %esp ; X32-NEXT: retl ; @@ -1337,9 +1337,9 @@ define void @isel_crash_2q(i64* %cV_R.addr) { ; X64: ## %bb.0: ## %entry ; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; X64-NEXT: vmovaps %xmm0, 
-{{[0-9]+}}(%rsp) -; X64-NEXT: vpbroadcastq (%rdi), %xmm1 +; X64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; X64-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) +; X64-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp) ; X64-NEXT: retq entry: %__a.addr.i = alloca <2 x i64>, align 16 diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll index 74005debfedd86..e1ab24a6156840 100644 --- a/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll +++ b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll @@ -2324,7 +2324,7 @@ define <16 x i32> @test_masked_z_i32_to_16_mem_mask3(i32* %p, <16 x i32> %mask) define <2 x i64> @test_i64_to_2_mem(i64* %p) { ; CHECK-LABEL: test_i64_to_2_mem: ; CHECK: # %bb.0: -; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; CHECK-NEXT: retq %s = load i64, i64* %p %vec = insertelement <2 x i64> undef, i64 %s, i32 0 diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll index 218aa3ffe07ba4..40905682017d72 100644 --- a/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll +++ b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll @@ -6,7 +6,7 @@ define <4 x i32> @test_2xi32_to_4xi32(<4 x i32> %vec) { ; CHECK-LABEL: test_2xi32_to_4xi32: ; CHECK: # %bb.0: -; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; CHECK-NEXT: retq %res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> ret <4 x i32> %res @@ -318,7 +318,7 @@ define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask3(<16 x i32> %vec, <16 x i3 define <4 x i32> @test_2xi32_to_4xi32_mem(<2 x i32>* %vp) { ; CHECK-LABEL: test_2xi32_to_4xi32_mem: ; CHECK: # %bb.0: -; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; CHECK-NEXT: retq %vec = load <2 x i32>, <2 x i32>* %vp %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll index 6099bb763cf9e3..6e02b989435041 100644 --- a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll +++ b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll @@ -1160,9 +1160,9 @@ define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask2(<8 x i32>* %vp, <4 define <4 x i32> @test_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp) { ; CHECK-LABEL: test_8xi32_to_4xi32_perm_mem_mask3: ; CHECK: # %bb.0: -; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,2,3] -; CHECK-NEXT: vpbroadcastq 8(%rdi), %xmm1 -; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,1,2,3] +; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] +; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] ; CHECK-NEXT: retq %vec = load <8 x i32>, <8 x i32>* %vp %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll index 8b513f0a29cdfa..4f1affb94e61f4 100644 --- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll @@ -2104,7 +2104,7 @@ define <4 x i64> @test_mm256_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) { define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) { ; CHECK-LABEL: 
test_mm_broadcastq_epi64: ; CHECK: # %bb.0: -; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; CHECK-NEXT: ret{{[l|q]}} %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer ret <2 x i64> %res diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll index 36a63224e20fa3..6fd1bbbcb1f244 100644 --- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll +++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll @@ -3910,8 +3910,9 @@ define <2 x i64> @test_mask_andnot_epi64_rmb_128(<2 x i64> %a, i64* %ptr_b) { ; X86-LABEL: test_mask_andnot_epi64_rmb_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] -; X86-NEXT: vpbroadcastq (%eax), %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0x08] -; X86-NEXT: vpandn %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0xc1] +; X86-NEXT: vmovddup (%eax), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0x08] +; X86-NEXT: # xmm1 = mem[0,0] +; X86-NEXT: vandnps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x55,0xc1] ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_mask_andnot_epi64_rmb_128: diff --git a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll index 353faabba2de2b..5e68dc87219d27 100644 --- a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll +++ b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll @@ -95,8 +95,7 @@ define <16 x i8> @f16xi8_i64(<16 x i8> %a) { ; ; ALL32-LABEL: f16xi8_i64: ; ALL32: # %bb.0: -; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275] -; ALL32-NEXT: # xmm1 = mem[0,0] +; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275] ; ALL32-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0 ; ALL32-NEXT: retl @@ -692,8 +691,7 @@ define <8 x i16> @f8xi16_i64(<8 x i16> %a) { ; ; ALL32-LABEL: f8xi16_i64: ; ALL32: # %bb.0: -; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309] -; ALL32-NEXT: # xmm1 = mem[0,0] +; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309] ; ALL32-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0 ; ALL32-NEXT: retl @@ -1147,8 +1145,7 @@ define <4 x i32> @f4xi32_i64(<4 x i32> %a) { ; ; ALL32-LABEL: f4xi32_i64: ; ALL32: # %bb.0: -; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314] -; ALL32-NEXT: # xmm1 = mem[0,0] +; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314] ; ALL32-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0 ; ALL32-NEXT: retl @@ -1624,7 +1621,8 @@ define <4 x float> @f4xf32_f64(<4 x float> %a) { ; ; ALL64-LABEL: f4xf32_f64: ; ALL64: # %bb.0: -; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760] +; ALL64-NEXT: vmovddup {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760] +; ALL64-NEXT: # xmm1 = mem[0,0] ; ALL64-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; ALL64-NEXT: vdivps %xmm0, %xmm1, %xmm0 ; ALL64-NEXT: retq diff --git a/llvm/test/CodeGen/X86/insert-loaded-scalar.ll b/llvm/test/CodeGen/X86/insert-loaded-scalar.ll index 66d27788161a36..fe00e50137bbe1 100644 --- a/llvm/test/CodeGen/X86/insert-loaded-scalar.ll +++ b/llvm/test/CodeGen/X86/insert-loaded-scalar.ll @@ -178,15 +178,10 @@ define <2 x i64> 
@load64_ins_eltc_v2i64(i64* %p) nounwind { ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; SSE-NEXT: retq ; -; AVX1-LABEL: load64_ins_eltc_v2i64: -; AVX1: # %bb.0: -; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] -; AVX1-NEXT: retq -; -; AVX2-LABEL: load64_ins_eltc_v2i64: -; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0 -; AVX2-NEXT: retq +; AVX-LABEL: load64_ins_eltc_v2i64: +; AVX: # %bb.0: +; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] +; AVX-NEXT: retq %x = load i64, i64* %p %ins = insertelement <2 x i64> undef, i64 %x, i32 1 ret <2 x i64> %ins diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll index c7bb6603b70bcb..cbb29202cca29e 100644 --- a/llvm/test/CodeGen/X86/insertelement-var-index.ll +++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll @@ -203,15 +203,10 @@ define <2 x i64> @load_i64_v2i64(i64* %p, i32 %y) nounwind { ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; SSE-NEXT: retq ; -; AVX1-LABEL: load_i64_v2i64: -; AVX1: # %bb.0: -; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] -; AVX1-NEXT: retq -; -; AVX2-LABEL: load_i64_v2i64: -; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0 -; AVX2-NEXT: retq +; AVX-LABEL: load_i64_v2i64: +; AVX: # %bb.0: +; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] +; AVX-NEXT: retq %x = load i64, i64* %p %ins = insertelement <2 x i64> undef, i64 %x, i32 %y ret <2 x i64> %ins diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll index 0d501fe5233c03..4d54158e07d1f2 100644 --- a/llvm/test/CodeGen/X86/oddshuffles.ll +++ b/llvm/test/CodeGen/X86/oddshuffles.ll @@ -1673,7 +1673,7 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2, ; XOP-LABEL: interleave_24i32_in: ; XOP: # %bb.0: ; XOP-NEXT: vmovupd (%rsi), %ymm0 -; XOP-NEXT: vmovupd (%rcx), %ymm1 +; XOP-NEXT: vmovups (%rcx), %ymm1 ; XOP-NEXT: vmovups 16(%rcx), %xmm2 ; XOP-NEXT: vmovups (%rdx), %xmm3 ; XOP-NEXT: vmovups 16(%rdx), %xmm4 @@ -1744,8 +1744,8 @@ define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 { ; AVX2-LABEL: wrongorder: ; AVX2: # %bb.0: ; AVX2-NEXT: vbroadcastsd %xmm0, %ymm1 -; AVX2-NEXT: vmovapd %ymm1, 32(%rdi) -; AVX2-NEXT: vmovapd %ymm1, (%rdi) +; AVX2-NEXT: vmovaps %ymm1, 32(%rdi) +; AVX2-NEXT: vmovaps %ymm1, (%rdi) ; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq diff --git a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll index 7fd3dc59cf1f8b..477ce131df5c24 100644 --- a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll @@ -5363,7 +5363,7 @@ define void @test_mm_store_pd1(double *%a0, <2 x double> %a1) { ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] ; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0] ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0] -; X86-AVX1-NEXT: vmovapd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x29,0x00] +; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00] ; X86-AVX1-NEXT: retl # encoding: [0xc3] ; ; X86-AVX512-LABEL: test_mm_store_pd1: @@ -5371,7 +5371,7 @@ define void @test_mm_store_pd1(double *%a0, <2 x double> %a1) { ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] ; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0] ; X86-AVX512-NEXT: # xmm0 = xmm0[0,0] -; X86-AVX512-NEXT: vmovapd %xmm0, (%eax) # EVEX TO VEX 
Compression encoding: [0xc5,0xf9,0x29,0x00] +; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00] ; X86-AVX512-NEXT: retl # encoding: [0xc3] ; ; X64-SSE-LABEL: test_mm_store_pd1: @@ -5385,14 +5385,14 @@ define void @test_mm_store_pd1(double *%a0, <2 x double> %a1) { ; X64-AVX1: # %bb.0: ; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0] ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0] -; X64-AVX1-NEXT: vmovapd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x29,0x07] +; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07] ; X64-AVX1-NEXT: retq # encoding: [0xc3] ; ; X64-AVX512-LABEL: test_mm_store_pd1: ; X64-AVX512: # %bb.0: ; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0] ; X64-AVX512-NEXT: # xmm0 = xmm0[0,0] -; X64-AVX512-NEXT: vmovapd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x07] +; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07] ; X64-AVX512-NEXT: retq # encoding: [0xc3] %arg0 = bitcast double * %a0 to <2 x double>* %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer @@ -5489,7 +5489,7 @@ define void @test_mm_store1_pd(double *%a0, <2 x double> %a1) { ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] ; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0] ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0] -; X86-AVX1-NEXT: vmovapd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x29,0x00] +; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00] ; X86-AVX1-NEXT: retl # encoding: [0xc3] ; ; X86-AVX512-LABEL: test_mm_store1_pd: @@ -5497,7 +5497,7 @@ define void @test_mm_store1_pd(double *%a0, <2 x double> %a1) { ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] ; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0] ; X86-AVX512-NEXT: # xmm0 = xmm0[0,0] -; X86-AVX512-NEXT: vmovapd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x00] +; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00] ; X86-AVX512-NEXT: retl # encoding: [0xc3] ; ; X64-SSE-LABEL: test_mm_store1_pd: @@ -5511,14 +5511,14 @@ define void @test_mm_store1_pd(double *%a0, <2 x double> %a1) { ; X64-AVX1: # %bb.0: ; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0] ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0] -; X64-AVX1-NEXT: vmovapd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x29,0x07] +; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07] ; X64-AVX1-NEXT: retq # encoding: [0xc3] ; ; X64-AVX512-LABEL: test_mm_store1_pd: ; X64-AVX512: # %bb.0: ; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0] ; X64-AVX512-NEXT: # xmm0 = xmm0[0,0] -; X64-AVX512-NEXT: vmovapd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x07] +; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07] ; X64-AVX512-NEXT: retq # encoding: [0xc3] %arg0 = bitcast double * %a0 to <2 x double>* %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/X86/subvector-broadcast.ll b/llvm/test/CodeGen/X86/subvector-broadcast.ll index c0ab9ac47c9ae9..89b40925cd7b5b 100644 --- a/llvm/test/CodeGen/X86/subvector-broadcast.ll +++ b/llvm/test/CodeGen/X86/subvector-broadcast.ll @@ -1562,20 +1562,10 @@ 
define <4 x i32> @test_2xi32_to_4xi32_mem(<2 x i32>* %vp) { ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; X32-NEXT: retl ; -; X64-AVX1-LABEL: test_2xi32_to_4xi32_mem: -; X64-AVX1: # %bb.0: -; X64-AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] -; X64-AVX1-NEXT: retq -; -; X64-AVX2-LABEL: test_2xi32_to_4xi32_mem: -; X64-AVX2: # %bb.0: -; X64-AVX2-NEXT: vpbroadcastq (%rdi), %xmm0 -; X64-AVX2-NEXT: retq -; -; X64-AVX512-LABEL: test_2xi32_to_4xi32_mem: -; X64-AVX512: # %bb.0: -; X64-AVX512-NEXT: vpbroadcastq (%rdi), %xmm0 -; X64-AVX512-NEXT: retq +; X64-LABEL: test_2xi32_to_4xi32_mem: +; X64: # %bb.0: +; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] +; X64-NEXT: retq %vec = load <2 x i32>, <2 x i32>* %vp %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> ret <4 x i32> %res diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll index b321ca9af74aac..8b0ae09b964cc2 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll @@ -702,8 +702,8 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { ; AVX1: # %bb.0: ; AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm2 = mem[0,0] -; AVX1-NEXT: vandpd %xmm2, %xmm1, %xmm1 -; AVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 @@ -722,9 +722,9 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { ; XOPAVX1: # %bb.0: ; XOPAVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321] ; XOPAVX1-NEXT: # xmm2 = mem[0,0] -; XOPAVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0 -; XOPAVX1-NEXT: vandpd %xmm2, %xmm1, %xmm1 -; XOPAVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2 +; XOPAVX1-NEXT: vandps %xmm2, %xmm0, %xmm0 +; XOPAVX1-NEXT: vandps %xmm2, %xmm1, %xmm1 +; XOPAVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2 ; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1 ; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0 ; XOPAVX1-NEXT: retq @@ -1489,7 +1489,7 @@ define <2 x i8> @splatvar_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { ; AVX1: # %bb.0: ; AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm2 = mem[0,0] -; AVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] @@ -1509,9 +1509,9 @@ define <2 x i8> @splatvar_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { ; XOPAVX1: # %bb.0: ; XOPAVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321] ; XOPAVX1-NEXT: # xmm2 = mem[0,0] -; XOPAVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0 +; XOPAVX1-NEXT: vandps %xmm2, %xmm0, %xmm0 ; XOPAVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero -; XOPAVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2 +; XOPAVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2 ; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1 ; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0 ; XOPAVX1-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll index fcf00609d8f1d5..7905d9d45f1110 100644 --- 
a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll @@ -20,12 +20,12 @@ define <2 x i64> @shuffle_v2i64_00(<2 x i64> %a, <2 x i64> %b) { ; ; AVX2-LABEL: shuffle_v2i64_00: ; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0 +; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX2-NEXT: retq ; ; AVX512VL-LABEL: shuffle_v2i64_00: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpbroadcastq %xmm0, %xmm0 +; AVX512VL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX512VL-NEXT: retq %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> ret <2 x i64> %shuffle @@ -69,12 +69,12 @@ define <2 x i64> @shuffle_v2i64_22(<2 x i64> %a, <2 x i64> %b) { ; ; AVX2-LABEL: shuffle_v2i64_22: ; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq %xmm1, %xmm0 +; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0] ; AVX2-NEXT: retq ; ; AVX512VL-LABEL: shuffle_v2i64_22: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpbroadcastq %xmm1, %xmm0 +; AVX512VL-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0] ; AVX512VL-NEXT: retq %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> ret <2 x i64> %shuffle @@ -1264,20 +1264,10 @@ define <2 x i64> @insert_dup_mem_v2i64(i64* %ptr) { ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; SSE-NEXT: retq ; -; AVX1-LABEL: insert_dup_mem_v2i64: -; AVX1: # %bb.0: -; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] -; AVX1-NEXT: retq -; -; AVX2-LABEL: insert_dup_mem_v2i64: -; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0 -; AVX2-NEXT: retq -; -; AVX512VL-LABEL: insert_dup_mem_v2i64: -; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpbroadcastq (%rdi), %xmm0 -; AVX512VL-NEXT: retq +; AVX-LABEL: insert_dup_mem_v2i64: +; AVX: # %bb.0: +; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] +; AVX-NEXT: retq %tmp = load i64, i64* %ptr, align 1 %tmp1 = insertelement <2 x i64> undef, i64 %tmp, i32 0 %tmp2 = shufflevector <2 x i64> %tmp1, <2 x i64> undef, <2 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll index 149fe3d9dcb983..211c57c63ac7a6 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll @@ -539,9 +539,9 @@ define <4 x i32> @shuffle_v4i32_0451(<4 x i32> %a, <4 x i32> %b) { ; ; AVX2OR512VL-LABEL: shuffle_v4i32_0451: ; AVX2OR512VL: # %bb.0: -; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] -; AVX2OR512VL-NEXT: vpbroadcastq %xmm0, %xmm0 -; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] +; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,1,1] +; AVX2OR512VL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] +; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] ; AVX2OR512VL-NEXT: retq %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> ret <4 x i32> %shuffle @@ -595,9 +595,9 @@ define <4 x i32> @shuffle_v4i32_4015(<4 x i32> %a, <4 x i32> %b) { ; ; AVX2OR512VL-LABEL: shuffle_v4i32_4015: ; AVX2OR512VL: # %bb.0: -; AVX2OR512VL-NEXT: vpbroadcastq %xmm1, %xmm1 -; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] -; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3] +; AVX2OR512VL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0] +; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1] +; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3] ; AVX2OR512VL-NEXT: retq %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> ret <4 x i32> %shuffle diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll 
b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll index 83316d0a80cb79..775caa88bd824c 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll @@ -49,7 +49,7 @@ define <4 x double> @shuffle_v4f64_0020(<4 x double> %a, <4 x double> %b) { ; AVX1-LABEL: shuffle_v4f64_0020: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -321,7 +321,7 @@ define <4 x double> @shuffle_v4f64_0423(<4 x double> %a, <4 x double> %b) { ; ALL-LABEL: shuffle_v4f64_0423: ; ALL: # %bb.0: ; ALL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0] -; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3] +; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] ; ALL-NEXT: retq %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> ret <4 x double> %shuffle @@ -923,14 +923,14 @@ define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) { ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7] ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] ; AVX1-NEXT: retq ; ; AVX2-LABEL: shuffle_v4i64_0412: ; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1 -; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,2] -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] +; AVX2-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0] +; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,2] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] ; AVX2-NEXT: retq ; ; AVX512VL-LABEL: shuffle_v4i64_0412: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll index bce3ac3c63b76b..40ba3c1a9acac4 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll @@ -217,7 +217,7 @@ define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) { define <16 x i8> @combine_pshufb_as_vpbroadcastq128(<16 x i8> %a) { ; CHECK-LABEL: combine_pshufb_as_vpbroadcastq128: ; CHECK: # %bb.0: -; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; CHECK-NEXT: ret{{[l|q]}} %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> ) ret <16 x i8> %1 @@ -648,7 +648,7 @@ define <32 x i8> @combine_pshufb_as_packuswb(<16 x i16> %a0, <16 x i16> %a1) nou define <16 x i8> @combine_pshufb_insertion_as_broadcast_v2i64(i64 %a0) { ; X86-LABEL: combine_pshufb_insertion_as_broadcast_v2i64: ; X86: # %bb.0: -; X86-NEXT: vpbroadcastq {{[0-9]+}}(%esp), %xmm0 +; X86-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; X86-NEXT: retl ; ; X64-LABEL: combine_pshufb_insertion_as_broadcast_v2i64: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll index 7da6afda5686fe..4500b63041d0be 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll @@ -234,7 +234,7 @@ define void @buildvector_v4f32_0404(float %a, float %b, <4 x float>* %ptr) { ; X86-AVX2: # %bb.0: ; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX2-NEXT: vmovddup 
{{.*#+}} xmm0 = mem[0,0] -; X86-AVX2-NEXT: vmovapd %xmm0, (%eax) +; X86-AVX2-NEXT: vmovaps %xmm0, (%eax) ; X86-AVX2-NEXT: retl ; ; X64-AVX-LABEL: buildvector_v4f32_0404: @@ -247,7 +247,7 @@ define void @buildvector_v4f32_0404(float %a, float %b, <4 x float>* %ptr) { ; X64-AVX2: # %bb.0: ; X64-AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X64-AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] -; X64-AVX2-NEXT: vmovapd %xmm0, (%rdi) +; X64-AVX2-NEXT: vmovaps %xmm0, (%rdi) ; X64-AVX2-NEXT: retq %v0 = insertelement <4 x float> undef, float %a, i32 0 %v1 = insertelement <4 x float> %v0, float %b, i32 1 diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll index 61d3fc3ba3d9c5..5833c3c390d322 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll @@ -677,7 +677,7 @@ define <4 x i32> @combine_nested_undef_test4(<4 x i32> %A, <4 x i32> %B) { ; ; AVX2-LABEL: combine_nested_undef_test4: ; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0 +; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX2-NEXT: retq %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> @@ -1044,8 +1044,8 @@ define <4 x i32> @combine_nested_undef_test21(<4 x i32> %A, <4 x i32> %B) { ; ; AVX2-LABEL: combine_nested_undef_test21: ; AVX2: # %bb.0: -; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3] -; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0 +; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3] +; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX2-NEXT: retq %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> @@ -1114,7 +1114,7 @@ define <4 x i32> @combine_nested_undef_test25(<4 x i32> %A, <4 x i32> %B) { ; ; AVX2-LABEL: combine_nested_undef_test25: ; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0 +; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX2-NEXT: retq %1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> @@ -1149,7 +1149,7 @@ define <4 x i32> @combine_nested_undef_test27(<4 x i32> %A, <4 x i32> %B) { ; ; AVX2-LABEL: combine_nested_undef_test27: ; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0 +; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX2-NEXT: retq %1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> diff --git a/llvm/test/CodeGen/X86/vector-trunc-math-widen.ll b/llvm/test/CodeGen/X86/vector-trunc-math-widen.ll index 1cda401a82dfa7..553562e5e82bd5 100644 --- a/llvm/test/CodeGen/X86/vector-trunc-math-widen.ll +++ b/llvm/test/CodeGen/X86/vector-trunc-math-widen.ll @@ -681,21 +681,21 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: 
vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -1518,21 +1518,21 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -3119,28 +3119,28 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin ; ; AVX1-LABEL: trunc_and_v16i64_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vandpd %ymm4, %ymm0, %ymm0 -; AVX1-NEXT: vandpd %ymm5, %ymm1, %ymm1 -; AVX1-NEXT: vandpd %ymm6, %ymm2, %ymm2 -; AVX1-NEXT: vandpd %ymm7, %ymm3, %ymm3 +; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0 +; AVX1-NEXT: vandps %ymm5, %ymm1, %ymm1 +; AVX1-NEXT: vandps %ymm6, %ymm2, %ymm2 +; AVX1-NEXT: vandps %ymm7, %ymm3, %ymm3 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps 
%xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -3518,21 +3518,21 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -3915,28 +3915,28 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin ; ; AVX1-LABEL: trunc_xor_v16i64_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vxorpd %ymm4, %ymm0, %ymm0 -; AVX1-NEXT: vxorpd %ymm5, %ymm1, %ymm1 -; AVX1-NEXT: vxorpd %ymm6, %ymm2, %ymm2 -; AVX1-NEXT: vxorpd %ymm7, %ymm3, %ymm3 +; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0 +; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1 +; AVX1-NEXT: vxorps %ymm6, %ymm2, %ymm2 +; AVX1-NEXT: vxorps %ymm7, %ymm3, %ymm3 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -4314,21 +4314,21 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd 
%xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -4711,28 +4711,28 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind ; ; AVX1-LABEL: trunc_or_v16i64_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vorpd %ymm4, %ymm0, %ymm0 -; AVX1-NEXT: vorpd %ymm5, %ymm1, %ymm1 -; AVX1-NEXT: vorpd %ymm6, %ymm2, %ymm2 -; AVX1-NEXT: vorpd %ymm7, %ymm3, %ymm3 +; AVX1-NEXT: vorps %ymm4, %ymm0, %ymm0 +; AVX1-NEXT: vorps %ymm5, %ymm1, %ymm1 +; AVX1-NEXT: vorps %ymm6, %ymm2, %ymm2 +; AVX1-NEXT: vorps %ymm7, %ymm3, %ymm3 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -5110,21 +5110,21 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll index b06f3294e0a273..fdc558aae934d9 100644 --- a/llvm/test/CodeGen/X86/vector-trunc-math.ll +++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll @@ -681,21 +681,21 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -1518,21 +1518,21 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -3119,28 +3119,28 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> 
%a0, <16 x i64> %a1) nounwin ; ; AVX1-LABEL: trunc_and_v16i64_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vandpd %ymm4, %ymm0, %ymm0 -; AVX1-NEXT: vandpd %ymm5, %ymm1, %ymm1 -; AVX1-NEXT: vandpd %ymm6, %ymm2, %ymm2 -; AVX1-NEXT: vandpd %ymm7, %ymm3, %ymm3 +; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0 +; AVX1-NEXT: vandps %ymm5, %ymm1, %ymm1 +; AVX1-NEXT: vandps %ymm6, %ymm2, %ymm2 +; AVX1-NEXT: vandps %ymm7, %ymm3, %ymm3 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -3518,21 +3518,21 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -3915,28 +3915,28 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin ; ; AVX1-LABEL: trunc_xor_v16i64_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vxorpd %ymm4, %ymm0, %ymm0 -; AVX1-NEXT: vxorpd %ymm5, %ymm1, %ymm1 -; AVX1-NEXT: vxorpd %ymm6, %ymm2, %ymm2 -; AVX1-NEXT: vxorpd %ymm7, %ymm3, %ymm3 +; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0 +; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1 +; AVX1-NEXT: vxorps %ymm6, %ymm2, %ymm2 +; AVX1-NEXT: vxorps %ymm7, %ymm3, %ymm3 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: 
vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -4314,21 +4314,21 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -4711,28 +4711,28 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind ; ; AVX1-LABEL: trunc_or_v16i64_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vorpd %ymm4, %ymm0, %ymm0 -; AVX1-NEXT: vorpd %ymm5, %ymm1, %ymm1 -; AVX1-NEXT: vorpd %ymm6, %ymm2, %ymm2 -; AVX1-NEXT: vorpd %ymm7, %ymm3, %ymm3 +; AVX1-NEXT: vorps %ymm4, %ymm0, %ymm0 +; AVX1-NEXT: vorps %ymm5, %ymm1, %ymm1 +; AVX1-NEXT: vorps %ymm6, %ymm2, %ymm2 +; AVX1-NEXT: vorps %ymm7, %ymm3, %ymm3 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -5110,21 +5110,21 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm5 = mem[0,0] -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-trunc-widen.ll b/llvm/test/CodeGen/X86/vector-trunc-widen.ll index be44f4618a11bf..c3f5729b137dad 100644 --- a/llvm/test/CodeGen/X86/vector-trunc-widen.ll +++ b/llvm/test/CodeGen/X86/vector-trunc-widen.ll @@ -288,12 +288,12 @@ define void @trunc8i64_8i8(<8 x i64> %a) { ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm3 = mem[0,0] -; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-trunc.ll b/llvm/test/CodeGen/X86/vector-trunc.ll index ba353fe60a15d8..98362b1176226d 100644 --- a/llvm/test/CodeGen/X86/vector-trunc.ll +++ b/llvm/test/CodeGen/X86/vector-trunc.ll @@ -288,12 +288,12 @@ define void @trunc8i64_8i8(<8 x i64> %a) { ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321] ; AVX1-NEXT: # xmm3 = mem[0,0] -; AVX1-NEXT: vandpd %xmm3, 
%xmm2, %xmm2 -; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/widened-broadcast.ll b/llvm/test/CodeGen/X86/widened-broadcast.ll index ecbeb532f27ea9..c08619975c64ca 100644 --- a/llvm/test/CodeGen/X86/widened-broadcast.ll +++ b/llvm/test/CodeGen/X86/widened-broadcast.ll @@ -101,12 +101,12 @@ define <4 x i32> @load_splat_4i32_4i32_0101(<4 x i32>* %ptr) nounwind uwtable re ; ; AVX2-LABEL: load_splat_4i32_4i32_0101: ; AVX2: # %bb.0: # %entry -; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0 +; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; AVX2-NEXT: retq ; ; AVX512-LABEL: load_splat_4i32_4i32_0101: ; AVX512: # %bb.0: # %entry -; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0 +; AVX512-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; AVX512-NEXT: retq entry: %ld = load <4 x i32>, <4 x i32>* %ptr @@ -198,12 +198,12 @@ define <8 x i16> @load_splat_8i16_8i16_01230123(<8 x i16>* %ptr) nounwind uwtabl ; ; AVX2-LABEL: load_splat_8i16_8i16_01230123: ; AVX2: # %bb.0: # %entry -; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0 +; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; AVX2-NEXT: retq ; ; AVX512-LABEL: load_splat_8i16_8i16_01230123: ; AVX512: # %bb.0: # %entry -; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0 +; AVX512-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; AVX512-NEXT: retq entry: %ld = load <8 x i16>, <8 x i16>* %ptr @@ -368,12 +368,12 @@ define <16 x i8> @load_splat_16i8_16i8_0123456701234567(<16 x i8>* %ptr) nounwin ; ; AVX2-LABEL: load_splat_16i8_16i8_0123456701234567: ; AVX2: # %bb.0: # %entry -; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0 +; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; AVX2-NEXT: retq ; ; AVX512-LABEL: load_splat_16i8_16i8_0123456701234567: ; AVX512: # %bb.0: # %entry -; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0 +; AVX512-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] ; AVX512-NEXT: retq entry: %ld = load <16 x i8>, <16 x i8>* %ptr @@ -580,20 +580,10 @@ define <4 x i32> @load_splat_4i32_2i32_0101(<2 x i32>* %vp) { ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; SSE-NEXT: retq ; -; AVX1-LABEL: load_splat_4i32_2i32_0101: -; AVX1: # %bb.0: -; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] -; AVX1-NEXT: retq -; -; AVX2-LABEL: load_splat_4i32_2i32_0101: -; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0 -; AVX2-NEXT: retq -; -; AVX512-LABEL: load_splat_4i32_2i32_0101: -; AVX512: # %bb.0: -; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0 -; AVX512-NEXT: retq +; AVX-LABEL: load_splat_4i32_2i32_0101: +; AVX: # %bb.0: +; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] +; AVX-NEXT: retq %vec = load <2 x i32>, <2 x i32>* %vp %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> ret <4 x i32> %res
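For readers unfamiliar with the ReplaceableInstrs mechanism this patch extends: each table row lists the packed-single, packed-double, and packed-integer encodings of an equivalent operation, and the execution-domain fixup pass rewrites an instruction to whichever column matches the domain of its neighbours. The bulk of the diff above is simply the expected test churn from the new rows: vpbroadcastq becomes vmovddup (and vice versa) wherever the surrounding code sits in the FP domain. Below is a minimal, self-contained C++ sketch of that table lookup; the enum values, numeric opcodes and the getEquivalentOpcode helper are invented for illustration only, while the real tables and lookup helper live in X86InstrInfo.cpp and operate on actual X86:: opcodes.

#include <cstdint>
#include <optional>

// Simplified stand-in for the three execution domains the X86 domain-fixing
// pass distinguishes: packed-single, packed-double, packed-integer.
enum Domain { PS = 0, PD = 1, Int = 2 };

// Hypothetical opcode constants standing in for X86::VMOVDDUPrr etc.
enum Opcode : uint16_t {
  VMOVDDUPrr = 100,      // FP-domain 64-bit splat (register)
  VPBROADCASTQrr = 101,  // integer-domain 64-bit splat (register)
  VMOVDDUPrm = 102,      // FP-domain 64-bit splat (memory)
  VPBROADCASTQrm = 103,  // integer-domain 64-bit splat (memory)
};

// Each row mirrors a ReplaceableInstrs entry: {PS column, PD column, Int column}.
// VMOVDDUP has no packed-single form, so the PS and PD columns share the same
// opcode, exactly as in the rows added by this patch.
static const uint16_t ReplaceableInstrsSketch[][3] = {
    {VMOVDDUPrr, VMOVDDUPrr, VPBROADCASTQrr},
    {VMOVDDUPrm, VMOVDDUPrm, VPBROADCASTQrm},
};

// Return the opcode equivalent to Opc in the requested domain, if any row of
// the table contains Opc (roughly what the lookup helper in X86InstrInfo.cpp
// does for the real tables).
std::optional<uint16_t> getEquivalentOpcode(uint16_t Opc, Domain D) {
  for (const auto &Row : ReplaceableInstrsSketch)
    for (uint16_t Col : Row)
      if (Col == Opc)
        return Row[D];
  return std::nullopt;
}

int main() {
  // With this mapping, a VPBROADCASTQ whose users are FP-domain instructions
  // can be re-emitted as VMOVDDUP, avoiding a domain-crossing penalty.
  auto Remapped = getEquivalentOpcode(VPBROADCASTQrr, PD);
  return (Remapped && *Remapped == VMOVDDUPrr) ? 0 : 1;
}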