[AMDGPU][SIPreEmitPeephole] mustRetainExeczBranch: accept memory instructions in the "then" block #109995

Closed
5 changes: 2 additions & 3 deletions llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -326,9 +326,8 @@ bool SIPreEmitPeephole::mustRetainExeczBranch(
   if (TII->hasUnwantedEffectsWhenEXECEmpty(MI))
     return true;

-  // These instructions are potentially expensive even if EXEC = 0.
-  if (TII->isSMRD(MI) || TII->isVMEM(MI) || TII->isFLAT(MI) ||
-      TII->isDS(MI) || TII->isWaitcnt(MI.getOpcode()))
+  // Waitcnt instructions are potentially expensive even if EXEC = 0.
+  if (TII->isWaitcnt(MI.getOpcode()))
     return true;

   ++NumInstr;
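For reference, here is a minimal standalone C++ sketch of the heuristic after this patch (the `Inst` struct, the `mustRetainExeczBranchSketch` name, and the threshold value are illustrative assumptions, not the actual LLVM code): the `s_cbranch_execz` over the "then" block is retained only when the block contains an instruction with unwanted effects when EXEC = 0, a waitcnt, or enough instructions to exceed the skip threshold. Plain SMRD/VMEM/FLAT/DS memory instructions no longer force the branch to stay; they only count towards the size limit, which is what removes the branches in the tests below.

```cpp
// Simplified model only: the real logic lives in SIPreEmitPeephole::mustRetainExeczBranch.
#include <vector>

struct Inst {
  bool HasUnwantedEffectsWhenEXECEmpty; // side effects visible even with all lanes off
  bool IsWaitcnt;                       // s_waitcnt-style instructions
};

// Returns true if the s_cbranch_execz over the "then" block must be kept.
bool mustRetainExeczBranchSketch(const std::vector<Inst> &ThenBlock,
                                 unsigned SkipThreshold = 12) { // threshold value assumed
  unsigned NumInstr = 0;
  for (const Inst &MI : ThenBlock) {
    // Falling through with EXEC = 0 would still trigger these effects,
    // so the branch has to stay.
    if (MI.HasUnwantedEffectsWhenEXECEmpty)
      return true;
    // Waitcnt instructions are potentially expensive even if EXEC = 0.
    if (MI.IsWaitcnt)
      return true;
    // Memory instructions (SMRD/VMEM/FLAT/DS) no longer force the branch to
    // stay; every instruction only counts towards the size threshold.
    if (++NumInstr >= SkipThreshold)
      return true;
  }
  return false; // short, harmless block: drop the branch and fall through
}

int main() {
  // One buffer_atomic-like instruction, as in the tests below: the branch goes away.
  std::vector<Inst> Then = {{false, false}};
  return mustRetainExeczBranchSketch(Then) ? 1 : 0;
}
```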
36 changes: 12 additions & 24 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/atomic_optimizations_mul_one.ll
@@ -41,13 +41,12 @@ define amdgpu_cs void @atomic_add(<4 x i32> inreg %arg) {
; GCN-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s6, v0
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GCN-NEXT: s_cbranch_execz .LBB0_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: buffer_atomic_add v1, v0, s[0:3], 0 idxen
; GCN-NEXT: .LBB0_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_endpgm
.entry:
call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 1, <4 x i32> %arg, i32 0, i32 0, i32 0, i32 0)
@@ -87,13 +86,12 @@ define amdgpu_cs void @atomic_add_and_format(<4 x i32> inreg %arg) {
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: ; implicit-def: $vgpr1
; GCN-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT: s_cbranch_execz .LBB1_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GCN-NEXT: v_mov_b32_e32 v1, s6
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: buffer_atomic_add v1, v2, s[0:3], 0 idxen glc
; GCN-NEXT: .LBB1_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 s4, v1
@@ -139,13 +137,12 @@ define amdgpu_cs void @atomic_sub(<4 x i32> inreg %arg) {
; GCN-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s6, v0
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GCN-NEXT: s_cbranch_execz .LBB2_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: buffer_atomic_sub v1, v0, s[0:3], 0 idxen
; GCN-NEXT: .LBB2_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_endpgm
.entry:
call i32 @llvm.amdgcn.struct.buffer.atomic.sub.i32(i32 1, <4 x i32> %arg, i32 0, i32 0, i32 0, i32 0)
@@ -185,13 +182,12 @@ define amdgpu_cs void @atomic_sub_and_format(<4 x i32> inreg %arg) {
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: ; implicit-def: $vgpr1
; GCN-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT: s_cbranch_execz .LBB3_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GCN-NEXT: v_mov_b32_e32 v1, s6
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: buffer_atomic_sub v1, v2, s[0:3], 0 idxen glc
; GCN-NEXT: .LBB3_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 s4, v1
@@ -238,14 +234,13 @@ define amdgpu_cs void @atomic_xor(<4 x i32> inreg %arg) {
; GCN-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s6, v0
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GCN-NEXT: s_cbranch_execz .LBB4_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GCN-NEXT: s_and_b32 s4, s4, 1
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: buffer_atomic_xor v1, v0, s[0:3], 0 idxen
; GCN-NEXT: .LBB4_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_endpgm
.entry:
call i32 @llvm.amdgcn.struct.buffer.atomic.xor.i32(i32 1, <4 x i32> %arg, i32 0, i32 0, i32 0, i32 0)
@@ -287,14 +282,13 @@ define amdgpu_cs void @atomic_xor_and_format(<4 x i32> inreg %arg) {
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: ; implicit-def: $vgpr1
; GCN-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT: s_cbranch_execz .LBB5_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GCN-NEXT: s_and_b32 s6, s6, 1
; GCN-NEXT: v_mov_b32_e32 v1, s6
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: buffer_atomic_xor v1, v2, s[0:3], 0 idxen glc
; GCN-NEXT: .LBB5_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 s4, v1
@@ -341,13 +335,12 @@ define amdgpu_cs void @atomic_ptr_add(ptr addrspace(8) inreg %arg) {
; GCN-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s6, v0
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GCN-NEXT: s_cbranch_execz .LBB6_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: buffer_atomic_add v1, v0, s[0:3], 0 idxen
; GCN-NEXT: .LBB6_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_endpgm
.entry:
call i32 @llvm.amdgcn.struct.ptr.buffer.atomic.add.i32(i32 1, ptr addrspace(8) %arg, i32 0, i32 0, i32 0, i32 0)
@@ -389,13 +382,12 @@ define amdgpu_cs void @atomic_ptr_add_and_format(ptr addrspace(8) inreg %arg) {
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: ; implicit-def: $vgpr1
; GCN-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT: s_cbranch_execz .LBB7_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GCN-NEXT: v_mov_b32_e32 v1, s6
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: buffer_atomic_add v1, v2, s[0:3], 0 idxen glc
; GCN-NEXT: .LBB7_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 s4, v1
@@ -443,13 +435,12 @@ define amdgpu_cs void @atomic_ptr_sub(ptr addrspace(8) inreg %arg) {
; GCN-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s6, v0
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GCN-NEXT: s_cbranch_execz .LBB8_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: buffer_atomic_sub v1, v0, s[0:3], 0 idxen
; GCN-NEXT: .LBB8_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_endpgm
.entry:
call i32 @llvm.amdgcn.struct.ptr.buffer.atomic.sub.i32(i32 1, ptr addrspace(8) %arg, i32 0, i32 0, i32 0, i32 0)
@@ -491,13 +482,12 @@ define amdgpu_cs void @atomic_ptr_sub_and_format(ptr addrspace(8) inreg %arg) {
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: ; implicit-def: $vgpr1
; GCN-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT: s_cbranch_execz .LBB9_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GCN-NEXT: v_mov_b32_e32 v1, s6
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: buffer_atomic_sub v1, v2, s[0:3], 0 idxen glc
; GCN-NEXT: .LBB9_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 s4, v1
@@ -546,14 +536,13 @@ define amdgpu_cs void @atomic_ptr_xor(ptr addrspace(8) inreg %arg) {
; GCN-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s6, v0
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GCN-NEXT: s_cbranch_execz .LBB10_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GCN-NEXT: s_and_b32 s4, s4, 1
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: buffer_atomic_xor v1, v0, s[0:3], 0 idxen
; GCN-NEXT: .LBB10_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_endpgm
.entry:
call i32 @llvm.amdgcn.struct.ptr.buffer.atomic.xor.i32(i32 1, ptr addrspace(8) %arg, i32 0, i32 0, i32 0, i32 0)
@@ -597,14 +586,13 @@ define amdgpu_cs void @atomic_ptr_xor_and_format(ptr addrspace(8) inreg %arg) {
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: ; implicit-def: $vgpr1
; GCN-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT: s_cbranch_execz .LBB11_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GCN-NEXT: s_and_b32 s6, s6, 1
; GCN-NEXT: v_mov_b32_e32 v1, s6
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: buffer_atomic_xor v1, v2, s[0:3], 0 idxen glc
; GCN-NEXT: .LBB11_2:
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 s4, v1
@@ -249,11 +249,10 @@ define void @divergent_i1_xor_used_outside_loop_larger_loop_body(i32 %num.elts,
; GFX10-NEXT: .LBB3_6: ; %Flow1
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s6
; GFX10-NEXT: s_cbranch_execz .LBB3_8
; GFX10-NEXT: ; %bb.7: ; %block.after.loop
; GFX10-NEXT: v_mov_b32_e32 v0, 5
; GFX10-NEXT: flat_store_dword v[3:4], v0
; GFX10-NEXT: .LBB3_8: ; %exit
; GFX10-NEXT: ; %bb.8: ; %exit
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
@@ -315,15 +314,14 @@ define void @divergent_i1_icmp_used_outside_loop(i32 %v0, i32 %v1, ptr addrspace
; GFX10-NEXT: v_mov_b32_e32 v4, v5
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v4
; GFX10-NEXT: s_and_saveexec_b32 s7, vcc_lo
; GFX10-NEXT: s_cbranch_execz .LBB4_4
; GFX10-NEXT: ; %bb.3: ; %if.block.0
; GFX10-NEXT: ; in Loop: Header=BB4_2 Depth=1
; GFX10-NEXT: v_ashrrev_i32_e32 v5, 31, v4
; GFX10-NEXT: v_lshlrev_b64 v[8:9], 2, v[4:5]
; GFX10-NEXT: v_add_co_u32 v8, s4, v2, v8
; GFX10-NEXT: v_add_co_ci_u32_e64 v9, s4, v3, v9, s4
; GFX10-NEXT: global_store_dword v[8:9], v4, off
; GFX10-NEXT: .LBB4_4: ; %loop.break.block
; GFX10-NEXT: ; %bb.4: ; %loop.break.block
; GFX10-NEXT: ; in Loop: Header=BB4_2 Depth=1
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s7
@@ -342,10 +340,9 @@ define void @divergent_i1_icmp_used_outside_loop(i32 %v0, i32 %v1, ptr addrspace
; GFX10-NEXT: .LBB4_6: ; %cond.block.1
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_and_saveexec_b32 s4, s6
; GFX10-NEXT: s_cbranch_execz .LBB4_8
; GFX10-NEXT: ; %bb.7: ; %if.block.1
; GFX10-NEXT: global_store_dword v[6:7], v4, off
; GFX10-NEXT: .LBB4_8: ; %exit
; GFX10-NEXT: ; %bb.8: ; %exit
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_setpc_b64 s[30:31]
@@ -536,11 +533,10 @@ define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, ptr addrspace(1) %a
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: s_and_saveexec_b32 s0, s1
; GFX10-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX10-NEXT: s_cbranch_execz .LBB6_6
; GFX10-NEXT: ; %bb.5: ; %break.body
; GFX10-NEXT: v_mov_b32_e32 v0, 10
; GFX10-NEXT: global_store_dword v[4:5], v0, off
; GFX10-NEXT: .LBB6_6: ; %exit
; GFX10-NEXT: ; %bb.6: ; %exit
; GFX10-NEXT: s_endpgm
entry:
br label %A
@@ -437,11 +437,10 @@ define amdgpu_cs void @loop_with_div_break_with_body(ptr addrspace(1) %x, ptr ad
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: s_and_saveexec_b32 s0, s1
; GFX10-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX10-NEXT: s_cbranch_execz .LBB5_6
; GFX10-NEXT: ; %bb.5: ; %break.body
; GFX10-NEXT: v_mov_b32_e32 v0, 10
; GFX10-NEXT: global_store_dword v[4:5], v0, off
; GFX10-NEXT: .LBB5_6: ; %exit
; GFX10-NEXT: ; %bb.6: ; %exit
; GFX10-NEXT: s_endpgm
entry:
br label %A
@@ -152,12 +152,11 @@ define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, i32 %x.size, ptr ad
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s1, s0
; GFX10-NEXT: s_xor_b32 s1, exec_lo, s1
; GFX10-NEXT: s_cbranch_execz .LBB2_7
; GFX10-NEXT: ; %bb.6: ; %break.body
; GFX10-NEXT: v_mov_b32_e32 v0, 10
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: global_store_dword v1, v0, s[2:3]
; GFX10-NEXT: .LBB2_7: ; %exit
; GFX10-NEXT: ; %bb.7: ; %exit
; GFX10-NEXT: s_endpgm
entry:
br label %A
18 changes: 6 additions & 12 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
@@ -68,10 +68,9 @@ define amdgpu_kernel void @v4i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: global_load_dword v1, v2, s[4:5]
; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX906-NEXT: s_cbranch_execz .LBB1_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dword v1, v2, s[6:7]
; GFX906-NEXT: .LBB1_2: ; %bb.2
; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_waitcnt vmcnt(0)
@@ -149,10 +148,9 @@ define amdgpu_kernel void @v8i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[4:5]
; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX906-NEXT: s_cbranch_execz .LBB3_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[6:7]
; GFX906-NEXT: .LBB3_2: ; %bb.2
; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_waitcnt vmcnt(0)
@@ -185,10 +183,9 @@ define amdgpu_kernel void @v16i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: global_load_dwordx4 v[1:4], v5, s[4:5]
; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX906-NEXT: s_cbranch_execz .LBB4_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dwordx4 v[1:4], v5, s[6:7]
; GFX906-NEXT: .LBB4_2: ; %bb.2
; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_waitcnt vmcnt(0)
@@ -222,11 +219,10 @@ define amdgpu_kernel void @v32i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1
; GFX906-NEXT: global_load_dwordx4 v[1:4], v9, s[4:5]
; GFX906-NEXT: global_load_dwordx4 v[5:8], v9, s[4:5] offset:16
; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX906-NEXT: s_cbranch_execz .LBB5_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dwordx4 v[1:4], v9, s[6:7]
; GFX906-NEXT: global_load_dwordx4 v[5:8], v9, s[6:7] offset:16
; GFX906-NEXT: .LBB5_2: ; %bb.2
; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_waitcnt vmcnt(1)
@@ -486,14 +482,13 @@ define amdgpu_kernel void @v8i8_phi_chain(ptr addrspace(1) %src1, ptr addrspace(
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[4:5]
; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX906-NEXT: s_cbranch_execz .LBB8_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[6:7]
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
; GFX906-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
; GFX906-NEXT: s_and_b64 s[4:5], exec, vcc
; GFX906-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX906-NEXT: .LBB8_2: ; %Flow
; GFX906-NEXT: ; %bb.2: ; %Flow
; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX906-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
; GFX906-NEXT: s_cbranch_execz .LBB8_4
@@ -547,11 +542,10 @@ define amdgpu_kernel void @v8i8_multi_block(ptr addrspace(1) %src1, ptr addrspac
; GFX906-NEXT: global_load_dwordx2 v[1:2], v5, s[6:7]
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX906-NEXT: s_cbranch_execz .LBB9_3
; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: global_store_dwordx2 v0, v[3:4], s[8:9]
; GFX906-NEXT: .LBB9_3: ; %Flow
; GFX906-NEXT: ; %bb.3: ; %Flow
; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX906-NEXT: .LBB9_4: ; %bb.3
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]