AMDGPU: Legalize atomicrmw fadd for v2f16/v2bf16 for local memory
Make this legal for gfx940 and gfx12
arsenm committed Jun 13, 2024
1 parent 5e8cf0b commit 86c3c5a
Showing 7 changed files with 367 additions and 314 deletions.
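
For context, the pattern this commit legalizes, shown as a minimal IR sketch distilled from the tests below (the function name is illustrative):

    define <2 x half> @lds_fadd_v2f16(ptr addrspace(3) %ptr, <2 x half> %val) {
      ; On subtargets with HasAtomicDsPkAdd16Insts (gfx940, gfx12), this now
      ; selects a single ds_pk_add_rtn_f16 instead of being expanded into a
      ; ds_cmpst_rtn_b32 compare-and-swap loop.
      %old = atomicrmw fadd ptr addrspace(3) %ptr, <2 x half> %val seq_cst
      ret <2 x half> %old
    }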
llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp: 4 additions & 1 deletion
@@ -301,6 +301,9 @@ static const LLT V10S16 = LLT::fixed_vector(10, 16);
 static const LLT V12S16 = LLT::fixed_vector(12, 16);
 static const LLT V16S16 = LLT::fixed_vector(16, 16);
 
+static const LLT V2F16 = LLT::fixed_vector(2, LLT::float16());
+static const LLT V2BF16 = V2F16; // FIXME
+
 static const LLT V2S32 = LLT::fixed_vector(2, 32);
 static const LLT V3S32 = LLT::fixed_vector(3, 32);
 static const LLT V4S32 = LLT::fixed_vector(4, 32);
@@ -1638,7 +1641,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
     if (ST.hasLdsAtomicAddF64())
       Atomic.legalFor({{S64, LocalPtr}});
     if (ST.hasAtomicDsPkAdd16Insts())
-      Atomic.legalFor({{V2S16, LocalPtr}});
+      Atomic.legalFor({{V2F16, LocalPtr}, {V2BF16, LocalPtr}});
   }
   if (ST.hasAtomicFaddInsts())
     Atomic.legalFor({{S32, GlobalPtr}});
llvm/lib/Target/AMDGPU/DSInstructions.td: 6 additions & 0 deletions
@@ -1082,6 +1082,12 @@ defm : DSAtomicRetNoRetPat_mc<DS_MAX_RTN_U32, DS_MAX_U32, i32, "atomic_load_umax">;
 defm : DSAtomicRetNoRetPat_mc<DS_MIN_RTN_F32, DS_MIN_F32, f32, "atomic_load_fmin">;
 defm : DSAtomicRetNoRetPat_mc<DS_MAX_RTN_F32, DS_MAX_F32, f32, "atomic_load_fmax">;
 
+
+let SubtargetPredicate = HasAtomicDsPkAdd16Insts in {
+defm : DSAtomicRetNoRetPat_mc<DS_PK_ADD_RTN_F16, DS_PK_ADD_F16, v2f16, "atomic_load_fadd">;
+defm : DSAtomicRetNoRetPat_mc<DS_PK_ADD_RTN_BF16, DS_PK_ADD_BF16, v2bf16, "atomic_load_fadd">;
+}
+
 let SubtargetPredicate = isGFX6GFX7GFX8GFX9GFX10 in {
 defm : DSAtomicCmpXChgSwapped_mc<DS_CMPST_RTN_B32, DS_CMPST_B32, i32, "atomic_cmp_swap">;
 }
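
The second pattern covers the packed bf16 form, for which no test appears in the diffs excerpted here. A hedged sketch of IR that should now select the bf16 instruction under the same predicate (function name illustrative):

    define void @lds_fadd_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
      ; With the result unused, the no-return form ds_pk_add_bf16 is
      ; expected rather than ds_pk_add_rtn_bf16.
      %unused = atomicrmw fadd ptr addrspace(3) %ptr, <2 x bfloat> %val seq_cst
      ret void
    }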
llvm/lib/Target/AMDGPU/SIISelLowering.cpp: 13 additions & 1 deletion
@@ -15931,6 +15931,16 @@ static OptimizationRemark emitAtomicRMWLegalRemark(const AtomicRMWInst *RMW) {
          << " operation at memory scope " << MemScope;
 }
 
+static bool isHalf2OrBFloat2(Type *Ty) {
+  if (FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty)) {
+    Type *EltTy = VT->getElementType();
+    return VT->getNumElements() == 2 &&
+           (EltTy->isHalfTy() || EltTy->isBFloatTy());
+  }
+
+  return false;
+}
+
 TargetLowering::AtomicExpansionKind
 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
   unsigned AS = RMW->getPointerAddressSpace();
@@ -15989,7 +15999,9 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
                  : AtomicExpansionKind::CmpXChg;
     }
 
-    // TODO: Handle v2f16/v2bf16 cases for gfx940
+    if (Subtarget->hasAtomicDsPkAdd16Insts() && isHalf2OrBFloat2(Ty))
+      return AtomicExpansionKind::None;
+
     return AtomicExpansionKind::CmpXChg;
   }
 
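
Note that isHalf2OrBFloat2 gates the new path narrowly: only two-element half/bfloat vectors return AtomicExpansionKind::None. Everything else still falls through to the CmpXChg expansion, as in this illustrative sketch (not taken from this commit's tests):

    define <4 x half> @lds_fadd_v4f16(ptr addrspace(3) %ptr, <4 x half> %val) {
      ; <4 x half> fails the two-element check, so shouldExpandAtomicRMWInIR
      ; still returns AtomicExpansionKind::CmpXChg and the AtomicExpand pass
      ; lowers this to a compare-exchange loop.
      %old = atomicrmw fadd ptr addrspace(3) %ptr, <4 x half> %val seq_cst
      ret <4 x half> %old
    }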
llvm/test/CodeGen/AMDGPU/GlobalISel/fp-atomics-gfx940.ll: 2 additions & 29 deletions
@@ -213,22 +213,8 @@ define <2 x half> @local_atomic_fadd_ret_v2f16_offset(ptr addrspace(3) %ptr, <2 x half> %val) {
 ; GFX940-LABEL: local_atomic_fadd_ret_v2f16_offset:
 ; GFX940:       ; %bb.0:
 ; GFX940-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX940-NEXT:    ds_read_b32 v2, v0 offset:65532
-; GFX940-NEXT:    s_mov_b64 s[0:1], 0
-; GFX940-NEXT:  .LBB15_1: ; %atomicrmw.start
-; GFX940-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT:    ds_pk_add_rtn_f16 v0, v0, v1 offset:65532
 ; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX940-NEXT:    v_mov_b32_e32 v3, v2
-; GFX940-NEXT:    v_pk_add_f16 v2, v3, v1
-; GFX940-NEXT:    ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
-; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX940-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v3
-; GFX940-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX940-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX940-NEXT:    s_cbranch_execnz .LBB15_1
-; GFX940-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX940-NEXT:    s_or_b64 exec, exec, s[0:1]
-; GFX940-NEXT:    v_mov_b32_e32 v0, v2
 ; GFX940-NEXT:    s_setpc_b64 s[30:31]
   %gep = getelementptr <2 x half>, ptr addrspace(3) %ptr, i32 16383
   %result = atomicrmw fadd ptr addrspace(3) %gep, <2 x half> %val seq_cst
@@ -239,21 +225,8 @@ define void @local_atomic_fadd_noret_v2f16_offset(ptr addrspace(3) %ptr, <2 x half> %val) {
 ; GFX940-LABEL: local_atomic_fadd_noret_v2f16_offset:
 ; GFX940:       ; %bb.0:
 ; GFX940-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX940-NEXT:    ds_read_b32 v2, v0 offset:65532
-; GFX940-NEXT:    s_mov_b64 s[0:1], 0
-; GFX940-NEXT:  .LBB16_1: ; %atomicrmw.start
-; GFX940-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT:    ds_pk_add_f16 v0, v1 offset:65532
 ; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX940-NEXT:    v_pk_add_f16 v3, v2, v1
-; GFX940-NEXT:    ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
-; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX940-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
-; GFX940-NEXT:    s_or_b64 s[0:1], vcc, s[0:1]
-; GFX940-NEXT:    v_mov_b32_e32 v2, v3
-; GFX940-NEXT:    s_andn2_b64 exec, exec, s[0:1]
-; GFX940-NEXT:    s_cbranch_execnz .LBB16_1
-; GFX940-NEXT:  ; %bb.2: ; %atomicrmw.end
-; GFX940-NEXT:    s_or_b64 exec, exec, s[0:1]
 ; GFX940-NEXT:    s_setpc_b64 s[30:31]
   %gep = getelementptr <2 x half>, ptr addrspace(3) %ptr, i32 16383
   %unused = atomicrmw fadd ptr addrspace(3) %gep, <2 x half> %val seq_cst
llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-atomicrmw.ll: 2 additions & 25 deletions
@@ -52,36 +52,13 @@ define float @test_atomicrmw_fsub(ptr addrspace(3) %addr) {
 define <2 x half> @test_atomicrmw_fadd_vector(ptr addrspace(3) %addr) {
   ; CHECK-LABEL: name: test_atomicrmw_fadd_vector
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
-  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load (<2 x s16>) from %ir.addr, addrspace 3)
-  ; CHECK-NEXT:   G_BR %bb.2
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: bb.2.atomicrmw.start:
-  ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s64) = G_PHI %19(s64), %bb.2, [[C1]](s64), %bb.1
-  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(<2 x s16>) = G_PHI [[LOAD]](<2 x s16>), %bb.1, %18(<2 x s16>), %bb.2
-  ; CHECK-NEXT:   [[FADD:%[0-9]+]]:_(<2 x s16>) = G_FADD [[PHI1]], [[BUILD_VECTOR]]
-  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[FADD]](<2 x s16>)
-  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[PHI1]](<2 x s16>)
-  ; CHECK-NEXT:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[BITCAST1]], [[BITCAST]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
-  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
-  ; CHECK-NEXT:   [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
-  ; CHECK-NEXT:   [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
-  ; CHECK-NEXT:   G_BRCOND [[INT1]](s1), %bb.3
-  ; CHECK-NEXT:   G_BR %bb.2
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: bb.3.atomicrmw.end:
-  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:_(<2 x s16>) = G_PHI [[BITCAST2]](<2 x s16>), %bb.2
-  ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
-  ; CHECK-NEXT:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
-  ; CHECK-NEXT:   $vgpr0 = COPY [[PHI2]](<2 x s16>)
+  ; CHECK-NEXT:   [[ATOMICRMW_FADD:%[0-9]+]]:_(<2 x s16>) = G_ATOMICRMW_FADD [[COPY]](p3), [[BUILD_VECTOR]] :: (load store seq_cst (<2 x s16>) on %ir.addr, addrspace 3)
+  ; CHECK-NEXT:   $vgpr0 = COPY [[ATOMICRMW_FADD]](<2 x s16>)
   ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %oldval = atomicrmw fadd ptr addrspace(3) %addr, <2 x half> <half 1.0, half 1.0> seq_cst
   ret <2 x half> %oldval
