From ef56061dcfd162fa3ba9dafd05762707f4c30095 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Fri, 16 Aug 2024 22:48:23 +0400
Subject: [PATCH] AMDGPU: Rename type helper functions in atomic handling

Requested on #95394

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 25fee559faa29c..d9387b2dda30ed 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16093,7 +16093,7 @@ static OptimizationRemark emitAtomicRMWLegalRemark(const AtomicRMWInst *RMW) {
          << " operation at memory scope " << MemScope;
 }
 
-static bool isHalf2OrBFloat2(Type *Ty) {
+static bool isV2F16OrV2BF16(Type *Ty) {
   if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
     Type *EltTy = VT->getElementType();
     return VT->getNumElements() == 2 &&
@@ -16103,12 +16103,12 @@ static bool isHalf2OrBFloat2(Type *Ty) {
   return false;
 }
 
-static bool isHalf2(Type *Ty) {
+static bool isV2F16(Type *Ty) {
   FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty);
   return VT && VT->getNumElements() == 2 && VT->getElementType()->isHalfTy();
 }
 
-static bool isBFloat2(Type *Ty) {
+static bool isV2BF16(Type *Ty) {
   FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty);
   return VT && VT->getNumElements() == 2 && VT->getElementType()->isBFloatTy();
 }
@@ -16248,7 +16248,7 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
                : AtomicExpansionKind::CmpXChg;
     }
 
-    if (Subtarget->hasAtomicDsPkAdd16Insts() && isHalf2OrBFloat2(Ty))
+    if (Subtarget->hasAtomicDsPkAdd16Insts() && isV2F16OrV2BF16(Ty))
       return AtomicExpansionKind::None;
 
     return AtomicExpansionKind::CmpXChg;
@@ -16273,24 +16273,24 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
     if (globalMemoryFPAtomicIsLegal(*Subtarget, RMW, HasSystemScope)) {
       if (AS == AMDGPUAS::FLAT_ADDRESS) {
         // gfx940, gfx12
-        if (Subtarget->hasAtomicFlatPkAdd16Insts() && isHalf2OrBFloat2(Ty))
+        if (Subtarget->hasAtomicFlatPkAdd16Insts() && isV2F16OrV2BF16(Ty))
           return ReportUnsafeHWInst(AtomicExpansionKind::None);
       } else if (AMDGPU::isExtendedGlobalAddrSpace(AS)) {
         // gfx90a, gfx940, gfx12
-        if (Subtarget->hasAtomicBufferGlobalPkAddF16Insts() && isHalf2(Ty))
+        if (Subtarget->hasAtomicBufferGlobalPkAddF16Insts() && isV2F16(Ty))
           return ReportUnsafeHWInst(AtomicExpansionKind::None);
 
         // gfx940, gfx12
-        if (Subtarget->hasAtomicGlobalPkAddBF16Inst() && isBFloat2(Ty))
+        if (Subtarget->hasAtomicGlobalPkAddBF16Inst() && isV2BF16(Ty))
           return ReportUnsafeHWInst(AtomicExpansionKind::None);
       } else if (AS == AMDGPUAS::BUFFER_FAT_POINTER) {
         // gfx90a, gfx940, gfx12
-        if (Subtarget->hasAtomicBufferGlobalPkAddF16Insts() && isHalf2(Ty))
+        if (Subtarget->hasAtomicBufferGlobalPkAddF16Insts() && isV2F16(Ty))
           return ReportUnsafeHWInst(AtomicExpansionKind::None);
 
         // While gfx90a/gfx940 supports v2bf16 for global/flat, it does not for
         // buffer. gfx12 does have the buffer version.
-        if (Subtarget->hasAtomicBufferPkAddBF16Inst() && isBFloat2(Ty))
+        if (Subtarget->hasAtomicBufferPkAddBF16Inst() && isV2BF16(Ty))
           return ReportUnsafeHWInst(AtomicExpansionKind::None);
       }
 
@@ -16311,7 +16311,7 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
       // gfx908
       if (RMW->use_empty() &&
           Subtarget->hasAtomicBufferGlobalPkAddF16NoRtnInsts() &&
-          isHalf2(Ty))
+          isV2F16(Ty))
         return ReportUnsafeHWInst(AtomicExpansionKind::None);
     }
   }