Skip to content

Commit

Permalink
[RISCV] Narrow indices of fixed vector gather/scatter nodes
Browse files Browse the repository at this point in the history
Doing so allows the use of smaller constants overall, and may allow (for some small vector constants) avoiding the constant pool entirely.  This can result in extra VTYPE toggles if we get unlucky.

This was reviewed under PR llvm#66405.
  • Loading branch information
preames authored and ZijunZhaoCCK committed Sep 19, 2023
1 parent 73e12af commit 08039a1
Show file tree
Hide file tree
Showing 2 changed files with 37 additions and 14 deletions.
35 changes: 29 additions & 6 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -11633,15 +11633,39 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
}

// According to the property that indexed load/store instructions
// zero-extend their indices, \p narrowIndex tries to narrow the type of the
// index operand if it matches the pattern (shl (zext x to ty), C) and
// bits(x) + C < bits(ty).
/// According to the property that indexed load/store instructions zero-extend
/// their indices, try to narrow the type of index operand.
static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &DAG) {
if (isIndexTypeSigned(IndexType))
return false;

if (N.getOpcode() != ISD::SHL || !N->hasOneUse())
if (!N->hasOneUse())
return false;

EVT VT = N.getValueType();
SDLoc DL(N);

// In general, what we're doing here is seeing if we can sink a truncate to
// a smaller element type into the expression tree building our index.
// TODO: We can generalize this and handle a bunch more cases if useful.

// Narrow a buildvector to the narrowest element type. This requires less
// work and less register pressure at high LMUL, and creates smaller constants
// which may be cheaper to materialize.
if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) {
KnownBits Known = DAG.computeKnownBits(N);
unsigned ActiveBits = std::max(8u, Known.countMaxActiveBits());
LLVMContext &C = *DAG.getContext();
EVT ResultVT = EVT::getIntegerVT(C, ActiveBits).getRoundIntegerType(C);
if (ResultVT.bitsLT(VT.getVectorElementType())) {
N = DAG.getNode(ISD::TRUNCATE, DL,
VT.changeVectorElementType(ResultVT), N);
return true;
}
}

// Handle the pattern (shl (zext x to ty), C) and bits(x) + C < bits(ty).
if (N.getOpcode() != ISD::SHL)
return false;

SDValue N0 = N.getOperand(0);
Expand All @@ -11656,7 +11680,6 @@ static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &D
if (!ISD::isConstantSplatVector(N1.getNode(), ShAmt))
return false;;

SDLoc DL(N);
SDValue Src = N0.getOperand(0);
EVT SrcVT = Src.getValueType();
unsigned SrcElen = SrcVT.getScalarSizeInBits();
Expand Down
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
Original file line number Diff line number Diff line change
Expand Up @@ -13027,17 +13027,17 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
; RV32-NEXT: lui a1, %hi(.LCPI107_0)
; RV32-NEXT: addi a1, a1, %lo(.LCPI107_0)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vle32.v v10, (a1)
; RV32-NEXT: vluxei32.v v8, (a0), v10
; RV32-NEXT: vle8.v v9, (a1)
; RV32-NEXT: vluxei8.v v8, (a0), v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_strided_2xSEW:
; RV64V: # %bb.0:
; RV64V-NEXT: lui a1, %hi(.LCPI107_0)
; RV64V-NEXT: addi a1, a1, %lo(.LCPI107_0)
; RV64V-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64V-NEXT: vle64.v v12, (a1)
; RV64V-NEXT: vluxei64.v v8, (a0), v12
; RV64V-NEXT: vle8.v v9, (a1)
; RV64V-NEXT: vluxei8.v v8, (a0), v9
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_strided_2xSEW:
Expand Down Expand Up @@ -13144,17 +13144,17 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV32-NEXT: lui a1, %hi(.LCPI108_0)
; RV32-NEXT: addi a1, a1, %lo(.LCPI108_0)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vle32.v v10, (a1)
; RV32-NEXT: vluxei32.v v8, (a0), v10
; RV32-NEXT: vle8.v v9, (a1)
; RV32-NEXT: vluxei8.v v8, (a0), v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_gather_2xSEW:
; RV64V: # %bb.0:
; RV64V-NEXT: lui a1, %hi(.LCPI108_0)
; RV64V-NEXT: addi a1, a1, %lo(.LCPI108_0)
; RV64V-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64V-NEXT: vle64.v v12, (a1)
; RV64V-NEXT: vluxei64.v v8, (a0), v12
; RV64V-NEXT: vle8.v v9, (a1)
; RV64V-NEXT: vluxei8.v v8, (a0), v9
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_gather_2xSEW:
Expand Down

0 comments on commit 08039a1

Please sign in to comment.