[DAGCombiner] Combine vp.strided.load with unit stride to vp.load #66766

Merged: 3 commits, Sep 19, 2023
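For illustration, the effect of the new combine expressed at the LLVM IR level; the transformation itself runs on SelectionDAG nodes, and the function names below are hypothetical sketches rather than tests from this patch. A predicated strided load whose constant stride equals the element's store size touches the same contiguous bytes as an ordinary vp.load, so it can be rewritten into one while keeping the mask and the explicit vector length.

declare <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr, i32, <4 x i1>, i32)
declare <4 x i32> @llvm.vp.load.v4i32.p0(ptr, <4 x i1>, i32)

; Unit stride: 4 bytes per lane for <4 x i32>, so the accessed lanes are contiguous.
define <4 x i32> @unit_stride_before(ptr %p, <4 x i1> %m, i32 zeroext %evl) {
  %v = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr %p, i32 4, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

; After the DAG combine the node behaves as if written as a vp.load, which is
; still masked by %m and bounded by %evl (i.e. it remains predicated).
define <4 x i32> @unit_stride_after(ptr %p, <4 x i1> %m, i32 zeroext %evl) {
  %v = call <4 x i32> @llvm.vp.load.v4i32.p0(ptr %p, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}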
21 changes: 21 additions & 0 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -539,6 +539,7 @@ namespace {
SDValue visitMSCATTER(SDNode *N);
SDValue visitVPGATHER(SDNode *N);
SDValue visitVPSCATTER(SDNode *N);
SDValue visitVP_STRIDED_LOAD(SDNode *N);
SDValue visitFP_TO_FP16(SDNode *N);
SDValue visitFP16_TO_FP(SDNode *N);
SDValue visitFP_TO_BF16(SDNode *N);
@@ -11959,6 +11960,22 @@ SDValue DAGCombiner::visitMLOAD(SDNode *N) {
return SDValue();
}

SDValue DAGCombiner::visitVP_STRIDED_LOAD(SDNode *N) {
auto *SLD = cast<VPStridedLoadSDNode>(N);
EVT EltVT = SLD->getValueType(0).getVectorElementType();
// Combine strided loads with unit-stride to a regular VP load.
[Collaborator review comment on the comment line above: "Misleading comment - the result is a vp.load, not a regular load; i.e. it is still predicated."]

if (auto *CStride = dyn_cast<ConstantSDNode>(SLD->getStride());
CStride && CStride->getZExtValue() == EltVT.getStoreSize()) {
SDValue NewLd = DAG.getLoadVP(
SLD->getAddressingMode(), SLD->getExtensionType(), SLD->getValueType(0),
SDLoc(N), SLD->getChain(), SLD->getBasePtr(), SLD->getOffset(),
SLD->getMask(), SLD->getVectorLength(), SLD->getMemoryVT(),
SLD->getMemOperand(), SLD->isExpandingLoad());
return CombineTo(N, NewLd, NewLd.getValue(1));
}
return SDValue();
}

/// A vector select of 2 constant vectors can be simplified to math/logic to
/// avoid a variable select instruction and possibly avoid constant loads.
SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
@@ -25976,6 +25993,10 @@ SDValue DAGCombiner::visitVPOp(SDNode *N) {
if (SDValue SD = visitVPSCATTER(N))
return SD;

if (N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD)
if (SDValue SD = visitVP_STRIDED_LOAD(N))
return SD;

// VP operations in which all vector elements are disabled - either by
// determining that the mask is all false or that the EVL is 0 - can be
// eliminated.
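For contrast, a hypothetical case (not part of this patch's tests) that the guard CStride && CStride->getZExtValue() == EltVT.getStoreSize() intentionally rejects: the stride is constant but larger than the element's store size, so the node stays a strided load and, on RISC-V, would still be expected to select to a strided vlse instruction rather than a unit-stride vle.

declare <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr, i32, <4 x i1>, i32)

; Constant stride of 8 bytes over 4-byte i32 elements: not unit stride, so the
; combine does not fire and the strided form is preserved.
define <4 x i32> @strided_vpload_v4i32_stride8(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
  %load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr %ptr, i32 8, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %load
}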
103 changes: 87 additions & 16 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -96,6 +96,16 @@ define <8 x i8> @strided_vpload_v8i8(ptr %ptr, i32 signext %stride, <8 x i1> %m,
ret <8 x i8> %load
}

define <8 x i8> @strided_vpload_v8i8_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_v8i8_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i32(ptr %ptr, i32 1, <8 x i1> %m, i32 %evl)
ret <8 x i8> %load
}

declare <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0.i32(ptr, i32, <2 x i1>, i32)

define <2 x i16> @strided_vpload_v2i16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -132,6 +142,16 @@ define <8 x i16> @strided_vpload_v8i16(ptr %ptr, i32 signext %stride, <8 x i1> %
ret <8 x i16> %load
}

define <8 x i16> @strided_vpload_v8i16_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_v8i16_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
ret <8 x i16> %load
}

define <8 x i16> @strided_vpload_v8i16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_v8i16_allones_mask:
; CHECK: # %bb.0:
@@ -168,6 +188,16 @@ define <4 x i32> @strided_vpload_v4i32(ptr %ptr, i32 signext %stride, <4 x i1> %
ret <4 x i32> %load
}

define <4 x i32> @strided_vpload_v4i32_unit_stride(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_v4i32_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
ret <4 x i32> %load
}

declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr, i32, <8 x i1>, i32)

define <8 x i32> @strided_vpload_v8i32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -204,6 +234,16 @@ define <2 x i64> @strided_vpload_v2i64(ptr %ptr, i32 signext %stride, <2 x i1> %
ret <2 x i64> %load
}

define <2 x i64> @strided_vpload_v2i64_unit_stride(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_v2i64_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
ret <2 x i64> %load
}

declare <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i32(ptr, i32, <4 x i1>, i32)

define <4 x i64> @strided_vpload_v4i64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -288,6 +328,16 @@ define <8 x half> @strided_vpload_v8f16(ptr %ptr, i32 signext %stride, <8 x i1>
ret <8 x half> %load
}

define <8 x half> @strided_vpload_v8f16_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_v8f16_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0.i32(ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
ret <8 x half> %load
}

declare <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0.i32(ptr, i32, <2 x i1>, i32)

define <2 x float> @strided_vpload_v2f32(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -312,6 +362,16 @@ define <4 x float> @strided_vpload_v4f32(ptr %ptr, i32 signext %stride, <4 x i1>
ret <4 x float> %load
}

define <4 x float> @strided_vpload_v4f32_unit_stride(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_v4f32_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0.i32(ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
ret <4 x float> %load
}

declare <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0.i32(ptr, i32, <8 x i1>, i32)

define <8 x float> @strided_vpload_v8f32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -348,6 +408,17 @@ define <2 x double> @strided_vpload_v2f64(ptr %ptr, i32 signext %stride, <2 x i1
ret <2 x double> %load
}

define <2 x double> @strided_vpload_v2f64_unit_stride(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_v2f64_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0.i32(ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
ret <2 x double> %load
}


declare <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0.i32(ptr, i32, <4 x i1>, i32)

define <4 x double> @strided_vpload_v4f64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -416,10 +487,10 @@ define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: bltu a2, a4, .LBB33_2
; CHECK-NEXT: bltu a2, a4, .LBB40_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: .LBB33_2:
; CHECK-NEXT: .LBB40_2:
; CHECK-NEXT: mul a4, a3, a1
; CHECK-NEXT: add a4, a0, a4
; CHECK-NEXT: addi a5, a2, -16
@@ -444,10 +515,10 @@ define <32 x double> @strided_vpload_v32f64_allones_mask(ptr %ptr, i32 signext %
; CHECK: # %bb.0:
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: bltu a2, a4, .LBB34_2
; CHECK-NEXT: bltu a2, a4, .LBB41_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: .LBB34_2:
; CHECK-NEXT: .LBB41_2:
; CHECK-NEXT: mul a4, a3, a1
; CHECK-NEXT: add a4, a0, a4
; CHECK-NEXT: addi a5, a2, -16
@@ -474,21 +545,21 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
; CHECK-RV32-NEXT: li a5, 32
; CHECK-RV32-NEXT: vmv1r.v v8, v0
; CHECK-RV32-NEXT: mv a3, a4
; CHECK-RV32-NEXT: bltu a4, a5, .LBB35_2
; CHECK-RV32-NEXT: bltu a4, a5, .LBB42_2
; CHECK-RV32-NEXT: # %bb.1:
; CHECK-RV32-NEXT: li a3, 32
; CHECK-RV32-NEXT: .LBB35_2:
; CHECK-RV32-NEXT: .LBB42_2:
; CHECK-RV32-NEXT: mul a5, a3, a2
; CHECK-RV32-NEXT: addi a6, a4, -32
; CHECK-RV32-NEXT: sltu a4, a4, a6
; CHECK-RV32-NEXT: addi a4, a4, -1
; CHECK-RV32-NEXT: and a6, a4, a6
; CHECK-RV32-NEXT: li a4, 16
; CHECK-RV32-NEXT: add a5, a1, a5
; CHECK-RV32-NEXT: bltu a6, a4, .LBB35_4
; CHECK-RV32-NEXT: bltu a6, a4, .LBB42_4
; CHECK-RV32-NEXT: # %bb.3:
; CHECK-RV32-NEXT: li a6, 16
; CHECK-RV32-NEXT: .LBB35_4:
; CHECK-RV32-NEXT: .LBB42_4:
; CHECK-RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 4
; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, ma
@@ -497,10 +568,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
; CHECK-RV32-NEXT: sltu a6, a3, a5
; CHECK-RV32-NEXT: addi a6, a6, -1
; CHECK-RV32-NEXT: and a5, a6, a5
; CHECK-RV32-NEXT: bltu a3, a4, .LBB35_6
; CHECK-RV32-NEXT: bltu a3, a4, .LBB42_6
; CHECK-RV32-NEXT: # %bb.5:
; CHECK-RV32-NEXT: li a3, 16
; CHECK-RV32-NEXT: .LBB35_6:
; CHECK-RV32-NEXT: .LBB42_6:
; CHECK-RV32-NEXT: mul a4, a3, a2
; CHECK-RV32-NEXT: add a4, a1, a4
; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -524,21 +595,21 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
; CHECK-RV64-NEXT: li a5, 32
; CHECK-RV64-NEXT: vmv1r.v v8, v0
; CHECK-RV64-NEXT: mv a4, a3
; CHECK-RV64-NEXT: bltu a3, a5, .LBB35_2
; CHECK-RV64-NEXT: bltu a3, a5, .LBB42_2
; CHECK-RV64-NEXT: # %bb.1:
; CHECK-RV64-NEXT: li a4, 32
; CHECK-RV64-NEXT: .LBB35_2:
; CHECK-RV64-NEXT: .LBB42_2:
; CHECK-RV64-NEXT: mul a5, a4, a2
; CHECK-RV64-NEXT: addi a6, a3, -32
; CHECK-RV64-NEXT: sltu a3, a3, a6
; CHECK-RV64-NEXT: addi a3, a3, -1
; CHECK-RV64-NEXT: and a6, a3, a6
; CHECK-RV64-NEXT: li a3, 16
; CHECK-RV64-NEXT: add a5, a1, a5
; CHECK-RV64-NEXT: bltu a6, a3, .LBB35_4
; CHECK-RV64-NEXT: bltu a6, a3, .LBB42_4
; CHECK-RV64-NEXT: # %bb.3:
; CHECK-RV64-NEXT: li a6, 16
; CHECK-RV64-NEXT: .LBB35_4:
; CHECK-RV64-NEXT: .LBB42_4:
; CHECK-RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 4
; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, ma
Expand All @@ -547,10 +618,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
; CHECK-RV64-NEXT: sltu a6, a4, a5
; CHECK-RV64-NEXT: addi a6, a6, -1
; CHECK-RV64-NEXT: and a5, a6, a5
; CHECK-RV64-NEXT: bltu a4, a3, .LBB35_6
; CHECK-RV64-NEXT: bltu a4, a3, .LBB42_6
; CHECK-RV64-NEXT: # %bb.5:
; CHECK-RV64-NEXT: li a4, 16
; CHECK-RV64-NEXT: .LBB35_6:
; CHECK-RV64-NEXT: .LBB42_6:
; CHECK-RV64-NEXT: mul a3, a4, a2
; CHECK-RV64-NEXT: add a3, a1, a3
; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma