[AMDGPU] [IGLP]: Fix assert (llvm#73710)
We can also re-enter the IGLP mutation via later `SchedStage`s in the
`GCNMaxOccupancySchedStrategy`. This is effectively NFC, in that there is no
change in behavior for the only current client of `IsReentry`.

Change-Id: Iad948777862423288eb1d471961145bd51e07072
jrbyrnes authored and bcahoon committed Feb 26, 2024
1 parent 5067fea commit df9a03c
Showing 3 changed files with 32 additions and 25 deletions.
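
For context, here is a minimal standalone sketch (toy types, not the actual LLVM classes) of the re-entry pattern the commit message describes: several scheduling stages run over the same region, each stage re-installs the IGLP mutation, and only the first stage sees the region "fresh":

```cpp
#include <cstdio>
#include <memory>

// Toy stand-ins for ScheduleDAGMutation / IGroupLPDAGMutation.
struct ScheduleDAGMutation {
  virtual ~ScheduleDAGMutation() = default;
  virtual void apply() = 0;
};

struct IGroupLPDAGMutation final : ScheduleDAGMutation {
  bool IsReentry;
  explicit IGroupLPDAGMutation(bool IsReentry) : IsReentry(IsReentry) {}
  void apply() override {
    // On first entry the strategy may assume clean per-region state; on
    // re-entry it must not (that is what the relaxed assert in this
    // commit checks).
    std::printf("applyIGLPStrategy(IsReentry=%d)\n", IsReentry);
  }
};

std::unique_ptr<ScheduleDAGMutation> createIGroupLPDAGMutation(bool IsReentry) {
  return std::make_unique<IGroupLPDAGMutation>(IsReentry);
}

int main() {
  // Hypothetical 3-stage pre-RA schedule over one region: stage 0 is the
  // initial stage, stages 1 and 2 re-enter the mutation.
  for (int Stage = 0; Stage < 3; ++Stage) {
    bool IsInitialStage = (Stage == 0);
    auto M = createIGroupLPDAGMutation(/*IsReentry=*/!IsInitialStage);
    M->apply();
  }
}
```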
49 changes: 27 additions & 22 deletions llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -345,13 +345,13 @@ class PipelineSolver {
   // return the number of edges missed.
   int addEdges(SmallVectorImpl<SchedGroup> &SyncPipeline, SUnit *SU, int SGID,
                std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
-  // Link the pipeline as if \p SU was in the SchedGroup with ID \p SGID. It
-  // returns the cost (in terms of missed pipeline edges), and tracks the edges
-  // added in \p AddedEdges
+  /// Link the pipeline as if \p SU was in the SchedGroup with ID \p SGID. It
+  /// returns the cost (in terms of missed pipeline edges), and tracks the edges
+  /// added in \p AddedEdges
   template <typename T>
   int linkSUnit(SUnit *SU, int SGID,
                 std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges, T I, T E);
-  // Remove the edges passed via \p AddedEdges
+  /// Remove the edges passed via \p AddedEdges
   void removeEdges(const std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
   // Convert the passed in maps to arrays for bidirectional iterators
   void convertSyncMapsToArrays();
@@ -851,11 +851,11 @@ class IGLPStrategy {
   const SIInstrInfo *TII;

 public:
-  // Add SchedGroups to \p Pipeline to implement this Strategy.
+  /// Add SchedGroups to \p SyncedSchedGroups to implement this Strategy.
   virtual void applyIGLPStrategy(
       DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
       DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
-      bool IsPostRA) = 0;
+      bool IsReentry) = 0;

   // Returns true if this strategy should be applied to a ScheduleDAG.
   virtual bool shouldApplyStrategy(ScheduleDAGInstrs *DAG) = 0;
@@ -874,7 +874,7 @@ class MFMASmallGemmOpt final : public IGLPStrategy {
   void applyIGLPStrategy(
       DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
       DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
-      bool IsPostRA) override;
+      bool IsReentry) override;

   bool shouldApplyStrategy(ScheduleDAGInstrs *DAG) override { return true; }
@@ -887,7 +887,7 @@ class MFMASmallGemmOpt final : public IGLPStrategy {
 void MFMASmallGemmOpt::applyIGLPStrategy(
     DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
     DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
-    bool IsPostRA) {
+    bool IsReentry) {
   // Count the number of MFMA instructions.
   unsigned MFMACount = 0;
   for (const MachineInstr &I : *DAG)
@@ -1050,8 +1050,8 @@ class MFMASmallGemmSingleWaveOpt final : public IGLPStrategy {
         : InstructionRule(TII, SGID, NeedsCache) {}
   };

-  // Whether the SU shares a V_PERM predecessor with any SU in the SchedGroup
-  // that is /p Distance steps away
+  /// Whether the SU shares a V_PERM predecessor with any SU in the SchedGroup
+  /// that is \p Distance steps away
   class SharesPredWithPrevNthGroup final : public InstructionRule {
   private:
     unsigned Distance = 1;
@@ -1106,7 +1106,7 @@ class MFMASmallGemmSingleWaveOpt final : public IGLPStrategy {
   void applyIGLPStrategy(
       DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
       DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
-      bool IsPostRA) override;
+      bool IsReentry) override;

   bool shouldApplyStrategy(ScheduleDAGInstrs *DAG) override { return true; }
@@ -1123,12 +1123,12 @@ static unsigned DSWWithSharedVMEMCount = 0;
 void MFMASmallGemmSingleWaveOpt::applyIGLPStrategy(
     DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
     DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
-    bool IsPostRA) {
+    bool IsReentry) {
   unsigned MFMACount = 0;
   unsigned DSRCount = 0;

-  assert((IsPostRA || (DSWCount == 0 && DSWWithPermCount == 0 &&
-                       DSWWithSharedVMEMCount == 0)) &&
+  assert((IsReentry || (DSWCount == 0 && DSWWithPermCount == 0 &&
+                        DSWWithSharedVMEMCount == 0)) &&
          "DSWCounters should be zero in pre-RA scheduling!");
   SmallVector<SUnit *, 6> DSWithPerms;
   for (auto &SU : DAG->SUnits) {
@@ -1138,7 +1138,7 @@ void MFMASmallGemmSingleWaveOpt::applyIGLPStrategy(
     else if (TII->isDS(*I)) {
       if (I->mayLoad())
         ++DSRCount;
-      else if (I->mayStore() && !IsPostRA) {
+      else if (I->mayStore() && !IsReentry) {
         ++DSWCount;
         for (auto Pred : SU.Preds) {
           if (Pred.getSUnit()->getInstr()->getOpcode() ==
@@ -1151,7 +1151,7 @@ void MFMASmallGemmSingleWaveOpt::applyIGLPStrategy(
     }
   }

-  if (!IsPostRA) {
+  if (!IsReentry) {
     DSWWithPermCount = DSWithPerms.size();
     auto I = DSWithPerms.begin();
     auto E = DSWithPerms.end();
@@ -1418,11 +1418,11 @@ class IGroupLPDAGMutation : public ScheduleDAGMutation {
   // first created SchedGroup first.
   bool IsBottomUp = 1;

-  // Whether the mutation is being applied to post RA scheduling
-  bool IsPostRA = false;
+  // Whether or not this is a reentry into the IGroupLPDAGMutation.
+  bool IsReentry = false;

   IGroupLPDAGMutation() = default;
-  IGroupLPDAGMutation(bool IsPostRA) : IsPostRA(IsPostRA) {}
+  IGroupLPDAGMutation(bool IsReentry) : IsReentry(IsReentry) {}
 };

 unsigned SchedGroup::NumSchedGroups = 0;
@@ -1710,16 +1710,21 @@ void IGroupLPDAGMutation::initIGLPOpt(SUnit &SU) {
   auto S = createIGLPStrategy(StrategyID, DAG, TII);
   if (S->shouldApplyStrategy(DAG)) {
     IsBottomUp = S->IsBottomUp;
-    S->applyIGLPStrategy(SyncedInstrs, SyncedSchedGroups, IsPostRA);
+    S->applyIGLPStrategy(SyncedInstrs, SyncedSchedGroups, IsReentry);
   }
 }

 } // namespace

 namespace llvm {

-std::unique_ptr<ScheduleDAGMutation> createIGroupLPDAGMutation(bool IsPostRA) {
-  return std::make_unique<IGroupLPDAGMutation>(IsPostRA);
+/// \p IsReentry specifies whether or not this is a reentry into the
+/// IGroupLPDAGMutation. Since there may be multiple scheduling passes on the
+/// same scheduling region (e.g. pre and post-RA scheduling / multiple
+/// scheduling "phases"), we can reenter this mutation framework more than once
+/// for a given region.
+std::unique_ptr<ScheduleDAGMutation> createIGroupLPDAGMutation(bool IsReentry) {
+  return std::make_unique<IGroupLPDAGMutation>(IsReentry);
 }

 } // end namespace llvm
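
The `static` DSW counters touched above are why re-entry matters: they are computed on the first pass over a region and reused by later stages. A minimal sketch of that guard pattern (simplified counters and a placeholder analysis, not the real implementation):

```cpp
#include <cassert>

// File-scope counters, as in AMDGPUIGroupLP.cpp (names match; the update
// logic below is a placeholder).
static unsigned DSWCount = 0;
static unsigned DSWWithPermCount = 0;

void applyStrategySketch(bool IsReentry, unsigned NumDSWritesInRegion) {
  // First entry into a region must start from zero; a re-entry may see the
  // counts computed by an earlier stage, so the assert tolerates it.
  assert((IsReentry || (DSWCount == 0 && DSWWithPermCount == 0)) &&
         "DSWCounters should be zero in pre-RA scheduling!");
  if (!IsReentry) {
    DSWCount = NumDSWritesInRegion;  // stand-in for the real DAG walk
    DSWWithPermCount = DSWCount / 2; // placeholder, not the V_PERM analysis
  }
  // ... build SchedGroups using the (possibly cached) counters ...
}
```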
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.h
@@ -14,7 +14,7 @@

 namespace llvm {

-std::unique_ptr<ScheduleDAGMutation> createIGroupLPDAGMutation(bool IsPostRA);
+std::unique_ptr<ScheduleDAGMutation> createIGroupLPDAGMutation(bool IsReentry);

 } // namespace llvm

6 changes: 4 additions & 2 deletions llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -842,7 +842,9 @@ bool GCNSchedStage::initGCNRegion() {
       StageID != GCNSchedStageID::UnclusteredHighRPReschedule) {
     SavedMutations.clear();
     SavedMutations.swap(DAG.Mutations);
-    DAG.addMutation(createIGroupLPDAGMutation(/*IsPostRA=*/false));
+    bool IsInitialStage = StageID == GCNSchedStageID::OccInitialSchedule ||
+                          StageID == GCNSchedStageID::ILPInitialSchedule;
+    DAG.addMutation(createIGroupLPDAGMutation(/*IsReentry=*/!IsInitialStage));
   }
   return true;
 }
@@ -1555,7 +1557,7 @@ void GCNPostScheduleDAGMILive::schedule() {
   if (HasIGLPInstrs) {
     SavedMutations.clear();
     SavedMutations.swap(Mutations);
-    addMutation(createIGroupLPDAGMutation(/*IsPostRA=*/true));
+    addMutation(createIGroupLPDAGMutation(/*IsReentry=*/true));
   }

   ScheduleDAGMI::schedule();
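Taken together, the two call sites make `IsReentry` false exactly once per region: for the `OccInitialSchedule` or `ILPInitialSchedule` stage. Every later pre-RA `SchedStage`, and the post-RA scheduler (which always passes `/*IsReentry=*/true`), re-enters the mutation, so the DSW-counter assert in `MFMASmallGemmSingleWaveOpt::applyIGLPStrategy` no longer fires when a later stage revisits the region.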
