#define DEBUG_TYPE "regalloc"

STATISTIC(numJoins    , "Number of interval joins performed");
STATISTIC(numCrossRCs , "Number of cross class joins performed");
STATISTIC(numCommutes , "Number of instruction commuting performed");
STATISTIC(numExtends  , "Number of copies extended");
STATISTIC(NumReMats   , "Number of instructions re-materialized");
STATISTIC(NumInflated , "Number of register classes inflated");
STATISTIC(NumLaneConflicts, "Number of dead lane conflicts tested");
STATISTIC(NumLaneResolves,  "Number of dead lane conflicts resolved");
static cl::opt<bool>
EnableJoining("join-liveintervals",
              cl::desc("Coalesce copies (default=true)"),
              cl::init(true));

static cl::opt<cl::boolOrDefault>
EnableGlobalCopies("join-globalcopies",
                   cl::desc("Coalesce copies that span blocks (default=subtarget)"),
                   cl::init(cl::BOU_UNSET), cl::Hidden);

static cl::opt<bool>
VerifyCoalescing("verify-coalescing",
                 cl::desc("Verify machine instrs before and after register coalescing"),
                 cl::Hidden);
bool JoinGlobalCopies;

void eliminateDeadDefs();
void coalesceLocals();
void joinAllIntervals();
void updateRegDefsUses(unsigned SrcReg, unsigned DstReg, unsigned SubIdx);
virtual void releaseMemory();
206 "Simple Register Coalescing",
false,
false)
214 char RegisterCoalescer::
ID = 0;
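
// isMoveInstr - Decompose a COPY or SUBREG_TO_REG instruction into its
// source/destination registers and subregister indices; returns false for
// any other instruction.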
static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI,
                        unsigned &Src, unsigned &Dst,
                        unsigned &SrcSub, unsigned &DstSub) {
  if (MI->isCopy()) {
    Dst = MI->getOperand(0).getReg();
    DstSub = MI->getOperand(0).getSubReg();
    Src = MI->getOperand(1).getReg();
    SrcSub = MI->getOperand(1).getSubReg();
  } else if (MI->isSubregToReg()) {
    Dst = MI->getOperand(0).getReg();
    DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(),
                                      MI->getOperand(3).getImm());
    Src = MI->getOperand(2).getReg();
    SrcSub = MI->getOperand(2).getSubReg();
  } else
    return false;
  return true;
}
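
// In isSplitEdge: a split edge block contains nothing but copy-like
// instructions and an unconditional branch.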
if (!MII->isCopyLike() && !MII->isUnconditionalBranch())
  return false;
Flipped = CrossClass = false;

unsigned Src, Dst, SrcSub, DstSub;
if (!isMoveInstr(TRI, MI, Src, Dst, SrcSub, DstSub))
  return false;
Partial = SrcSub || DstSub;
if (!Dst) return false;

if (!Dst) return false;

if (SrcSub && DstSub) {
  if (Src == Dst && SrcSub != DstSub)
    return false;

if (DstIdx && !SrcIdx) {

CrossClass = NewRC != DstRC || NewRC != SrcRC;
335 "Cannot have a physical SubIdx");
unsigned Src, Dst, SrcSub, DstSub;
if (!isMoveInstr(TRI, MI, Src, Dst, SrcSub, DstSub))
  return false;

} else if (Src != SrcReg) {

assert(!DstIdx && !SrcIdx && "Inconsistent CoalescerPair state.");

return DstReg == Dst;

return TRI.getSubReg(DstReg, SrcSub) == Dst;
void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {

void RegisterCoalescer::eliminateDeadDefs() {

void RegisterCoalescer::LRE_WillEraseInstruction(MachineInstr *MI) {
  ErasedInstrs.insert(MI);
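
// adjustCopiesBackFrom - CopyMI defines a value in IntB whose source value
// (in IntA) is itself defined by an earlier copy from IntB. Try to merge the
// two pieces of IntB into a single value number, making the copy redundant.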
bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
                                             MachineInstr *CopyMI) {
  assert(!CP.isPartial() && "This doesn't work for partial copies.");
  assert(!CP.isPhys() && "This doesn't work for physreg copies.");

  if (BS == IntB.end()) return false;
  VNInfo *BValNo = BS->valno;

  if (BValNo->def != CopyIdx) return false;

  if (AS == IntA.end()) return false;
  VNInfo *AValNo = AS->valno;

  if (ValS == IntB.end())
    return false;

  MachineInstr *ValSEndInst =
    LIS->getInstructionFromIndex(ValS->end.getPrevSlot());

  if (ValS+1 != BS) return false;

  SlotIndex FillerStart = ValS->end, FillerEnd = BS->start;

  BValNo->def = FillerStart;

  IntB.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, BValNo));

  if (BValNo != ValS->valno)
    IntB.MergeValueNumberInto(BValNo, ValS->valno);

  DEBUG(dbgs() << " result = " << IntB << '\n');

  if (AS->end == CopyIdx)
    LIS->shrinkToUses(&IntA);
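
// hasOtherReachingDefs - Return true if there are definitions of IntB other
// than BValNo that can reach uses of AValNo within IntA's live range.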
bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA,
                                             LiveInterval &IntB,
                                             VNInfo *AValNo, VNInfo *BValNo) {
  if (LIS->hasPHIKill(IntA, AValNo))
    return true;

  if (AI->valno != AValNo) continue;

  LiveInterval::iterator BI =
    std::upper_bound(IntB.begin(), IntB.end(), AI->start);
  if (BI != IntB.begin())
    --BI;
  for (; BI != IntB.end() && AI->end >= BI->start; ++BI) {
    if (BI->valno == BValNo)
      continue;
    if (BI->start <= AI->start && BI->end > AI->start)
      return true;
    if (BI->start > AI->start && BI->start < AI->end)
      return true;
  }
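
// removeCopyByCommutingDef - When the source of a copy is defined by a
// two-address instruction whose operands can be commuted, commuting the
// definition can turn the copy into a no-op that is safe to erase.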
bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
                                                 MachineInstr *CopyMI) {
  if (!BValNo || BValNo->def != CopyIdx)
    return false;

  assert(AValNo && "COPY source not live");

  assert(DefIdx != -1);
  unsigned Op1, Op2, NewDstIdx;
  if (!TII->findCommutedOpIndices(DefMI, Op1, Op2))
    return false;

  else if (Op2 == UseOpIdx)

  unsigned NewReg = NewDstMO.getReg();

  if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
    return false;

  for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(IntA.reg),
         UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
    SlotIndex UseIdx = LIS->getInstructionIndex(UseMI);
    if (US == IntA.end() || US->valno != AValNo)
      continue;

  DEBUG(dbgs() << "\tremoveCopyByCommutingDef: " << AValNo->def << '\t'
               << *DefMI);

  if (NewMI != DefMI) {
    LIS->ReplaceMachineInstrInMaps(DefMI, NewMI);

  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(IntA.reg),
       UE = MRI->use_end(); UI != UE;) {
    if (US == IntA.end() || US->valno != AValNo)
      continue;
    DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
    assert(DVNI->def == DefIdx);
    ErasedInstrs.insert(UseMI);
    LIS->RemoveMachineInstrFromMaps(UseMI);

  if (AI->valno != AValNo) continue;
  IntB.addSegment(LiveInterval::Segment(AI->start, AI->end, ValNo));

  DEBUG(dbgs() << "\t\textended: " << IntB << '\n');

  DEBUG(dbgs() << "\t\ttrimmed: " << IntA << '\n');
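
// reMaterializeTrivialDef - If the source of a copy is defined by a trivially
// rematerializable instruction, re-emit the definition at the point of the
// copy and erase the copy instead of coalescing the live ranges.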
bool RegisterCoalescer::reMaterializeTrivialDef(CoalescerPair &CP,
                                                MachineInstr *CopyMI,
                                                bool &IsDefCopy) {
  SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI);
  assert(ValNo && "CopyMI input register not live");

  if (!TII->isTriviallyReMaterializable(DefMI, AA))
    return false;

  unsigned CopyDstReg = DstOperand.getReg();

  unsigned NewDstReg = DstReg;

  unsigned NewDstIdx = TRI->composeSubRegIndices(CP.getSrcIdx(),
                                                 DefMI->getOperand(0).getSubReg());
  NewDstReg = TRI->getSubReg(DstReg, NewDstIdx);

         "Only expect to deal with virtual or physical registers");

  TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, DefMI, *TRI);
  LIS->ReplaceMachineInstrInMaps(CopyMI, NewMI);
  ErasedInstrs.insert(CopyMI);

  RCForInst = TRI->getMatchingSuperRegClass(MRI->getRegClass(DstReg), DefRC,
                                            NewIdx);

  if (MRI->constrainRegClass(DstReg, DefRC)) {
  } else if (NewIdx && RCForInst) {
    MRI->constrainRegClass(DstReg, RCForInst);

  updateRegDefsUses(DstReg, DstReg, DstIdx);

         "Only expect virtual or physical registers in remat");

  assert(MO.isImplicit() && "No explicit operands after implicit operands.");

  SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
  for (unsigned i = 0, e = NewMIImplDefs.size(); i != e; ++i) {
    unsigned Reg = NewMIImplDefs[i];
    if (LiveRange *LR = LIS->getCachedRegUnit(*Units))
      LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());

  LIS->shrinkToUses(&SrcInt, &DeadDefs);
  if (!DeadDefs.empty())
    eliminateDeadDefs();
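
// eliminateUndefCopy - ProcessImplicitDefs may leave copies of <undef>
// values. Delete such a copy and mark all uses of its destination <undef>.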
bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI,
                                           const CoalescerPair &CP) {
  SlotIndex Idx = LIS->getInstructionIndex(CopyMI);

  assert(DeadVNI && "No value defined in DstInt");

  for (MachineRegisterInfo::reg_nodbg_iterator
         I = MRI->reg_nodbg_begin(DstInt->reg), E = MRI->reg_nodbg_end();
       I != E;) {
    SlotIndex Idx = LIS->getInstructionIndex(MI);
    DEBUG(dbgs() << "\tnew undef: " << Idx << '\t' << *MI);
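
// updateRegDefsUses - Replace all defs and uses of SrcReg (and its
// subregisters) with DstReg, composing subregister indices where needed.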
void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
                                          unsigned DstReg,
                                          unsigned SubIdx) {
  LiveInterval *DstInt = DstIsPhys ? 0 : &LIS->getInterval(DstReg);

  if (SrcReg == DstReg && !Visited.insert(UseMI))
    continue;

  if (DstInt && !Reads && SubIdx)
    Reads = DstInt->liveAt(LIS->getInstructionIndex(UseMI));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    if (SubIdx && MO.isDef())

      dbgs() << "\t\tupdated: ";
      dbgs() << LIS->getInstructionIndex(UseMI) << "\t";
bool RegisterCoalescer::canJoinPhys(const CoalescerPair &CP) {

  DEBUG(dbgs() << "\tCan only merge into reserved registers.\n");

  DEBUG(dbgs() << "\tCannot join defs into reserved register.\n");
bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {

  DEBUG(dbgs() << LIS->getInstructionIndex(CopyMI) << '\t' << *CopyMI);

  DEBUG(dbgs() << "\tNot coalescable.\n");

  DeadDefs.push_back(CopyMI);
  eliminateDeadDefs();

  if (!CP.isPhys() && eliminateUndefCopy(CopyMI, CP)) {
    DEBUG(dbgs() << "\tEliminated copy of <undef> value.\n");
    LIS->RemoveMachineInstrFromMaps(CopyMI);

  DEBUG(dbgs() << "\tCopy already coalesced: " << LI << '\n');

  assert(ReadVNI && "No value before copy and no <undef> flag.");
  assert(ReadVNI != DefVNI && "Cannot read and define the same value.");

  DEBUG(dbgs() << "\tMerged values: " << LI << '\n');
  LIS->RemoveMachineInstrFromMaps(CopyMI);

  if (!canJoinPhys(CP)) {
    if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
      return true;

        << TRI->getSubRegIndexName(CP.getDstIdx()) << " and "
        << TRI->getSubRegIndexName(CP.getSrcIdx()) << '\n';

      LIS->getInterval(CP.getDstReg()).size())

  if (!joinIntervals(CP)) {
    if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
      return true;

    if (adjustCopiesBackFrom(CP, CopyMI) ||
        removeCopyByCommutingDef(CP, CopyMI)) {
      LIS->RemoveMachineInstrFromMaps(CopyMI);

  if (!CP.isPhys() && RegClassInfo.isProperSubClass(CP.getNewRC()))

  ErasedInstrs.erase(CopyMI);

  dbgs() << "\tJoined. Result = ";
bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
  assert(CP.isPhys() && "Must be a physreg copy");
  assert(MRI->isReserved(CP.getDstReg()) && "Not a reserved register");

  DEBUG(dbgs() << "\t\tRHS = " << RHS << '\n');

         "Invalid join with reserved register");

  if (RHS.overlaps(LIS->getRegUnit(*UI))) {

  LIS->RemoveMachineInstrFromMaps(CopyMI);
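
// JoinVals - Per-value-number bookkeeping used while joining two virtual
// registers. Each value in the union must be given a conflict resolution
// that says whether it is kept, erased, merged, or replaces the other value.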
enum ConflictResolution {
  CR_Keep,
  CR_Erase,
  CR_Merge,
  CR_Replace,
  CR_Unresolved,
  CR_Impossible
};

ConflictResolution Resolution;
unsigned WriteLanes;
unsigned ValidLanes;
bool ErasableImplicitDef;
bool PrunedComputed;

Val() : Resolution(CR_Keep), WriteLanes(0), ValidLanes(0),
        RedefVNI(0), OtherVNI(0), ErasableImplicitDef(false),
        Pruned(false), PrunedComputed(false) {}

bool isAnalyzed() const { return WriteLanes != 0; }
unsigned computeWriteLanes(const MachineInstr *DefMI, bool &Redef);
ConflictResolution analyzeValue(unsigned ValNo, JoinVals &Other);
void computeAssignment(unsigned ValNo, JoinVals &Other);
bool taintExtent(unsigned, unsigned, JoinVals&,
                 SmallVectorImpl<std::pair<SlotIndex, unsigned> >&);
bool usesLanes(MachineInstr *MI, unsigned, unsigned, unsigned);
bool isPrunedValue(unsigned ValNo, JoinVals &Other);

  : LI(li), SubIdx(subIdx), NewVNInfo(newVNInfo), CP(cp), LIS(lis),
    Indexes(LIS->getSlotIndexes()), TRI(tri),
    Assignments(LI.getNumValNums(), -1), Vals(LI.getNumValNums())

bool mapValues(JoinVals &Other);
bool resolveConflicts(JoinVals &Other);
const int *getAssignments() const { return Assignments.data(); }
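
// computeWriteLanes - Compute the bitmask of lanes actually written by DefMI.
// Set Redef if there are any partial register definitions that depend on the
// previous value of the register.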
unsigned JoinVals::computeWriteLanes(const MachineInstr *DefMI, bool &Redef) {

  L |= TRI->getSubRegIndexLaneMask(
         TRI->composeSubRegIndices(SubIdx, MO->getSubReg()));

assert(MI && "No defining instruction");
JoinVals::ConflictResolution
JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
  Val &V = Vals[ValNo];
  assert(!V.isAnalyzed() && "Value has already been analyzed!");

  if (VNI->isUnused()) {

  if (VNI->isPHIDef()) {
    V.ValidLanes = V.WriteLanes = TRI->getSubRegIndexLaneMask(SubIdx);

  DefMI = Indexes->getInstructionFromIndex(VNI->def);
  V.ValidLanes = V.WriteLanes = computeWriteLanes(DefMI, Redef);

  assert(V.RedefVNI && "Instruction is reading nonexistent value");
  computeAssignment(V.RedefVNI->id, Other);
  V.ValidLanes |= Vals[V.RedefVNI->id].ValidLanes;

  V.ErasableImplicitDef = true;
  V.ValidLanes &= ~V.WriteLanes;

  if (OtherVNI->def < VNI->def)
    Other.computeAssignment(OtherVNI->id, *this);
  else if (VNI->def < OtherVNI->def && OtherLRQ.valueIn()) {
    V.OtherVNI = OtherLRQ.valueIn();
    return CR_Impossible;

  V.OtherVNI = OtherVNI;
  Val &OtherV = Other.Vals[OtherVNI->id];
  if (!OtherV.isAnalyzed())

  if (VNI->isPHIDef())

  if (V.ValidLanes & OtherV.ValidLanes)
    return CR_Impossible;

  V.OtherVNI = OtherLRQ.valueIn();

  Other.computeAssignment(V.OtherVNI->id, *this);
  Val &OtherV = Other.Vals[V.OtherVNI->id];

  if (OtherV.ErasableImplicitDef && DefMI &&
      DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) {
    DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
                 << ", keeping it.\n");
    OtherV.ErasableImplicitDef = false;

  if (VNI->isPHIDef())

  V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes;

      stripCopies(VNI) == stripCopies(V.OtherVNI))

  if ((V.WriteLanes & OtherV.ValidLanes) == 0)

  assert(VNI->def.isEarlyClobber() &&
         "Only early clobber defs can overlap a kill");
  return CR_Impossible;

  if ((TRI->getSubRegIndexLaneMask(Other.SubIdx) & ~V.WriteLanes) == 0)
    return CR_Impossible;

  if (OtherLRQ.endPoint() >= Indexes->getMBBEndIdx(MBB))
    return CR_Impossible;

  return CR_Unresolved;
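
// computeAssignment - Assign ValNo a value number in the joined live range,
// recursing into the other range's values when they must be merged.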
void JoinVals::computeAssignment(unsigned ValNo, JoinVals &Other) {
  Val &V = Vals[ValNo];
  if (V.isAnalyzed()) {
    assert(Assignments[ValNo] != -1 && "Bad recursion?");

  switch ((V.Resolution = analyzeValue(ValNo, Other))) {

    assert(V.OtherVNI && "OtherVNI not assigned, can't merge.");
    assert(Other.Vals[V.OtherVNI->id].isAnalyzed() && "Missing recursion");
    Assignments[ValNo] = Other.Assignments[V.OtherVNI->id];

          << PrintReg(Other.LI.reg) << ':' << V.OtherVNI->id << '@'
          << V.OtherVNI->def << " --> @"
          << NewVNInfo[Assignments[ValNo]]->def << '\n');

    assert(V.OtherVNI && "OtherVNI not assigned, can't prune");
    Other.Vals[V.OtherVNI->id].Pruned = true;

    Assignments[ValNo] = NewVNInfo.size();
bool JoinVals::mapValues(JoinVals &Other) {
    computeAssignment(i, Other);
    if (Vals[i].Resolution == CR_Impossible) {
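
// taintExtent - Determine how far the tainted lanes of a conflicting value
// extend, recording (end index, lanes) pairs in TaintExtent. Returns false
// if the tainted lanes would escape the basic block.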
bool JoinVals::
taintExtent(unsigned ValNo, unsigned TaintedLanes, JoinVals &Other,
            SmallVectorImpl<std::pair<SlotIndex, unsigned> > &TaintExtent) {
  SlotIndex MBBEnd = Indexes->getMBBEndIdx(MBB);

  assert(OtherI != Other.LI.end() && "No conflict?");
    if (End >= MBBEnd) {
      DEBUG(dbgs() << "\t\ttaints global " << PrintReg(Other.LI.reg) << ':'
                   << OtherI->valno->id << '@' << OtherI->start << '\n');
      return false;
    }
    DEBUG(dbgs() << "\t\ttaints local " << PrintReg(Other.LI.reg) << ':'
                 << OtherI->valno->id << '@' << OtherI->start
                 << " to " << End << '\n');
    TaintExtent.push_back(std::make_pair(End, TaintedLanes));

    if (++OtherI == Other.LI.end() || OtherI->start >= MBBEnd)
      break;

    const Val &OV = Other.Vals[OtherI->valno->id];
    TaintedLanes &= ~OV.WriteLanes;
  } while (TaintedLanes);
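
// usesLanes - Return true if MI uses any of the given lanes from Reg. This
// does not include partial redefinitions of Reg.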
bool JoinVals::usesLanes(MachineInstr *MI, unsigned Reg, unsigned SubIdx,
                         unsigned Lanes) {

  if (Lanes & TRI->getSubRegIndexLaneMask(
                TRI->composeSubRegIndices(SubIdx, MO->getSubReg())))
    return true;
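
// resolveConflicts - Try to turn the remaining CR_Unresolved values into
// CR_Replace by proving that no instruction actually reads the tainted lanes.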
bool JoinVals::resolveConflicts(JoinVals &Other) {
  assert(V.Resolution != CR_Impossible && "Unresolvable conflict");
  if (V.Resolution != CR_Unresolved)
    continue;

  assert(V.OtherVNI && "Inconsistent conflict resolution.");
  const Val &OtherV = Other.Vals[V.OtherVNI->id];

  unsigned TaintedLanes = V.WriteLanes & OtherV.ValidLanes;
  if (!taintExtent(i, TaintedLanes, Other, TaintExtent))
    return false;
  assert(!TaintExtent.empty() && "There should be at least one conflict.");

  MI = Indexes->getInstructionFromIndex(VNI->def);

         "Interference ends on VNI->def. Should have been handled earlier");

  MachineInstr *LastMI =
    Indexes->getInstructionFromIndex(TaintExtent.front().first);
  assert(LastMI && "Range must end at a proper instruction");
  unsigned TaintNum = 0;

    assert(MI != MBB->end() && "Bad LastMI");
    if (usesLanes(MI, Other.LI.reg, Other.SubIdx, TaintedLanes)) {
      DEBUG(dbgs() << "\t\ttainted lanes used by: " << *MI);
      return false;
    }
    if (&*MI == LastMI) {
      if (++TaintNum == TaintExtent.size())
        break;
      LastMI = Indexes->getInstructionFromIndex(TaintExtent[TaintNum].first);
      assert(LastMI && "Range must end at a proper instruction");
      TaintedLanes = TaintExtent[TaintNum].second;

  V.Resolution = CR_Replace;
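
// isPrunedValue - Determine if ValNo is a copy of a value number in LI or
// Other.LI that will be pruned, making its own def effectively dead.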
bool JoinVals::isPrunedValue(unsigned ValNo, JoinVals &Other) {
  Val &V = Vals[ValNo];
  if (V.Pruned || V.PrunedComputed)
    return V.Pruned;

  if (V.Resolution != CR_Erase && V.Resolution != CR_Merge)
    return V.Pruned;

  V.PrunedComputed = true;
  V.Pruned = Other.isPrunedValue(V.OtherVNI->id, *this);
  return V.Pruned;
}
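
// pruneValues - Trim the live ranges according to the resolved conflicts,
// collecting the end points that must have liveness restored after joining.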
void JoinVals::pruneValues(JoinVals &Other,
                           SmallVectorImpl<SlotIndex> &EndPoints) {
    switch (Vals[i].Resolution) {

      LIS->pruneValue(&Other.LI, Def, &EndPoints);

      Val &OtherV = Other.Vals[Vals[i].OtherVNI->id];
      bool EraseImpDef = OtherV.ErasableImplicitDef &&
                         OtherV.Resolution == CR_Keep;

      for (MIOperands MO(Indexes->getInstructionFromIndex(Def));
           MO.isValid(); ++MO)

            << ": " << Other.LI << '\n');

      if (isPrunedValue(i, Other)) {
        LIS->pruneValue(&LI, Def, &EndPoints);
                 << Def << ": " << LI << '\n');
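
// eraseInstrs - Erase any machine instructions that have been coalesced away,
// and add registers whose live ranges need recomputing to ShrinkRegs.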
    switch (Vals[i].Resolution) {

      if (!Vals[i].ErasableImplicitDef || !Vals[i].Pruned)

      DEBUG(dbgs() << "\t\tremoved " << i << '@' << Def << ": " << LI << '\n');

      MachineInstr *MI = Indexes->getInstructionFromIndex(Def);
      assert(MI && "No instruction to erase");

      DEBUG(dbgs() << "\t\terased:\t" << Def << '\t' << *MI);
      LIS->RemoveMachineInstrFromMaps(MI);
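
// joinVirtRegs - Join the live ranges of two virtual registers by computing
// a value mapping for each side, resolving conflicts, pruning, and merging.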
  JoinVals RHSVals(RHS, CP.getSrcIdx(), NewVNInfo, CP, LIS, TRI);
  JoinVals LHSVals(LHS, CP.getDstIdx(), NewVNInfo, CP, LIS, TRI);

        << "\n\t\tLHS = " << LHS

  if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals))
    return false;

  if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals))
    return false;

  LHSVals.pruneValues(RHSVals, EndPoints);
  RHSVals.pruneValues(LHSVals, EndPoints);

  LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
  RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
  while (!ShrinkRegs.empty())
    LIS->shrinkToUses(&LIS->getInterval(ShrinkRegs.pop_back_val()));

  LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo);

  MRI->clearKillFlags(LHS.reg);
  MRI->clearKillFlags(RHS.reg);

  if (EndPoints.empty())
    return true;

  DEBUG(dbgs() << "\t\trestoring liveness to " << EndPoints.size()
               << " points: " << LHS << '\n');
  LIS->extendToIndices(LHS, EndPoints);

  return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(CP);
struct MBBPriorityInfo {

  : MBB(mbb), Depth(depth), IsSplit(issplit) {}
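
// C-style comparator that sorts first by loop depth of the basic block, then
// by whether it is a split edge, then by connectivity, and finally by block
// number to keep the order deterministic.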
static int compareMBBPriority(const MBBPriorityInfo *LHS,
                              const MBBPriorityInfo *RHS) {
  if (LHS->Depth != RHS->Depth)
    return LHS->Depth > RHS->Depth ? -1 : 1;

  if (LHS->IsSplit != RHS->IsSplit)
    return LHS->IsSplit ? -1 : 1;

  unsigned cl = LHS->MBB->pred_size() + LHS->MBB->succ_size();
  unsigned cr = RHS->MBB->pred_size() + RHS->MBB->succ_size();
  if (cl != cr)
    return cl > cr ? -1 : 1;

  return LHS->MBB->getNumber() < RHS->MBB->getNumber() ? -1 : 1;
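
// copyCoalesceWorkList - Try joining every copy in CurrList, nulling out
// entries that were coalesced or permanently failed; returns true if any
// progress was made.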
bool RegisterCoalescer::
copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList) {
  bool Progress = false;
  for (unsigned i = 0, e = CurrList.size(); i != e; ++i) {

    if (ErasedInstrs.erase(CurrList[i])) {

    bool Success = joinCopy(CurrList[i], Again);
    Progress |= Success;
    if (Success || !Again)
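
// copyCoalesceInMBB - Collect all copy-like instructions in MBB onto the work
// lists and try joining them immediately, removing entries that succeeded.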
  const unsigned PrevSize = WorkList.size();
  if (JoinGlobalCopies) {

      if (!MII->isCopyLike())
        continue;
      if (isLocalCopy(&(*MII), LIS))
        LocalWorkList.push_back(&(*MII));
      else
        WorkList.push_back(&(*MII));

    if (MII->isCopyLike())
      WorkList.push_back(MII);

  MutableArrayRef<MachineInstr*>
    CurrList(WorkList.begin() + PrevSize, WorkList.end());
  if (copyCoalesceWorkList(CurrList))
    WorkList.erase(std::remove(WorkList.begin() + PrevSize, WorkList.end(),
                   (MachineInstr*)0), WorkList.end());
void RegisterCoalescer::coalesceLocals() {
  copyCoalesceWorkList(LocalWorkList);
  for (unsigned j = 0, je = LocalWorkList.size(); j != je; ++j) {
    if (LocalWorkList[j])
      WorkList.push_back(LocalWorkList[j]);
  }
  LocalWorkList.clear();
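
// joinAllIntervals - Visit blocks in priority order (deepest loops first) and
// coalesce the copies in each one, flushing the local work list whenever the
// loop depth changes.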
void RegisterCoalescer::joinAllIntervals() {
  DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");
  assert(WorkList.empty() && LocalWorkList.empty() && "Old data still around.");

  std::vector<MBBPriorityInfo> MBBs;
  MBBs.reserve(MF->size());

  MBBs.push_back(MBBPriorityInfo(MBB, Loops->getLoopDepth(MBB),
                                 isSplitEdge(MBB)));

  unsigned CurrDepth = UINT_MAX;
  for (unsigned i = 0, e = MBBs.size(); i != e; ++i) {
    if (JoinGlobalCopies && MBBs[i].Depth < CurrDepth) {
      coalesceLocals();
      CurrDepth = MBBs[i].Depth;
    }
    copyCoalesceInMBB(MBBs[i].MBB);
  }

  while (copyCoalesceWorkList(WorkList))
void RegisterCoalescer::releaseMemory() {
  ErasedInstrs.clear();
  InflateRegs.clear();
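
// runOnMachineFunction - The pass entry point: cache the required analyses,
// join all intervals, then try to inflate the register classes of registers
// that were left over-constrained by the deleted copies.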
  TRI = TM->getRegisterInfo();
  TII = TM->getInstrInfo();
  LIS = &getAnalysis<LiveIntervals>();
  AA = &getAnalysis<AliasAnalysis>();
  Loops = &getAnalysis<MachineLoopInfo>();

  DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
               << "********** Function: " << MF->getName() << '\n');

  if (VerifyCoalescing)
    MF->verify(this, "Before register coalescing");

  RegClassInfo.runOnMachineFunction(fn);

  InflateRegs.erase(std::unique(InflateRegs.begin(), InflateRegs.end()),
                    InflateRegs.end());
  DEBUG(dbgs() << "Trying to inflate " << InflateRegs.size() << " regs.\n");
  for (unsigned i = 0, e = InflateRegs.size(); i != e; ++i) {
    unsigned Reg = InflateRegs[i];
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    if (MRI->recomputeRegClass(Reg, *TM)) {

  if (VerifyCoalescing)
    MF->verify(this, "After register coalescing");