#define DEBUG_TYPE "gvn"

using namespace PatternMatch;
STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl,  "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");
static cl::opt<bool> EnablePRE("enable-pre", cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

// Maximum allowed recursion depth.
static cl::opt<uint32_t>
MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore,
                cl::desc("Max recurse depth (default = 1000)"));
namespace {
  struct Expression {
    uint32_t opcode;
    Type *type;
    SmallVector<uint32_t, 4> varargs;

    Expression(uint32_t o = ~2U) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      if (opcode == ~0U || opcode == ~1U)
        return true;
      if (type != other.type)
        return false;
      if (varargs != other.varargs)
        return false;
      return true;
    }

    friend hash_code hash_value(const Expression &Value) {
      return hash_combine(Value.opcode, Value.type,
                          hash_combine_range(Value.varargs.begin(),
                                             Value.varargs.end()));
    }
  };
  class ValueTable {
    DenseMap<Value*, uint32_t> valueNumbering;
    DenseMap<Expression, uint32_t> expressionNumbering;
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    uint32_t nextValueNumber;

    Expression create_expression(Instruction *I);
    Expression create_cmp_expression(unsigned Opcode,
                                     CmpInst::Predicate Predicate,
                                     Value *LHS, Value *RHS);
    Expression create_extractvalue_expression(ExtractValueInst *EI);
    uint32_t lookup_or_add_call(CallInst *C);
  public:
    ValueTable() : nextValueNumber(1) { }
    uint32_t lookup_or_add(Value *V);
    uint32_t lookup(Value *V) const;
    uint32_t lookup_or_add_cmp(unsigned Opcode, CmpInst::Predicate Pred,
                               Value *LHS, Value *RHS);
    void add(Value *V, uint32_t num);
    void clear();
    void erase(Value *v);
    void setAliasAnalysis(AliasAnalysis *A) { AA = A; }
    AliasAnalysis *getAliasAnalysis() const { return AA; }
    void setMemDep(MemoryDependenceAnalysis *M) { MD = M; }
    void setDomTree(DominatorTree *D) { DT = D; }
    uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
    void verifyRemoved(const Value *) const;
  };
} // end anonymous namespace
namespace llvm {
template<> struct DenseMapInfo<Expression> {
  // getEmptyKey, getTombstoneKey and getHashValue elided in this excerpt.
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};
} // end namespace llvm
Expression ValueTable::create_expression(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookup_or_add(*OI));
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers.  Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
  }

  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
  } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
    for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  }

  return e;
}
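// Illustrative example (editorial, not in the original source): thanks to the
// operand sorting above, `%x = add i32 %a, %b` and `%y = add i32 %b, %a`
// yield identical Expression keys, so both instructions receive the same
// value number and the second one becomes a candidate for elimination.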
Expression ValueTable::create_cmp_expression(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookup_or_add(LHS));
  e.varargs.push_back(lookup_or_add(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  return e;
}
Expression ValueTable::create_extractvalue_expression(ExtractValueInst *EI) {
  assert(EI != 0 && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
  if (I != 0 && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI might be an extract from one of our recognised intrinsics.  If it
    // is we'll synthesize a semantically equivalent expression instead of
    // an extract value expression.
    switch (I->getIntrinsicID()) {
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow:
        e.opcode = Instruction::Add;
        break;
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::usub_with_overflow:
        e.opcode = Instruction::Sub;
        break;
      case Intrinsic::smul_with_overflow:
      case Intrinsic::umul_with_overflow:
        e.opcode = Instruction::Mul;
        break;
      default:
        break;
    }

    if (e.opcode != 0) {
      // Intrinsic recognised.  Grab its args to finish building the expression.
      assert(I->getNumArgOperands() == 2 &&
             "Expect two args for recognised intrinsics.");
      e.varargs.push_back(lookup_or_add(I->getArgOperand(0)));
      e.varargs.push_back(lookup_or_add(I->getArgOperand(1)));
      return e;
    }
  }

  // Not a recognised intrinsic.  Fall back to producing an extract value
  // expression.
  e.opcode = EI->getOpcode();
  for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookup_or_add(*OI));

  for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  return e;
}
/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}
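// Editorial note: lookup_or_add_call below handles calls in three tiers.
// Calls that provably never access memory are numbered purely by their
// expression; read-only calls may share a number with an identical dominating
// call if MemoryDependenceAnalysis finds no intervening write; every other
// call is conservatively given a fresh value number.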
uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t &e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t &e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-definitions.  If we already have a call, reject
      // instruction dependencies.
      if (!I->getResult().isDef() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}
/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = create_expression(I);
      break;
    case Instruction::ExtractValue:
      exp = create_extractvalue_expression(cast<ExtractValueInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}
/// lookup_or_add_cmp - Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add_cmp(unsigned Opcode,
                                       CmpInst::Predicate Predicate,
                                       Value *LHS, Value *RHS) {
  Expression exp = create_cmp_expression(Opcode, Predicate, LHS, RHS);
  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  return e;
}
/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}
namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    LoadVal,    // A value produced by a load.
    MemIntrin,  // A memory intrinsic which is loaded from.
    UndefVal    // An UndefValue representing a value from a dead block (which
                // is not yet physically removed from the CFG).
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 2, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getLoad(BasicBlock *BB, LoadInst *LI,
                                       unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(LI);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(0);
    Res.Val.setInt(UndefVal);
    Res.Offset = 0;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
  bool isUndefValue() const { return Val.getInt() == UndefVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const;
};
  class GVN : public FunctionPass {
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const DataLayout *TD;
    const TargetLibraryInfo *TLI;
    SetVector<BasicBlock *> DeadBlocks;

    ValueTable VN;

    /// LeaderTable - A mapping from value numbers to lists of Value*'s that
    /// have that value number.  Use findLeader to query it.
    struct LeaderTableEntry {
      Value *Val;
      const BasicBlock *BB;
      LeaderTableEntry *Next;
    };
    DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
    BumpPtrAllocator TableAllocator;

    SmallVector<Instruction*, 8> InstrsToErase;
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    typedef SmallVector<NonLocalDepResult, 64> LoadDepVect;
    typedef SmallVector<AvailableValueInBlock, 64> AvailValInBlkVect;
    typedef SmallVector<BasicBlock*, 64> UnavailBlkVect;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
        : FunctionPass(ID), NoLoads(noloads), MD(0) {
      initializeGVNPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F);

    /// markInstructionForDeletion - This removes the specified instruction from
    /// our various maps and marks it for deletion.
    void markInstructionForDeletion(Instruction *I) {
      VN.erase(I);
      InstrsToErase.push_back(I);
    }

    const DataLayout *getDataLayout() const { return TD; }
    DominatorTree &getDominatorTree() const { return *DT; }
    AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
    MemoryDependenceAnalysis &getMemDep() const { return *MD; }
  private:
    /// addToLeaderTable - Push a new Value to the LeaderTable onto the list for
    /// its value number.
    void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
      LeaderTableEntry &Curr = LeaderTable[N];
      if (!Curr.Val) {
        Curr.Val = V;
        Curr.BB = BB;
        return;
      }

      LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
      Node->Val = V;
      Node->BB = BB;
      Node->Next = Curr.Next;
      Curr.Next = Node;
    }

    /// removeFromLeaderTable - Scan the list of values corresponding to a given
    /// value number, and remove the given instruction if encountered.
    void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
      LeaderTableEntry* Prev = 0;
      LeaderTableEntry* Curr = &LeaderTable[N];

      while (Curr->Val != I || Curr->BB != BB) {
        Prev = Curr;
        Curr = Curr->Next;
      }

      if (Prev) {
        Prev->Next = Curr->Next;
      } else {
        if (!Curr->Next) {
          Curr->Val = 0;
          Curr->BB = 0;
        } else {
          LeaderTableEntry* Next = Curr->Next;
          Curr->Val = Next->Val;
          Curr->BB = Next->BB;
          Curr->Next = Next->Next;
        }
      }
    }
    // Helper functions of redundant load elimination.
    bool processLoad(LoadInst *L);
    bool processNonLocalLoad(LoadInst *L);
    void AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                 AvailValInBlkVect &ValuesPerBlock,
                                 UnavailBlkVect &UnavailableBlocks);
    bool PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                        UnavailBlkVect &UnavailableBlocks);

    // Other helper routines.
    bool processInstruction(Instruction *I);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*> &d);
    bool iterateOnFunction(Function &F);
    bool performPRE(Function &F);
    Value *findLeader(const BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
    BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
    unsigned replaceAllDominatedUsesWith(Value *From, Value *To,
                                         const BasicBlockEdge &Root);
    bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root);
    bool processFoldableCondBr(BranchInst *BI);
    void addDeadBlock(BasicBlock *BB);
    void assignValNumForDeadCode();
  };

  char GVN::ID = 0;
} // end anonymous namespace
// createGVNPass - The public interface to this file.
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}
#endif
/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
/// map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks,
                            uint32_t RecurseDepth) {
  if (RecurseDepth > MaxRecurseDepth)
    return false;

  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks, RecurseDepth+1))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    BBWorklist.append(succ_begin(Entry), succ_end(Entry));
  } while (!BBWorklist.empty());

  return false;
}
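// Editorial note on the speculation protocol above: a block is inserted
// optimistically with state 2.  If any predecessor turns out unavailable,
// control transfers to SpeculationFailure, which resets the block to 0 and,
// when the optimistic guess was already consumed by other queries (state 3),
// also walks the transitive successors clearing their availability marks.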
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value to the loaded type.  Returns the coerced value, inserting
/// new instructions as needed, or null if this is impossible.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const DataLayout &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  // If this is already the right type, just return it.
  Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    // Pointer to Pointer -> use bitcast.
    if (StoredValTy->getScalarType()->isPointerTy() &&
        LoadedTy->getScalarType()->isPointerTy())
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->getScalarType()->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy);
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->getScalarType()->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(TypeToCastTo);

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->getScalarType()->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->getScalarType()->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy);
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(Type::getInt64Ty(StoredValTy->getContext()),
                                     StoreSize - LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->getScalarType()->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const DataLayout &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, &TD);
  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
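// Worked example (editorial): for `store i64 %v, i64* %p` clobbering a load
// of the i32 at byte %p+4, StoreOffset = 0, LoadOffset = 4, StoreSize = 8 and
// LoadSize = 4.  The load is fully covered by the store, so the function
// returns 4, the byte offset of the loaded piece within the stored value.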
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const DataLayout &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getValueOperand()->getType()->isStructTy() ||
      DepSI->getValueOperand()->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

/// AnalyzeLoadFromClobberingLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering load.
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
                                         LoadInst *DepLI, const DataLayout &TD) {
  // Cannot handle reading from load of first-class aggregate yet.
  if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
    return -1;

  Value *DepPtr = DepLI->getPointerOperand();
  uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType());
  int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD);
  if (R != -1)
    return R;

  // If we have a load/load clobber and DepLI can be widened to cover this
  // load, then we should widen it!
  int64_t LoadOffs = 0;
  const Value *LoadBase =
    GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &TD);
  unsigned LoadSize = TD.getTypeStoreSize(LoadTy);

  unsigned Size = MemoryDependenceAnalysis::
    getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD);
  if (Size == 0)
    return -1;

  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD);
}

static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const DataLayout &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0)
    return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // For memset, we just need the offset to be valid within the memset size.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // For memcpy/memmove, the only case we can handle is a copy from constant
  // memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);
  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0)
    return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));
  if (GV == 0 || !GV->isConstant())
    return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  unsigned AS = Src->getType()->getPointerAddressSpace();
  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 Type::getInt8PtrTy(Src->getContext(), AS));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
  Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}
/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  Extract the
/// bits of the stored value that feed the load.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   Type *LoadTy,
                                   Instruction *InsertPt, const DataLayout &TD) {
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->getScalarType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal,
                                    TD.getIntPtrType(SrcVal->getType()));
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt);

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
/// GetLoadValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering load.  If necessary,
/// widen the earlier load so it covers the bytes this load needs.
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
                                  Type *LoadTy, Instruction *InsertPt,
                                  GVN &gvn) {
  const DataLayout &TD = *gvn.getDataLayout();
  // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
  // widen SrcVal out to a larger load.
  unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
  unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
  if (Offset+LoadSize > SrcValSize) {
    assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");

    // We should widen SrcVal to the next power-of-2 size big enough.
    unsigned NewLoadSize = Offset+LoadSize;
    if (!isPowerOf2_32(NewLoadSize))
      NewLoadSize = NextPowerOf2(NewLoadSize);

    Value *PtrVal = SrcVal->getPointerOperand();

    // Insert the new load after the old load.  This ensures that subsequent
    // memdep queries will find the new load.  We can't easily remove the old
    // load completely because it is already in the value numbering table.
    IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
    Type *DestPTy = IntegerType::get(LoadTy->getContext(), NewLoadSize*8);
    DestPTy = PointerType::get(DestPTy,
                               PtrVal->getType()->getPointerAddressSpace());
    Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
    PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
    LoadInst *NewLoad = Builder.CreateLoad(PtrVal);
    NewLoad->takeName(SrcVal);
    NewLoad->setAlignment(SrcVal->getAlignment());

    DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
    DEBUG(dbgs() << "TO: " << *NewLoad << "\n");

    // Replace uses of the original load with the wider load.  On a big endian
    // system, we need to shift down to get the relevant bits.
    Value *RV = NewLoad;
    if (TD.isBigEndian())
      RV = Builder.CreateLShr(RV,
                    NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
    RV = Builder.CreateTrunc(RV, SrcVal->getType());
    SrcVal->replaceAllUsesWith(RV);

    // We can't use gvn.markInstructionForDeletion here, because the load is
    // already memoized into the leader map table that GVN tracks.  Just leave
    // the old load in place; it computes the same value as the new one.
    gvn.getMemDep().removeInstruction(SrcVal);
    SrcVal = NewLoad;
  }

  return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD);
}
/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     Type *LoadTy, Instruction *InsertPt,
                                     const DataLayout &TD) {
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());
  unsigned AS = Src->getType()->getPointerAddressSpace();

  // Constant fold a load from the constant with the offset applied.
  Src = ConstantExpr::getBitCast(Src,
                                 Type::getInt8PtrTy(Src->getContext(), AS));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
  Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}
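// Worked example (editorial): forwarding `memset(p, 0xAB, n)` into an i32
// load builds the splat 0xABABABAB by doubling (0xAB -> 0xABAB ->
// 0xABABABAB), one shl/or pair per step of the loop above.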
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     GVN &gvn) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent())) {
    assert(!ValuesPerBlock[0].isUndefValue() && "Dead BB dominate this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, gvn));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->getScalarType()->isPointerTy()) {
    AliasAnalysis *AA = gvn.getAliasAnalysis();

    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

    // Now that we've copied information to the new PHIs, scan through
    // them again and inform alias analysis that we've added potentially
    // escaping uses to any values that are operands to these PHIs.
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) {
      PHINode *P = NewPHIs[i];
      for (unsigned ii = 0, ee = P->getNumIncomingValues(); ii != ee; ++ii) {
        unsigned jj = PHINode::getOperandNumForIncomingValue(ii);
        AA->addEscapingUse(P->getOperandUse(jj));
      }
    }
  }

  return V;
}
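// Editorial note: SSAUpdater does the heavy lifting of placing and pruning
// the phi nodes that merge the per-block available values; the single-block
// dominating case at the top of the function bypasses it as a fast path.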
Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy,
                                                       GVN &gvn) const {
  Value *Res;
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      const DataLayout *TD = gvn.getDataLayout();
      assert(TD && "Need target data to handle type mismatch case");
      Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                 *TD);

      DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                   << *getSimpleValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *Load = getCoercedLoadValue();
    if (Load->getType() == LoadTy && Offset == 0) {
      Res = Load;
    } else {
      Res = GetLoadValueForLoad(Load, Offset, LoadTy, BB->getTerminator(),
                                gvn);

      DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << "  "
                   << *getCoercedLoadValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    const DataLayout *TD = gvn.getDataLayout();
    assert(TD && "Need target data to handle type mismatch case");
    Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                 LoadTy, BB->getTerminator(), *TD);
    DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                 << "  " << *getMemIntrinValue() << '\n'
                 << *Res << '\n' << "\n\n\n");
  } else {
    assert(isUndefValue() && "Should be UndefVal");
    DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
    return UndefValue::get(LoadTy);
  }
  return Res;
}
static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}
void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                  AvailValInBlkVect &ValuesPerBlock,
                                  UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  unsigned NumDeps = Deps.size();
  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op is treated as a load evaluating the same value
      // as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same as
      // the pointer operand of the load if PHI translation occurs.  Make sure
      // to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from the
      // stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                      DepSI->getValueOperand(),
                                                                Offset));
            continue;
          }
        }
      }

      // Check to see if we have something like this:
      //    load i32* P
      //    load i8* (P+1)
      // if we have this, replace the later with an extraction from the former.
      if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
        if (DepLI != LI && Address && TD) {
          int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(), Address,
                                                     DepLI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, DepLI,
                                                                    Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // DepInfo.isDef() here.
    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getValueOperand()->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                       S->getValueOperand()));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        // If the loaded value is larger or equal to the value we need, reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
  }
}
bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                         UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Lets think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock*, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated.  Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<BasicBlock *, 4> CriticalEdgePred;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0))
      continue;
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      if (LoadBB->isLandingPad()) {
        DEBUG(dbgs()
              << "COULD NOT PRE LOAD BECAUSE OF LANDING PAD CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      CriticalEdgePred.push_back(Pred);
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Split critical edges, and update the unavailable predecessors accordingly.
  for (SmallVectorImpl<BasicBlock *>::iterator I = CriticalEdgePred.begin(),
         E = CriticalEdgePred.end(); I != E; I++) {
    BasicBlock *OrigPred = *I;
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    PredLoads.erase(OrigPred);
    PredLoads[NewPred] = 0;
    DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
          << LoadBB->getName() << '\n');
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.  The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
    PHITransAddr Address(LI->getPointerOperand(), TD);
    Value *LoadPtr = 0;
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      Instruction *I = NewInsts.pop_back_val();
      if (MD) MD->removeInstruction(I);
      I->eraseFromParent();
    }
    // HINT: Don't revert the edge-splitting as following transformation may
    // also need to split these critical edges.
    return !CriticalEdgePred.empty();
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                        LI->getAlignment(),
                                        UnavailablePred->getTerminator());

    // Transfer the old load's TBAA tag to the new load.
    if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa))
      NewLoad->setMetadata(LLVMContext::MD_tbaa, Tag);

    // Transfer DebugLoc.
    NewLoad->setDebugLoc(LI->getDebugLoc());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->getScalarType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(LI);
  ++NumPRELoad;
  return true;
}
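// Illustrative CFG (editorial): in a diamond where the loaded value is
// available on one incoming path but not the other, PerformLoadPRE inserts
// exactly one new load in the single unavailable predecessor and merges the
// two values with a phi, effectively moving the load rather than duplicating
// it.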
/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
  // Step 1: Find the non-local dependencies of the load.
  LoadDepVect Deps;
  AliasAnalysis::Location Loc = VN.getAliasAnalysis()->getLocation(LI);
  MD->getNonLocalPointerDependency(Loc, true, LI->getParent(), Deps);

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  unsigned NumDeps = Deps.size();
  if (NumDeps > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " has unknown dependencies\n";
    );
    return false;
  }

  // Step 2: Analyze the availability of the load.
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return false;

  // Step 3: Eliminate full redundancy.
  //
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->getScalarType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}
static void patchReplacementInstruction(Instruction *I, Value *Repl) {
  // Patch the replacement so that it is not more restrictive than the value
  // being replaced.
  BinaryOperator *Op = dyn_cast<BinaryOperator>(I);
  BinaryOperator *ReplOp = dyn_cast<BinaryOperator>(Repl);
  if (Op && ReplOp && isa<OverflowingBinaryOperator>(Op) &&
      isa<OverflowingBinaryOperator>(ReplOp)) {
    if (ReplOp->hasNoSignedWrap() && !Op->hasNoSignedWrap())
      ReplOp->setHasNoSignedWrap(false);
    if (ReplOp->hasNoUnsignedWrap() && !Op->hasNoUnsignedWrap())
      ReplOp->setHasNoUnsignedWrap(false);
  }
  if (Instruction *ReplInst = dyn_cast<Instruction>(Repl)) {
    SmallVector<std::pair<unsigned, MDNode*>, 4> Metadata;
    ReplInst->getAllMetadataOtherThanDebugLoc(Metadata);
    for (int i = 0, n = Metadata.size(); i < n; ++i) {
      unsigned Kind = Metadata[i].first;
      MDNode *IMD = I->getMetadata(Kind);
      MDNode *ReplMD = Metadata[i].second;
      switch (Kind) {
      default:
        ReplInst->setMetadata(Kind, NULL); // Remove unknown metadata
        break;
      case LLVMContext::MD_dbg:
        llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
      case LLVMContext::MD_tbaa:
        ReplInst->setMetadata(Kind, MDNode::getMostGenericTBAA(IMD, ReplMD));
        break;
      case LLVMContext::MD_range:
        ReplInst->setMetadata(Kind, MDNode::getMostGenericRange(IMD, ReplMD));
        break;
      case LLVMContext::MD_prof:
        llvm_unreachable("MD_prof in a non-terminator instruction");
      case LLVMContext::MD_fpmath:
        ReplInst->setMetadata(Kind, MDNode::getMostGenericFPMath(IMD, ReplMD));
        break;
      }
    }
  }
}

static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
}
/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  if (!L->isSimple())
    return false;

  if (L->use_empty()) {
    markInstructionForDeletion(L);
    return true;
  }

  // Find the local dependency of this load.
  MemDepResult Dep = MD->getDependency(L);

  // If we have a clobber and target data is around, see if this is a clobber
  // that we can fix up through code synthesis.
  if (Dep.isClobber() && TD) {
    Value *AvailVal = 0;

    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, extract the bits we need from the stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
      int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                  L->getPointerOperand(),
                                                  DepSI, *TD);
      if (Offset != -1)
        AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
                                        L->getType(), L, *TD);
    }

    // Check to see if we have something like this:
    //    load i32* P
    //    load i8* (P+1)
    // if we have this, replace the later with an extraction from the former.
    if (LoadInst *DepLI = dyn_cast<LoadInst>(Dep.getInst())) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      if (DepLI == L)
        return false;

      int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
                                                 L->getPointerOperand(),
                                                 DepLI, *TD);
      if (Offset != -1)
        AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepMI, *TD);
      if (Offset != -1)
        AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD);
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
            << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->getScalarType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      markInstructionForDeletion(L);
      ++NumGVNLoad;
      return true;
    }
  }

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  if (!Dep.isDef()) {
    DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      dbgs() << " has unknown dependence\n";
    );
    return false;
  }

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getValueOperand();

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    if (StoredVal->getType() != L->getType()) {
      if (TD) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->getScalarType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    if (DepLI->getType() != L->getType()) {
      if (TD) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    patchAndReplaceAllUsesWith(L, AvailableVal);
    if (DepLI->getType()->getScalarType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading for a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load occurs right after a lifetime begin, the loaded value is
  // undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      markInstructionForDeletion(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}
/// findLeader - In order to find a leader for a given value number at a
/// specific basic block, we first obtain the list of all Values for that
/// number, and then scan the list to find one whose block dominates the block
/// in question.  This is fast because dominator tree queries consist of only
/// a few comparisons of DFS numbers.
Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
  LeaderTableEntry Vals = LeaderTable[num];
  if (!Vals.Val) return 0;

  Value *Val = 0;
  if (DT->dominates(Vals.BB, BB)) {
    Val = Vals.Val;
    if (isa<Constant>(Val)) return Val;
  }

  LeaderTableEntry* Next = Vals.Next;
  while (Next) {
    if (DT->dominates(Next->BB, BB)) {
      if (isa<Constant>(Next->Val)) return Next->Val;
      if (!Val) Val = Next->Val;
    }

    Next = Next->Next;
  }

  return Val;
}
/// replaceAllDominatedUsesWith - Replace all uses of 'From' with 'To' if the
/// use is dominated by the given edge.  Returns the number of uses that were
/// replaced.
unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
                                          const BasicBlockEdge &Root) {
  unsigned Count = 0;
  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
       UI != UE; ) {
    Use &U = (UI++).getUse();

    if (DT->dominates(Root, U)) {
      U.set(To);
      ++Count;
    }
  }
  return Count;
}
/// isOnlyReachableViaThisEdge - There is an edge from 'Src' to 'Dst'.  Return
/// true if every path from the entry block to 'Dst' passes via this edge.  In
/// particular 'Dst' must not be reachable via another edge from 'Src'.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
                                       DominatorTree *DT) {
  // At the time GVN runs all loops have preheaders, so if Dst is only
  // reachable from Src it has exactly one predecessor, namely Src.
  const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
  const BasicBlock *Src = E.getStart();
  assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
  (void)Src;
  return Pred != 0;
}
/// propagateEquality - The given values are known to be equal in every block
/// dominated by 'Root'.  Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope.  Returns whether a change was made.
bool GVN::propagateEquality(Value *LHS, Value *RHS,
                            const BasicBlockEdge &Root) {
  SmallVector<std::pair<Value*, Value*>, 4> Worklist;
  Worklist.push_back(std::make_pair(LHS, RHS));
  bool Changed = false;
  // For speed, compute a conservative fast approximation to
  // DT->dominates(Root, Root.getEnd());
  bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);

  while (!Worklist.empty()) {
    std::pair<Value*, Value*> Item = Worklist.pop_back_val();
    LHS = Item.first; RHS = Item.second;

    if (LHS == RHS) continue;
    assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");

    // Don't try to propagate equalities between constants.
    if (isa<Constant>(LHS) && isa<Constant>(RHS)) continue;

    // Prefer a constant on the right-hand side, or an Argument if no constants.
    if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
      std::swap(LHS, RHS);
    assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");

    // If there is no obvious reason to prefer the left-hand side over the
    // right-hand side, ensure the longest lived term is on the right-hand side,
    // so the shortest lived term is replaced by the longest lived.  This tends
    // to expose more simplifications.
    uint32_t LVN = VN.lookup_or_add(LHS);
    if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
        (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
      // Move the 'oldest' value to the right-hand side, using the value number
      // as a proxy for age.
      uint32_t RVN = VN.lookup_or_add(RHS);
      if (LVN < RVN) {
        std::swap(LHS, RHS);
        LVN = RVN;
      }
    }

    // If value numbering later sees that an instruction in the scope is equal
    // to 'LHS' then ensure it will be turned into 'RHS'.  The leader table only
    // tracks basic blocks, not edges, so only add to it in the simple case
    // where the edge dominates the end.
    if (RootDominatesEnd && !isa<Instruction>(RHS))
      addToLeaderTable(LVN, RHS, Root.getEnd());

    // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope.  As
    // LHS always has at least one use that is not dominated by Root, this will
    // never do anything if LHS has only one use.
    if (!LHS->hasOneUse()) {
      unsigned NumReplacements = replaceAllDominatedUsesWith(LHS, RHS, Root);
      Changed |= NumReplacements > 0;
      NumGVNEqProp += NumReplacements;
    }

    // Now try to deduce additional equalities from this one.  This is only
    // possible when the right-hand side is a boolean constant.
    ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
    if (!CI || !CI->getType()->isIntegerTy(1))
      continue;
    // Whether RHS equals 'true'.
    bool isKnownTrue = CI->isAllOnesValue();
    bool isKnownFalse = !isKnownTrue;

    // If "A && B" is known true then both A and B are known true.  If "A || B"
    // is known false then both A and B are known false.
    Value *A, *B;
    if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) ||
        (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) {
      Worklist.push_back(std::make_pair(A, RHS));
      Worklist.push_back(std::make_pair(B, RHS));
      continue;
    }

    // If we are propagating an equality like "(A == B)" == "true" then also
    // propagate the equality A == B.  When propagating a comparison such as
    // "(A >= B)" == "true", replace all instances of "A < B" with "false".
    if (ICmpInst *Cmp = dyn_cast<ICmpInst>(LHS)) {
      Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);

      // If "A == B" is known true, or "A != B" is known false, then replace
      // A with B everywhere in the scope.
      if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) ||
          (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE))
        Worklist.push_back(std::make_pair(Op0, Op1));

      // If "A >= B" is known true, replace "A < B" with false everywhere.
      CmpInst::Predicate NotPred = Cmp->getInversePredicate();
      Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
      // Since we don't have the instruction "A < B" immediately to hand, work
      // out the value number that it would have and use that to find an
      // appropriate instruction (if any).
      uint32_t NextNum = VN.getNextUnusedValueNumber();
      uint32_t Num = VN.lookup_or_add_cmp(Cmp->getOpcode(), NotPred, Op0, Op1);
      // If the number we were assigned was brand new then there is no point in
      // looking for an instruction realizing it: there cannot be one!
      if (Num < NextNum) {
        Value *NotCmp = findLeader(Root.getEnd(), Num);
        if (NotCmp && isa<Instruction>(NotCmp)) {
          unsigned NumReplacements =
            replaceAllDominatedUsesWith(NotCmp, NotVal, Root);
          Changed |= NumReplacements > 0;
          NumGVNEqProp += NumReplacements;
        }
      }
      // Ensure that any instruction in scope that gets the "A < B" value number
      // is replaced with false.
      if (RootDominatesEnd)
        addToLeaderTable(Num, NotVal, Root.getEnd());

      continue;
    }
  }

  return Changed;
}
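// Worked example (editorial): given `%c = icmp eq i32 %x, 0` and
// `br i1 %c, label %t, label %f`, propagating "%c == true" along the edge to
// %t also queues "%x == 0", so uses of %x dominated by that edge fold to the
// constant 0.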
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it.  Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" is simplified to "%z = and i32 %x, %x" which we now
  // simplify.
  if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {
    I->replaceAllUsesWith(V);
    if (MD && V->getType()->getScalarType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(I);
    ++NumGVNSimpl;
    return true;
  }

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (processLoad(LI))
      return true;

    unsigned Num = VN.lookup_or_add(LI);
    addToLeaderTable(Num, LI, LI->getParent());
    return false;
  }

  // For conditional branches, we can perform simple conditional propagation on
  // the condition value itself.
  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    if (!BI->isConditional())
      return false;

    if (isa<Constant>(BI->getCondition()))
      return processFoldableCondBr(BI);

    Value *BranchCond = BI->getCondition();
    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);
    // Avoid multiple edges early.
    if (TrueSucc == FalseSucc)
      return false;

    BasicBlock *Parent = BI->getParent();
    bool Changed = false;

    Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
    BasicBlockEdge TrueE(Parent, TrueSucc);
    Changed |= propagateEquality(BranchCond, TrueVal, TrueE);

    Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
    BasicBlockEdge FalseE(Parent, FalseSucc);
    Changed |= propagateEquality(BranchCond, FalseVal, FalseE);

    return Changed;
  }

  // For switches, propagate the case values into the case destinations.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
    Value *SwitchCond = SI->getCondition();
    BasicBlock *Parent = SI->getParent();
    bool Changed = false;

    // Remember how many outgoing edges there are to every successor.
    SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
    for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
      ++SwitchEdges[SI->getSuccessor(i)];

    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      BasicBlock *Dst = i.getCaseSuccessor();
      // If there is only a single edge, propagate the case value into it.
      if (SwitchEdges.lookup(Dst) == 1) {
        BasicBlockEdge E(Parent, Dst);
        Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E);
      }
    }
    return Changed;
  }

  // Instructions with void type don't return a value, so there's
  // no point in trying to find redundancies in them.
  if (I->getType()->isVoidTy()) return false;

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  if (Num >= NextNum) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  Value *repl = findLeader(I->getParent(), Num);
  if (repl == 0) {
    // Failure, just remember this instance for future use.
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // Remove it!
  patchAndReplaceAllUsesWith(I, repl);
  if (MD && repl->getType()->getScalarType()->isPointerTy())
    MD->invalidateCachedPointerInfo(repl);
  markInstructionForDeletion(I);
  return true;
}
/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  TD = getAnalysisIfAvailable<DataLayout>();
  TLI = &getAnalysis<TargetLibraryInfo>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI++;

    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;
  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    // Fabricate val-nums for dead code in order to suppress an assertion in
    // performPRE().
    assignValNumForDeadCode();
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }

  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.

  cleanupGlobalSets();
  // Do not clean up DeadBlocks in cleanupGlobalSets(), which is called for
  // each iteration.
  DeadBlocks.clear();

  return Changed;
}
bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
  // (and incrementing BI before processing an instruction).
  assert(InstrsToErase.empty() &&
         "We expect InstrsToErase to be empty across iterations");
  if (DeadBlocks.count(BB))
    return false;

  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI);
    if (InstrsToErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += InstrsToErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction *>::iterator I = InstrsToErase.begin(),
         E = InstrsToErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      DEBUG(verifyRemoved(*I));
      (*I)->eraseFromParent();
    }
    InstrsToErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  SmallVector<std::pair<Value*, BasicBlock*>, 8> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    // Don't perform PRE on a landing pad.
    if (CurrentBlock->isLandingPad()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // Don't do PRE on compares.  The PHI would prevent CodeGenPrepare from
      // sinking the compare again, and it would force the code generator to
      // move the i1 from processor flags or predicate registers into a general
      // purpose register.
      if (isa<CmpInst>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look for the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (P == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!DT->isReachableFromEntry(P)) {
          NumWithout = 2;
          break;
        }

        Value* predV = findLeader(P, ValNo);
        if (predV == 0) {
          predMap.push_back(std::make_pair(static_cast<Value *>(0), P));
          PREPred = P;
          ++NumWithout;
        } else if (predV == CurInst) {
          // CurInst dominates this predecessor.
          NumWithout = 2;
          break;
        } else {
          predMap.push_back(std::make_pair(predV, P));
          ++NumWith;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = findLeader(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      PREInstr->setDebugLoc(CurInst->getDebugLoc());
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      addToLeaderTable(ValNo, PREInstr, PREPred);

      // Create a PHI to make the value available in this block.
      PHINode* Phi = PHINode::Create(CurInst->getType(), predMap.size(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
        if (Value *V = predMap[i].first)
          Phi->addIncoming(V, predMap[i].second);
        else
          Phi->addIncoming(PREInstr, PREPred);
      }

      VN.add(Phi, ValNo);
      addToLeaderTable(ValNo, Phi, CurrentBlock);
      Phi->setDebugLoc(CurInst->getDebugLoc());
      CurInst->replaceAllUsesWith(Phi);
      if (Phi->getType()->getScalarType()->isPointerTy()) {
        // Because we have added a PHI-use of the pointer value, it has now
        // "escaped" from alias analysis' perspective.  We need to inform
        // AA of this.
        for (unsigned ii = 0, ee = Phi->getNumIncomingValues(); ii != ee; ++ii) {
          unsigned jj = PHINode::getOperandNumForIncomingValue(ii);
          VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(jj));
        }

        if (MD)
          MD->invalidateCachedPointerInfo(Phi);
      }
      VN.erase(CurInst);
      removeFromLeaderTable(ValNo, CurInst, CurrentBlock);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      DEBUG(verifyRemoved(CurInst));
      CurInst->eraseFromParent();
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}
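// Editorial note: the "NumWithout != 1 || NumWith == 0" test in performPRE is
// what keeps this transformation size-neutral; the expression is cloned into
// exactly one predecessor, so work is moved off the redundant path rather
// than duplicated.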
/// splitCriticalEdges - Split the critical edge connecting the given two
/// blocks, and return the block inserted on the critical edge.
BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
  BasicBlock *BB = SplitCriticalEdge(Pred, Succ, this);
  if (MD)
    MD->invalidateCachedPredecessors();
  return BB;
}
/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}
/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  // Save the blocks this function has before transformation begins.  GVN may
  // split critical edges, and hence may invalidate the RPO/DT iterator.
  std::vector<BasicBlock *> BBVect;
  BBVect.reserve(256);
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    BBVect.push_back(DI->getBlock());

  for (std::vector<BasicBlock *>::iterator I = BBVect.begin(), E = BBVect.end();
       I != E; I++)
    Changed |= processBlock(*I);
#endif

  return Changed;
}
void GVN::cleanupGlobalSets() {
  VN.clear();
  LeaderTable.clear();
  TableAllocator.Reset();
}
/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
       I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
    const LeaderTableEntry *Node = &I->second;
    assert(Node->Val != Inst && "Inst still in value numbering scope!");

    while (Node->Next) {
      Node = Node->Next;
      assert(Node->Val != Inst && "Inst still in value numbering scope!");
    }
  }
}
// BB is declared dead, which implies other blocks become dead as well.  This
// function adds all these blocks to "DeadBlocks".  For the dead blocks' live
// successors, update their phi nodes by replacing the operands corresponding
// to dead blocks with UndefVal.
void GVN::addDeadBlock(BasicBlock *BB) {
  SmallVector<BasicBlock *, 4> NewDead;
  SmallSetVector<BasicBlock *, 4> DF;

  NewDead.push_back(BB);
  while (!NewDead.empty()) {
    BasicBlock *D = NewDead.pop_back_val();
    if (DeadBlocks.count(D))
      continue;

    // All blocks dominated by D are dead.
    SmallVector<BasicBlock *, 8> Dom;
    DT->getDescendants(D, Dom);
    DeadBlocks.insert(Dom.begin(), Dom.end());

    // Figure out the dominance-frontier(D).
    for (SmallVectorImpl<BasicBlock *>::iterator I = Dom.begin(),
           E = Dom.end(); I != E; I++) {
      BasicBlock *B = *I;
      for (succ_iterator SI = succ_begin(B), SE = succ_end(B); SI != SE; SI++) {
        BasicBlock *S = *SI;
        if (DeadBlocks.count(S))
          continue;

        bool AllPredDead = true;
        for (pred_iterator PI = pred_begin(S), PE = pred_end(S); PI != PE; PI++)
          if (!DeadBlocks.count(*PI)) {
            AllPredDead = false;
            break;
          }

        if (!AllPredDead) {
          // S could be proved dead later on.  That is why we don't update phi
          // operands at this moment.
          DF.insert(S);
        } else {
          // While S is not dominated by D, it is dead by now.  This could take
          // place if S already has a dead predecessor before D is declared
          // dead.
          NewDead.push_back(S);
        }
      }
    }
  }

  // For the dead blocks' live successors, update their phi nodes by replacing
  // the operands corresponding to dead blocks with UndefVal.
  for (SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
       I != E; I++) {
    BasicBlock *B = *I;
    if (DeadBlocks.count(B))
      continue;

    SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
    for (SmallVectorImpl<BasicBlock *>::iterator PI = Preds.begin(),
           PE = Preds.end(); PI != PE; PI++) {
      BasicBlock *P = *PI;

      if (!DeadBlocks.count(P))
        continue;

      if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) {
        if (BasicBlock *S = splitCriticalEdges(P, B))
          DeadBlocks.insert(P = S);
      }

      for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
        PHINode &Phi = cast<PHINode>(*II);
        Phi.setIncomingValue(Phi.getBasicBlockIndex(P),
                             UndefValue::get(Phi.getType()));
      }
    }
  }
}
// processFoldableCondBr - If the given branch is a conditional branch with a
// constant condition, identify the dead out-edge's target, split it off a
// critical edge if necessary, and mark the resulting root (and everything it
// dominates) dead.
bool GVN::processFoldableCondBr(BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return false;

  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return false;

  BasicBlock *DeadRoot = Cond->getZExtValue() ?
                         BI->getSuccessor(1) : BI->getSuccessor(0);
  if (DeadBlocks.count(DeadRoot))
    return false;

  if (!DeadRoot->getSinglePredecessor())
    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);
  return true;
}
// performPRE() will trigger an assert if it comes across an instruction
// without an associated val-num.  As it normally has far more live
// instructions than dead instructions, it makes more sense just to
// "fabricate" a val-num for the dead code than checking whether each
// instruction involved is dead or not.
void GVN::assignValNumForDeadCode() {
  for (SetVector<BasicBlock *>::iterator I = DeadBlocks.begin(),
        E = DeadBlocks.end(); I != E; I++) {
    BasicBlock *BB = *I;
    for (BasicBlock::iterator II = BB->begin(), EE = BB->end();
          II != EE; II++) {
      Instruction *Inst = &*II;
      unsigned ValNum = VN.lookup_or_add(Inst);
      addToLeaderTable(ValNum, Inst, BB);
    }
  }
}