LLVM API Documentation

InlineCost.cpp
//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/InstVisitor.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  // DataLayout if available, or null.
  const DataLayout *const TD;

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;
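  // For example, when a call site passes the constant 42 for an argument %x,
  // an 'icmp eq i32 %x, 42' in the callee can be recorded in this map as
  // 'true' and the branch consuming it folded when live successors are chosen.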

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool handleSROACandidate(bool IsSROAValid,
                           DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *); void visit(Module &);
  void visit(Function *); void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);

public:
  CallAnalyzer(const DataLayout *TD, const TargetTransformInfo &TTI,
               Function &Callee, int Threshold)
      : TD(TD), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
        IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), AllocatedSize(0), NumInstructions(0),
        NumVectorInstructions(0), FiftyPercentVectorBonus(0),
        TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
        NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
        NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
        SROACostSavings(0), SROACostSavingsLost(0) {}

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Helper for the common pattern of handling a SROA candidate.
/// Either accumulates the cost savings if the SROA remains valid, or disables
/// SROA for the candidate.
bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
                                       DenseMap<Value *, int>::iterator CostIt,
                                       int InstructionCost) {
  if (IsSROAValid) {
    accumulateSROACost(CostIt, InstructionCost);
    return true;
  }

  disableSROA(CostIt);
  return false;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
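/// Worked example, assuming a 64-bit DataLayout: for
///   %p = getelementptr inbounds {i32, [4 x i64]}* %s, i64 1, i32 1, i64 2
/// the loop below adds 40 (the struct's alloc size, for the 'i64 1' index),
/// then 8 (the field offset of member 1), then 2 * 8 = 16, so Offset is 64.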
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!TD)
    return false;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = TD->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // FIXME: Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
                           Ty->getPrimitiveSizeInBits());
  }
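  // For example, 'alloca [256 x i32]' adds 1024 bytes to AllocatedSize when a
  // DataLayout is available; analyzeBlock later checks this running total
  // against InlineConstants::TotalAllocaSizeRecursiveCaller for recursive
  // callers.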

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (TD && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Operand);
  if (!COp)
    COp = SimplifiedValues.lookup(Operand);
  if (COp)
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               COp, TD)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }
  }

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }
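  // For instance, when two values both derive from one pointer argument via
  // constant-index inbounds GEPs, both map to the same base in
  // ConstantOffsetPtrs and the icmp above folds from their offsets alone.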

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
  if (!C)
    C = SimplifiedValues.lookup(I.getAggregateOperand());
  if (C) {
    SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
  if (!AggC)
    AggC = SimplifiedValues.lookup(I.getAggregateOperand());
  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
  if (!InsertedC)
    InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
  if (AggC && InsertedC) {
    SimplifiedValues[&I] = ConstantExpr::getInsertValue(AggC, InsertedC,
                                                        I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }
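  // For example, 'call double @fabs(double %x)' where %x has simplified to
  // -1.0 at this call site constant-folds to 1.0 here and is modeled as free.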

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
      !F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() &&
      cast<CallInst>(CS.getInstruction())->hasFnAttr(Attribute::NoDuplicate))
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't
        // free.
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (TTI.isLoweredToCall(F)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(TD, TTI, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }
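  // As arithmetic: if the nested CallAnalyzer finished with getCost() == 30,
  // the credit applied above is max(0, IndirectCallThreshold - 30), so a
  // cheap devirtualized target earns most of the bonus and a costly one none.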

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}

/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = llvm::prior(BB->end());
       I != E; ++I) {
    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If the visit of this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
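/// For example, an argument built as a 'bitcast' of a 'getelementptr
/// inbounds' with constant indices off an alloca is rewritten to the alloca
/// itself, and the returned ConstantInt holds the accumulated byte offset.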
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!TD || !V->getType()->isPointerTy())
    return 0;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return 0;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = TD->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = Threshold;
  TenPercentVectorBonus = Threshold / 2;

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (TD && CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = TD->getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }
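  // Worked example for the byval case: a 200-bit struct with 64-bit pointers
  // gives NumStores = (200 + 63) / 64 = 4, so 2 * 4 * InlineConstants::InstrCost
  // is subtracted from Cost (one load and one store per word copied).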

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn. As such,
  // there is little point in inlining this unless there is literally zero
  // cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 1;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 1;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
       U != E; ++U) {
    CallSite Site(cast<Value>(*U));
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Track whether we've seen a return instruction. The first return
  // instruction is free, as at least one will usually disappear in inlining.
  bool HasReturn = false;

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, prioritizing for small iterations because we exit after
  // crossing our threshold, we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Handle the terminator cost here where we can track returns and other
    // function-wide constructs.
    TerminatorInst *TI = BB->getTerminator();

    // We never want to inline functions that contain an indirectbr. This is
    // incorrect because all the blockaddress's (in static global initializers
    // for example) would be referring to the original function, and this
    // indirect jump would jump from the inlined copy of the function into the
    // original function which is extremely undefined behavior.
    // FIXME: This logic isn't really right; we can safely inline functions
    // with indirectbr's as long as no other function or global references the
    // blockaddress of a block within the current function. And as a QOI issue,
    // if someone is using a blockaddress without an indirectbr, and that
    // reference somehow ends up in another function or global, we probably
    // don't want to inline this function.
    if (isa<IndirectBrInst>(TI))
      return false;

    if (!HasReturn && isa<ReturnInst>(TI))
      HasReturn = true;
    else
      Cost += InlineConstants::InstrCost;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would
      // increase the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  Threshold += VectorBonus;

  return Cost < Threshold;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
#undef DEBUG_PRINT_STAT
}
#endif

INITIALIZE_PASS_BEGIN(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                      true, true)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                    true, true)

char InlineCostAnalysis::ID = 0;

InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), TD(0) {}

InlineCostAnalysis::~InlineCostAnalysis() {}

void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<TargetTransformInfo>();
  CallGraphSCCPass::getAnalysisUsage(AU);
}

bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
  TD = getAnalysisIfAvailable<DataLayout>();
  TTI = &getAnalysis<TargetTransformInfo>();
  return false;
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

/// \brief Test that two functions either have or have not the given attribute
/// at the same time.
static bool attributeMatches(Function *F1, Function *F2,
                             Attribute::AttrKind Attr) {
  return F1->hasFnAttribute(Attr) == F2->hasFnAttribute(Attr);
}

/// \brief Test that there are no attribute conflicts between Caller and Callee
/// that prevent inlining.
static bool functionsHaveCompatibleAttributes(Function *Caller,
                                              Function *Callee) {
  return attributeMatches(Caller, Callee, Attribute::SanitizeAddress) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeMemory) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeThread);
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Callee->hasFnAttribute(Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
  if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee))
    return llvm::InlineCost::getNever();

  // Don't inline this call if the caller has the optnone attribute.
  if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone))
    return llvm::InlineCost::getNever();

  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttribute(Attribute::NoInline) || CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
                     << "...\n");

  CallAnalyzer CA(TD, *TTI, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

bool InlineCostAnalysis::isInlineViable(Function &F) {
  bool ReturnsTwice =
    F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                   Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain an indirect branch.
    if (isa<IndirectBrInst>(BI->getTerminator()))
      return false;

    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      CallSite CS(II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;
    }
  }

  return true;
}
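
For orientation, a minimal sketch (not part of InlineCost.cpp) of how a client
pass might consume this analysis. The helper name and the threshold value are
hypothetical, and it assumes InlineCost's isAlways()/isNever() accessors and
boolean conversion (cost under threshold) declared in InlineCost.h:

static bool shouldInlineCallSite(CallSite CS, InlineCostAnalysis &ICA) {
  // 225 is only an illustrative threshold, not a value taken from this file.
  InlineCost IC = ICA.getInlineCost(CS, 225);
  if (IC.isAlways())
    return true;   // e.g. a viable alwaysinline callee
  if (IC.isNever())
    return false;  // e.g. noinline, indirect call, or attribute conflict
  // Variable cost: inline only if the computed cost beat the threshold.
  return (bool)IC;
}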