//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
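// For example (an illustrative sketch, not part of the original source; SE,
// A and B stand for a ScalarEvolution instance and two Values in scope),
// uniquing means the two queries below return the same object:
//
//   const SCEV *S1 = SE.getAddExpr(SE.getSCEV(A), SE.getSCEV(B));
//   const SCEV *S2 = SE.getAddExpr(SE.getSCEV(B), SE.getSCEV(A));
//   assert(S1 == S2 && "one canonical, uniqued SCEV");
//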
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
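//
// For instance, a canonical induction variable such as
//
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 1
//
// is represented directly as the recurrence {0,+,1}<%loop> rather than as a
// cyclic expression graph.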
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));
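
// Usage sketch (illustrative, not part of the original source): this cap is
// adjustable from the opt command line, e.g.
//   opt -analyze -scalar-evolution -scalar-evolution-max-iterations=64 in.ll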

// FIXME: Enable this with XDEBUG when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));

INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->getNoWrapFlags(FlagNUW))
      OS << "nuw><";
    if (AR->getNoWrapFlags(FlagNSW))
      OS << "nsw><";
    if (AR->getNoWrapFlags(FlagNW) &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = 0;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (llvm::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->getNoWrapFlags(FlagNUW))
        OS << "<nuw>";
      if (NAry->getNoWrapFlags(FlagNSW))
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      WriteAsOperand(OS, FieldNo, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    WriteAsOperand(OS, U->getValue(), false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  default:
    llvm_unreachable("Unknown SCEV kind!");
  }
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
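
// Illustrative note (a sketch, not part of the original source): because
// constants are uniqued through the FoldingSet above, repeated queries
// return the identical object. With SE a ScalarEvolution instance and Ty
// an integer type in scope:
//
//   const SCEV *C1 = SE.getConstant(Ty, 42);
//   const SCEV *C2 = SE.getConstant(Ty, 42);
//   assert(C1 == C2 && "same uniqued SCEVConstant");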

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(0);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}
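
// The pattern matched above is the classic "sizeof" idiom in LLVM IR
// (illustrative example, not from the original source): with typed pointers,
//
//   i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64)
//
// computes the allocation size of double, so isSizeOf reports it as
// sizeof(double).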

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
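
// Similarly, the "offsetof" idiom matched above looks like (illustrative
// sketch with typed pointers; %struct.S is an assumed struct type):
//
//   i64 ptrtoint (i32* getelementptr (%struct.S* null, i32 0, i32 2) to i64)
//
// which isOffsetOf reports as offsetof(%struct.S, field 2).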

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS. This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    const LoopInfo *const LI;
  public:
    explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

    // Return true if the complexity of LHS is less than that of RHS;
    // false otherwise.
    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      return compare(LHS, RHS) < 0;
    }

    // Return negative, zero, or positive, if LHS is less than, equal to, or
    // greater than RHS, respectively. A three-way result allows recursive
    // comparisons to be more efficient.
    int compare(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return 0;

      // Primarily, sort the SCEVs by their getSCEVType().
      unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
      if (LType != RType)
        return (int)LType - (int)RType;

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.
      switch (LType) {
      case scUnknown: {
        const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Sort SCEVUnknown values with some loose heuristics. TODO: This is
        // not as complete as it could be.
        const Value *LV = LU->getValue(), *RV = RU->getValue();

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        bool LIsPointer = LV->getType()->isPointerTy(),
             RIsPointer = RV->getType()->isPointerTy();
        if (LIsPointer != RIsPointer)
          return (int)LIsPointer - (int)RIsPointer;

        // Compare getValueID values.
        unsigned LID = LV->getValueID(),
                 RID = RV->getValueID();
        if (LID != RID)
          return (int)LID - (int)RID;

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LV)) {
          const Argument *RA = cast<Argument>(RV);
          unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
          return (int)LArgNo - (int)RArgNo;
        }

        // For instructions, compare their loop depth, and their operand
        // count. This is pretty loose.
        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
          const Instruction *RInst = cast<Instruction>(RV);

          // Compare loop depths.
          const BasicBlock *LParent = LInst->getParent(),
                           *RParent = RInst->getParent();
          if (LParent != RParent) {
            unsigned LDepth = LI->getLoopDepth(LParent),
                     RDepth = LI->getLoopDepth(RParent);
            if (LDepth != RDepth)
              return (int)LDepth - (int)RDepth;
          }

          // Compare the number of operands.
          unsigned LNumOps = LInst->getNumOperands(),
                   RNumOps = RInst->getNumOperands();
          return (int)LNumOps - (int)RNumOps;
        }

        return 0;
      }

      case scConstant: {
        const SCEVConstant *LC = cast<SCEVConstant>(LHS);
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);

        // Compare constant values.
        const APInt &LA = LC->getValue()->getValue();
        const APInt &RA = RC->getValue()->getValue();
        unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
        if (LBitWidth != RBitWidth)
          return (int)LBitWidth - (int)RBitWidth;
        return LA.ult(RA) ? -1 : 1;
      }

      case scAddRecExpr: {
        const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

        // Compare addrec loop depths.
        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
        if (LLoop != RLoop) {
          unsigned LDepth = LLoop->getLoopDepth(),
                   RDepth = RLoop->getLoopDepth();
          if (LDepth != RDepth)
            return (int)LDepth - (int)RDepth;
        }

        // Addrec complexity grows with operand count.
        unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
        if (LNumOps != RNumOps)
          return (int)LNumOps - (int)RNumOps;

        // Lexicographically compare.
        for (unsigned i = 0; i != LNumOps; ++i) {
          long X = compare(LA->getOperand(i), RA->getOperand(i));
          if (X != 0)
            return X;
        }

        return 0;
      }

      case scAddExpr:
      case scMulExpr:
      case scSMaxExpr:
      case scUMaxExpr: {
        const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

        // Lexicographically compare n-ary expressions.
        unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
        if (LNumOps != RNumOps)
          return (int)LNumOps - (int)RNumOps;

        for (unsigned i = 0; i != LNumOps; ++i) {
          if (i >= RNumOps)
            return 1;
          long X = compare(LC->getOperand(i), RC->getOperand(i));
          if (X != 0)
            return X;
        }
        return (int)LNumOps - (int)RNumOps;
      }

      case scUDivExpr: {
        const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

        // Lexicographically compare udiv expressions.
        long X = compare(LC->getLHS(), RC->getLHS());
        if (X != 0)
          return X;
        return compare(LC->getRHS(), RC->getRHS());
      }

      case scTruncate:
      case scZeroExtend:
      case scSignExtend: {
        const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

        // Compare cast expressions by operand.
        return compare(LC->getOperand(), RC->getOperand());
      }

      default:
        llvm_unreachable("Unknown SCEV kind!");
      }
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
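
// Example (illustrative, not from the original source): an operand list such
// as {x, 5, y, x} is reordered to {5, x, x, y}, putting the constant first
// and making the duplicate x's adjacent so that getAddExpr can fold
// x + x into x * 2.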


//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K). The result has width W. Assumes
/// K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula uses less than W + K bits. Also, the first formula requires
  // a division step, whereas this formula only requires multiplies and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
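  //
  // Worked example (illustrative, not part of the original comment): for
  // K = 4, K! = 24 = 2^3 * 3, so T = 3 and the odd part K!/2^T is 3. The
  // product It*(It-1)*(It-2)*(It-3) is formed at width W+3, shifted right
  // by 3, truncated back to W bits, and multiplied by the multiplicative
  // inverse of 3 modulo 2^W to perform the exact division by 3.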

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number. We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
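
// Example (illustrative, not from the original source): evaluating the
// recurrence {2,+,3} at iteration It gives 2*BC(It,0) + 3*BC(It,1)
// = 2 + 3*It, so at It = 4 the value is 14.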

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->getNoWrapFlags(SCEV::FlagNUW))
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
          const SCEV *WideMaxBECount =
            getZeroExtendExpr(CastedMaxBECount, WideTy);
          const SCEV *OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getZeroExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getSignExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                           ICmpInst::Predicate *Pred,
                                           ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMax());
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMin());
  }
  return 0;
}
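
// Example (illustrative, not from the original source): at a bit width of 8,
// if Step is known positive with a maximum signed value of 3, the limit is
// (-128 - 3), which wraps to 125, and *Pred is set to SLT. Any recurrence
// value v with v < 125 satisfies v + 3 <= 127, so the increment cannot
// overflow.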

// The recurrence AR has been shown to have no signed wrap. Typically, if we
// can prove NSW for AR, then we can just as easily prove NSW for its
// preincrement or postincrement sibling. This allows normalizing a sign
// extended AddRec as such: {sext(Step + Start),+,Step} =>
// {Step + sext(Start),+,Step}. As a result, the expression
// "Step + sext(PreIncAR)" is congruent with "sext(PostIncAR)".
static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
                                            Type *Ty,
                                            ScalarEvolution *SE) {
  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return 0;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (SCEVAddExpr::op_iterator I = SA->op_begin(), E = SA->op_end();
       I != E; ++I) {
    if (*I != Step)
      DiffOps.push_back(*I);
  }
  if (DiffOps.size() == SA->getNumOperands())
    return 0;

  // This is a postinc AR. Check for overflow on the preinc recurrence using
  // the same three conditions that getSignExtendExpr checks.

  // 1. NSW flags on the step increment.
  const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
    SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  if (PreAR && PreAR->getNoWrapFlags(SCEV::FlagNSW))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
    SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
                   SE->getSignExtendExpr(Step, WideTy));
  if (SE->getSignExtendExpr(Start, WideTy) == OperandExtendedStart) {
    // Cache knowledge of PreAR NSW.
    if (PreAR)
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(SCEV::FlagNSW);
    // FIXME: this optimization needs a unit test
    DEBUG(dbgs() << "SCEV: untested prestart overflow check\n");
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
    return PreStart;
  }
  return 0;
}

// Get the normalized sign-extended expression for this AddRec's Start.
static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
                                            Type *Ty,
                                            ScalarEvolution *SE) {
  const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
  if (!PreStart)
    return SE->getSignExtendExpr(AR->getStart(), Ty);

  return SE->getAddExpr(SE->getSignExtendExpr(AR->getStepRecurrence(*SE), Ty),
                        SE->getSignExtendExpr(PreStart, Ty));
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is provably positive, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty);

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->getNoWrapFlags(SCEV::FlagNSW))
        return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
                             getSignExtendExpr(Step, Ty),
                             L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
          const SCEV *WideMaxBECount =
            getZeroExtendExpr(CastedMaxBECount, WideTy);
          const SCEV *OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getSignExtendExpr(Step, WideTy)));
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
                                 getSignExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getZeroExtendExpr(Step, WideTy)));
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
                                 getZeroExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
              isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
                                          OverflowLimit)))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
                               getSignExtendExpr(Step, Ty),
                               L, AR->getNoWrapFlags());
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      Ops.push_back(getAnyExtendExpr(*I, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddRecExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddRecExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getValue()->getValue();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       Add->op_begin(), Add->getNumOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  // And vice-versa.
  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
    bool All = true;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
         E = Ops.end(); I != E; ++I)
      if (!isKnownNonNegative(*I)) {
        All = false;
        break;
      }
    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
  }

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

1515  // Okay, check to see if the same value occurs in the operand list more than
1516  // once. If so, merge them together into a multiply expression. Since we
1517  // sorted the list, these values are required to be adjacent.
1518  Type *Ty = Ops[0]->getType();
1519  bool FoundMatch = false;
1520  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
1521  if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
1522  // Scan ahead to count how many equal operands there are.
1523  unsigned Count = 2;
1524  while (i+Count != e && Ops[i+Count] == Ops[i])
1525  ++Count;
1526  // Merge the values into a multiply.
1527  const SCEV *Scale = getConstant(Ty, Count);
1528  const SCEV *Mul = getMulExpr(Scale, Ops[i]);
1529  if (Ops.size() == Count)
1530  return Mul;
1531  Ops[i] = Mul;
1532  Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
1533  --i; e -= Count - 1;
1534  FoundMatch = true;
1535  }
1536  if (FoundMatch)
1537  return getAddExpr(Ops, Flags);
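// For example, an operand list (x, y, y, y) has just been rewritten to
// (x, 3*y), so repeated terms always canonicalize to a single multiply.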
1538 
1539  // Check for truncates. If all the operands are truncated from the same
1540  // type, see if factoring out the truncate would permit the result to be
1541  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1542  // if the contents of the resulting outer trunc fold to something simple.
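// For instance, trunc(a) + trunc(b) over a common source type is evaluated
// as trunc(a + b); the widened sum is kept only if it folds to a constant
// or a SCEVUnknown, since truncating anything more complex would be no
// simpler than the original expression.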
1543  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1544  const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
1545  Type *DstType = Trunc->getType();
1546  Type *SrcType = Trunc->getOperand()->getType();
1547  SmallVector<const SCEV *, 8> LargeOps;
1548  bool Ok = true;
1549  // Check all the operands to see if they can be represented in the
1550  // source type of the truncate.
1551  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1552  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1553  if (T->getOperand()->getType() != SrcType) {
1554  Ok = false;
1555  break;
1556  }
1557  LargeOps.push_back(T->getOperand());
1558  } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1559  LargeOps.push_back(getAnyExtendExpr(C, SrcType));
1560  } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
1561  SmallVector<const SCEV *, 8> LargeMulOps;
1562  for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1563  if (const SCEVTruncateExpr *T =
1564  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1565  if (T->getOperand()->getType() != SrcType) {
1566  Ok = false;
1567  break;
1568  }
1569  LargeMulOps.push_back(T->getOperand());
1570  } else if (const SCEVConstant *C =
1571  dyn_cast<SCEVConstant>(M->getOperand(j))) {
1572  LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
1573  } else {
1574  Ok = false;
1575  break;
1576  }
1577  }
1578  if (Ok)
1579  LargeOps.push_back(getMulExpr(LargeMulOps));
1580  } else {
1581  Ok = false;
1582  break;
1583  }
1584  }
1585  if (Ok) {
1586  // Evaluate the expression in the larger type.
1587  const SCEV *Fold = getAddExpr(LargeOps, Flags);
1588  // If it folds to something simple, use it. Otherwise, don't.
1589  if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1590  return getTruncateExpr(Fold, DstType);
1591  }
1592  }
1593 
1594  // Skip past any other cast SCEVs.
1595  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1596  ++Idx;
1597 
1598  // If there are add operands they would be next.
1599  if (Idx < Ops.size()) {
1600  bool DeletedAdd = false;
1601  while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
1602  // If we have an add, expand the add operands onto the end of the operands
1603  // list.
1604  Ops.erase(Ops.begin()+Idx);
1605  Ops.append(Add->op_begin(), Add->op_end());
1606  DeletedAdd = true;
1607  }
1608 
1609  // If we deleted at least one add, we added operands to the end of the list,
1610  // and they are not necessarily sorted. Recurse to resort and resimplify
1611  // any operands we just acquired.
1612  if (DeletedAdd)
1613  return getAddExpr(Ops);
1614  }
1615 
1616  // Skip over the add expression until we get to a multiply.
1617  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1618  ++Idx;
1619 
1620  // Check to see if there are any folding opportunities present with
1621  // operands multiplied by constant values.
1622  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1623  uint64_t BitWidth = getTypeSizeInBits(Ty);
1624  DenseMap<const SCEV *, APInt> M;
1625  SmallVector<const SCEV *, 8> NewOps;
1626  APInt AccumulatedConstant(BitWidth, 0);
1627  if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1628  Ops.data(), Ops.size(),
1629  APInt(BitWidth, 1), *this)) {
1630  // Some interesting folding opportunity is present, so it's worthwhile to
1631  // re-generate the operands list. Group the operands by constant scale,
1632  // to avoid multiplying by the same constant scale multiple times.
1633  std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1634  for (SmallVectorImpl<const SCEV *>::const_iterator I = NewOps.begin(),
1635  E = NewOps.end(); I != E; ++I)
1636  MulOpLists[M.find(*I)->second].push_back(*I);
1637  // Re-generate the operands list.
1638  Ops.clear();
1639  if (AccumulatedConstant != 0)
1640  Ops.push_back(getConstant(AccumulatedConstant));
1641  for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1642  I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1643  if (I->first != 0)
1644  Ops.push_back(getMulExpr(getConstant(I->first),
1645  getAddExpr(I->second)));
1646  if (Ops.empty())
1647  return getConstant(Ty, 0);
1648  if (Ops.size() == 1)
1649  return Ops[0];
1650  return getAddExpr(Ops);
1651  }
1652  }
1653 
1654  // If we are adding something to a multiply expression, make sure the
1655  // something is not already an operand of the multiply. If so, merge it into
1656  // the multiply.
1657  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1658  const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1659  for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1660  const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1661  if (isa<SCEVConstant>(MulOpSCEV))
1662  continue;
1663  for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1664  if (MulOpSCEV == Ops[AddOp]) {
1665  // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
1666  const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1667  if (Mul->getNumOperands() != 2) {
1668  // If the multiply has more than two operands, we must get the
1669  // Y*Z term.
1670  SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1671  Mul->op_begin()+MulOp);
1672  MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
1673  InnerMul = getMulExpr(MulOps);
1674  }
1675  const SCEV *One = getConstant(Ty, 1);
1676  const SCEV *AddOne = getAddExpr(One, InnerMul);
1677  const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
1678  if (Ops.size() == 2) return OuterMul;
1679  if (AddOp < Idx) {
1680  Ops.erase(Ops.begin()+AddOp);
1681  Ops.erase(Ops.begin()+Idx-1);
1682  } else {
1683  Ops.erase(Ops.begin()+Idx);
1684  Ops.erase(Ops.begin()+AddOp-1);
1685  }
1686  Ops.push_back(OuterMul);
1687  return getAddExpr(Ops);
1688  }
1689 
1690  // Check this multiply against other multiplies being added together.
1691  for (unsigned OtherMulIdx = Idx+1;
1692  OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1693  ++OtherMulIdx) {
1694  const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1695  // If MulOp occurs in OtherMul, we can fold the two multiplies
1696  // together.
1697  for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1698  OMulOp != e; ++OMulOp)
1699  if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1700  // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1701  const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1702  if (Mul->getNumOperands() != 2) {
1703  SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1704  Mul->op_begin()+MulOp);
1705  MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
1706  InnerMul1 = getMulExpr(MulOps);
1707  }
1708  const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1709  if (OtherMul->getNumOperands() != 2) {
1710  SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1711  OtherMul->op_begin()+OMulOp);
1712  MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
1713  InnerMul2 = getMulExpr(MulOps);
1714  }
1715  const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1716  const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1717  if (Ops.size() == 2) return OuterMul;
1718  Ops.erase(Ops.begin()+Idx);
1719  Ops.erase(Ops.begin()+OtherMulIdx-1);
1720  Ops.push_back(OuterMul);
1721  return getAddExpr(Ops);
1722  }
1723  }
1724  }
1725  }
1726 
1727  // If there are any add recurrences in the operands list, see if any other
1728  // added values are loop invariant. If so, we can fold them into the
1729  // recurrence.
1730  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1731  ++Idx;
1732 
1733  // Scan over all recurrences, trying to fold loop invariants into them.
1734  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1735  // Scan all of the other operands to this add and add them to the vector if
1736  // they are loop invariant w.r.t. the recurrence.
1737  SmallVector<const SCEV *, 8> LIOps;
1738  const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1739  const Loop *AddRecLoop = AddRec->getLoop();
1740  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1741  if (isLoopInvariant(Ops[i], AddRecLoop)) {
1742  LIOps.push_back(Ops[i]);
1743  Ops.erase(Ops.begin()+i);
1744  --i; --e;
1745  }
1746 
1747  // If we found some loop invariants, fold them into the recurrence.
1748  if (!LIOps.empty()) {
1749  // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
1750  LIOps.push_back(AddRec->getStart());
1751 
1752  SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1753  AddRec->op_end());
1754  AddRecOps[0] = getAddExpr(LIOps);
1755 
1756  // Build the new addrec. Propagate the NUW and NSW flags if both the
1757  // outer add and the inner addrec are guaranteed to have no overflow.
1758  // Always propagate NW.
1759  Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
1760  const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
1761 
1762  // If all of the other operands were loop invariant, we are done.
1763  if (Ops.size() == 1) return NewRec;
1764 
1765  // Otherwise, add the folded AddRec by the non-invariant parts.
1766  for (unsigned i = 0;; ++i)
1767  if (Ops[i] == AddRec) {
1768  Ops[i] = NewRec;
1769  break;
1770  }
1771  return getAddExpr(Ops);
1772  }
1773 
1774  // Okay, if there weren't any loop invariants to be folded, check to see if
1775  // there are multiple AddRec's with the same loop induction variable being
1776  // added together. If so, we can fold them.
1777  for (unsigned OtherIdx = Idx+1;
1778  OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1779  ++OtherIdx)
1780  if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
1781  // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
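// e.g. {1,+,2}<L> + {3,+,4}<L> folds to {4,+,6}<L>. When the two addrecs
// have different lengths, the shorter one's missing high-order operands
// are simply taken from the longer one below.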
1782  SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1783  AddRec->op_end());
1784  for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1785  ++OtherIdx)
1786  if (const SCEVAddRecExpr *OtherAddRec =
1787  dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
1788  if (OtherAddRec->getLoop() == AddRecLoop) {
1789  for (unsigned i = 0, e = OtherAddRec->getNumOperands();
1790  i != e; ++i) {
1791  if (i >= AddRecOps.size()) {
1792  AddRecOps.append(OtherAddRec->op_begin()+i,
1793  OtherAddRec->op_end());
1794  break;
1795  }
1796  AddRecOps[i] = getAddExpr(AddRecOps[i],
1797  OtherAddRec->getOperand(i));
1798  }
1799  Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
1800  }
1801  // Step size has changed, so we cannot guarantee no self-wraparound.
1802  Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
1803  return getAddExpr(Ops);
1804  }
1805 
1806  // Otherwise couldn't fold anything into this recurrence. Move onto the
1807  // next one.
1808  }
1809 
1810  // Okay, it looks like we really DO need an add expr. Check to see if we
1811  // already have one, otherwise create a new one.
1812  FoldingSetNodeID ID;
1813  ID.AddInteger(scAddExpr);
1814  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1815  ID.AddPointer(Ops[i]);
1816  void *IP = 0;
1817  SCEVAddExpr *S =
1818  static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1819  if (!S) {
1820  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1821  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1822  S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
1823  O, Ops.size());
1824  UniqueSCEVs.InsertNode(S, IP);
1825  }
1826  S->setNoWrapFlags(Flags);
1827  return S;
1828 }
1829 
1830 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
1831  uint64_t k = i*j;
1832  if (j > 1 && k / j != i) Overflow = true;
1833  return k;
1834 }
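// A quick sanity check of the overflow test above: for i = j = 1<<33 the
// 128-bit product wraps to zero in 64 bits, so k/j != i and Overflow is
// set. The flag is only ever set, never cleared, so callers can accumulate
// it across several calls.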
1835 
1836 /// Compute the result of "n choose k", the binomial coefficient. If an
1837 /// intermediate computation overflows, Overflow will be set and the return will
1838 /// be garbage. Overflow is not cleared on absence of overflow.
1839 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
1840  // We use the multiplicative formula:
1841  // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
1842  // At each iteration, we take the i-th term of the numerator and divide by
1843  // the i-th term of the denominator. This division will always produce an
1844  // integral result, and helps reduce the chance of overflow in the
1845  // intermediate computations. However, we can still overflow even when the
1846  // final result would fit.
1847 
1848  if (n == 0 || n == k) return 1;
1849  if (k > n) return 0;
1850 
1851  if (k > n/2)
1852  k = n-k;
1853 
1854  uint64_t r = 1;
1855  for (uint64_t i = 1; i <= k; ++i) {
1856  r = umul_ov(r, n-(i-1), Overflow);
1857  r /= i;
1858  }
1859  return r;
1860 }
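// Worked example (illustrative): Choose(6, 2, Ov) leaves k = 2 after the
// symmetry reduction, then computes r = (1*6)/1 = 6 and r = (6*5)/2 = 15,
// matching C(6,2) = 15 with no intermediate overflow.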
1861 
1862 /// getMulExpr - Get a canonical multiply expression, or something simpler if
1863 /// possible.
1864 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
1865  SCEV::NoWrapFlags Flags) {
1866  assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
1867  "only nuw or nsw allowed");
1868  assert(!Ops.empty() && "Cannot get empty mul!");
1869  if (Ops.size() == 1) return Ops[0];
1870 #ifndef NDEBUG
1871  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1872  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1873  assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1874  "SCEVMulExpr operand types don't match!");
1875 #endif
1876 
1877  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
1878  // And vice-versa.
1879  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
1880  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
1881  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
1882  bool All = true;
1883  for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
1884  E = Ops.end(); I != E; ++I)
1885  if (!isKnownNonNegative(*I)) {
1886  All = false;
1887  break;
1888  }
1889  if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
1890  }
1891 
1892  // Sort by complexity, this groups all similar expression types together.
1893  GroupByComplexity(Ops, LI);
1894 
1895  // If there are any constants, fold them together.
1896  unsigned Idx = 0;
1897  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1898 
1899  // C1*(C2+V) -> C1*C2 + C1*V
1900  if (Ops.size() == 2)
1901  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1902  if (Add->getNumOperands() == 2 &&
1903  isa<SCEVConstant>(Add->getOperand(0)))
1904  return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1905  getMulExpr(LHSC, Add->getOperand(1)));
1906 
1907  ++Idx;
1908  while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1909  // We found two constants, fold them together!
1910  ConstantInt *Fold = ConstantInt::get(getContext(),
1911  LHSC->getValue()->getValue() *
1912  RHSC->getValue()->getValue());
1913  Ops[0] = getConstant(Fold);
1914  Ops.erase(Ops.begin()+1); // Erase the folded element
1915  if (Ops.size() == 1) return Ops[0];
1916  LHSC = cast<SCEVConstant>(Ops[0]);
1917  }
1918 
1919  // If we are left with a constant one being multiplied, strip it off.
1920  if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1921  Ops.erase(Ops.begin());
1922  --Idx;
1923  } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1924  // If we have a multiply of zero, it will always be zero.
1925  return Ops[0];
1926  } else if (Ops[0]->isAllOnesValue()) {
1927  // If we have a mul by -1 of an add, try distributing the -1 among the
1928  // add operands.
1929  if (Ops.size() == 2) {
1930  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
1931  SmallVector<const SCEV *, 4> NewOps;
1932  bool AnyFolded = false;
1933  for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
1934  E = Add->op_end(); I != E; ++I) {
1935  const SCEV *Mul = getMulExpr(Ops[0], *I);
1936  if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
1937  NewOps.push_back(Mul);
1938  }
1939  if (AnyFolded)
1940  return getAddExpr(NewOps);
1941  }
1942  else if (const SCEVAddRecExpr *
1943  AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
1944  // Negation preserves a recurrence's no self-wrap property.
1945  SmallVector<const SCEV *, 4> Operands;
1946  for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
1947  E = AddRec->op_end(); I != E; ++I) {
1948  Operands.push_back(getMulExpr(Ops[0], *I));
1949  }
1950  return getAddRecExpr(Operands, AddRec->getLoop(),
1951  AddRec->getNoWrapFlags(SCEV::FlagNW));
1952  }
1953  }
1954  }
1955 
1956  if (Ops.size() == 1)
1957  return Ops[0];
1958  }
1959 
1960  // Skip over the add expression until we get to a multiply.
1961  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1962  ++Idx;
1963 
1964  // If there are mul operands inline them all into this expression.
1965  if (Idx < Ops.size()) {
1966  bool DeletedMul = false;
1967  while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1968  // If we have an mul, expand the mul operands onto the end of the operands
1969  // list.
1970  Ops.erase(Ops.begin()+Idx);
1971  Ops.append(Mul->op_begin(), Mul->op_end());
1972  DeletedMul = true;
1973  }
1974 
1975  // If we deleted at least one mul, we added operands to the end of the list,
1976  // and they are not necessarily sorted. Recurse to resort and resimplify
1977  // any operands we just acquired.
1978  if (DeletedMul)
1979  return getMulExpr(Ops);
1980  }
1981 
1982  // If there are any add recurrences in the operands list, see if any other
1983  // added values are loop invariant. If so, we can fold them into the
1984  // recurrence.
1985  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1986  ++Idx;
1987 
1988  // Scan over all recurrences, trying to fold loop invariants into them.
1989  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1990  // Scan all of the other operands to this mul and add them to the vector if
1991  // they are loop invariant w.r.t. the recurrence.
1992  SmallVector<const SCEV *, 8> LIOps;
1993  const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1994  const Loop *AddRecLoop = AddRec->getLoop();
1995  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1996  if (isLoopInvariant(Ops[i], AddRecLoop)) {
1997  LIOps.push_back(Ops[i]);
1998  Ops.erase(Ops.begin()+i);
1999  --i; --e;
2000  }
2001 
2002  // If we found some loop invariants, fold them into the recurrence.
2003  if (!LIOps.empty()) {
2004  // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
2005  SmallVector<const SCEV *, 4> NewOps;
2006  NewOps.reserve(AddRec->getNumOperands());
2007  const SCEV *Scale = getMulExpr(LIOps);
2008  for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2009  NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
2010 
2011  // Build the new addrec. Propagate the NUW and NSW flags if both the
2012  // outer mul and the inner addrec are guaranteed to have no overflow.
2013  //
2014  // No self-wrap cannot be guaranteed after changing the step size, but
2015  // will be inferred if either NUW or NSW is true.
2016  Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2017  const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
2018 
2019  // If all of the other operands were loop invariant, we are done.
2020  if (Ops.size() == 1) return NewRec;
2021 
2022  // Otherwise, multiply the folded AddRec by the non-invariant parts.
2023  for (unsigned i = 0;; ++i)
2024  if (Ops[i] == AddRec) {
2025  Ops[i] = NewRec;
2026  break;
2027  }
2028  return getMulExpr(Ops);
2029  }
2030 
2031  // Okay, if there weren't any loop invariants to be folded, check to see if
2032  // there are multiple AddRec's with the same loop induction variable being
2033  // multiplied together. If so, we can fold them.
2034  for (unsigned OtherIdx = Idx+1;
2035  OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2036  ++OtherIdx) {
2037  if (AddRecLoop != cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop())
2038  continue;
2039 
2040  // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2041  // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2042  // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
2043  // ]]],+,...up to x=2n}.
2044  // Note that the arguments to choose() are always integers with values
2045  // known at compile time, never SCEV objects.
2046  //
2047  // The implementation avoids pointless extra computations when the two
2048  // addrec's are of different length (mathematically, it's equivalent to
2049  // an infinite stream of zeros on the right).
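// In the simplest case of two linear addrecs this reduces to the familiar
// chains-of-recurrences product:
//   {a,+,b}<L> * {c,+,d}<L> = {a*c, +, a*d + b*c + b*d, +, 2*b*d}<L>
// i.e. (a + b*i)*(c + d*i) re-expressed as a quadratic recurrence in i.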
2050  bool OpsModified = false;
2051  for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2052  ++OtherIdx) {
2053  const SCEVAddRecExpr *OtherAddRec =
2054  dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2055  if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
2056  continue;
2057 
2058  bool Overflow = false;
2059  Type *Ty = AddRec->getType();
2060  bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2061  SmallVector<const SCEV*, 7> AddRecOps;
2062  for (int x = 0, xe = AddRec->getNumOperands() +
2063  OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
2064  const SCEV *Term = getConstant(Ty, 0);
2065  for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2066  uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2067  for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2068  ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2069  z < ze && !Overflow; ++z) {
2070  uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2071  uint64_t Coeff;
2072  if (LargerThan64Bits)
2073  Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2074  else
2075  Coeff = Coeff1*Coeff2;
2076  const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2077  const SCEV *Term1 = AddRec->getOperand(y-z);
2078  const SCEV *Term2 = OtherAddRec->getOperand(z);
2079  Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
2080  }
2081  }
2082  AddRecOps.push_back(Term);
2083  }
2084  if (!Overflow) {
2085  const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
2086  SCEV::FlagAnyWrap);
2087  if (Ops.size() == 2) return NewAddRec;
2088  Ops[Idx] = NewAddRec;
2089  Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2090  OpsModified = true;
2091  AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
2092  if (!AddRec)
2093  break;
2094  }
2095  }
2096  if (OpsModified)
2097  return getMulExpr(Ops);
2098  }
2099 
2100  // Otherwise couldn't fold anything into this recurrence. Move onto the
2101  // next one.
2102  }
2103 
2104  // Okay, it looks like we really DO need an mul expr. Check to see if we
2105  // already have one, otherwise create a new one.
2106  FoldingSetNodeID ID;
2107  ID.AddInteger(scMulExpr);
2108  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2109  ID.AddPointer(Ops[i]);
2110  void *IP = 0;
2111  SCEVMulExpr *S =
2112  static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2113  if (!S) {
2114  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2115  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2116  S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2117  O, Ops.size());
2118  UniqueSCEVs.InsertNode(S, IP);
2119  }
2120  S->setNoWrapFlags(Flags);
2121  return S;
2122 }
2123 
2124 /// getUDivExpr - Get a canonical unsigned division expression, or something
2125 /// simpler if possible.
2126 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2127  const SCEV *RHS) {
2128  assert(getEffectiveSCEVType(LHS->getType()) ==
2129  getEffectiveSCEVType(RHS->getType()) &&
2130  "SCEVUDivExpr operand types don't match!");
2131 
2132  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
2133  if (RHSC->getValue()->equalsInt(1))
2134  return LHS; // X udiv 1 --> X
2135  // If the denominator is zero, the result of the udiv is undefined. Don't
2136  // try to analyze it, because the resolution chosen here may differ from
2137  // the resolution chosen in other parts of the compiler.
2138  if (!RHSC->getValue()->isZero()) {
2139  // Determine if the division can be folded into the operands of
2140  // its operands.
2141  // TODO: Generalize this to non-constants by using known-bits information.
2142  Type *Ty = LHS->getType();
2143  unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
2144  unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
2145  // For non-power-of-two values, effectively round the value up to the
2146  // nearest power of two.
2147  if (!RHSC->getValue()->getValue().isPowerOf2())
2148  ++MaxShiftAmt;
2149  IntegerType *ExtTy =
2150  IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
2151  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
2152  if (const SCEVConstant *Step =
2153  dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
2154  // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
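// e.g. {8,+,4}<L> udiv 2 --> {4,+,2}<L>, accepted only when the step
// divides evenly and the zero-extension check below shows the recurrence
// does not wrap in the wider type.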
2155  const APInt &StepInt = Step->getValue()->getValue();
2156  const APInt &DivInt = RHSC->getValue()->getValue();
2157  if (!StepInt.urem(DivInt) &&
2158  getZeroExtendExpr(AR, ExtTy) ==
2159  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2160  getZeroExtendExpr(Step, ExtTy),
2161  AR->getLoop(), SCEV::FlagAnyWrap)) {
2162  SmallVector<const SCEV *, 4> Operands;
2163  for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
2164  Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
2165  return getAddRecExpr(Operands, AR->getLoop(),
2166  SCEV::FlagNW);
2167  }
2168  // Get a canonical UDivExpr for a recurrence.
2169  // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
2170  // We can currently only fold X%N if X is constant.
2171  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
2172  if (StartC && !DivInt.urem(StepInt) &&
2173  getZeroExtendExpr(AR, ExtTy) ==
2174  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2175  getZeroExtendExpr(Step, ExtTy),
2176  AR->getLoop(), SCEV::FlagAnyWrap)) {
2177  const APInt &StartInt = StartC->getValue()->getValue();
2178  const APInt &StartRem = StartInt.urem(StepInt);
2179  if (StartRem != 0)
2180  LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
2181  AR->getLoop(), SCEV::FlagNW);
2182  }
2183  }
2184  // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
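// e.g. (x * 6) udiv 3 --> x * 2, accepted only when multiplying the
// quotient back by the divisor reproduces the original operand exactly.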
2185  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
2186  SmallVector<const SCEV *, 4> Operands;
2187  for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
2188  Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
2189  if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
2190  // Find an operand that's safely divisible.
2191  for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
2192  const SCEV *Op = M->getOperand(i);
2193  const SCEV *Div = getUDivExpr(Op, RHSC);
2194  if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
2195  Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
2196  M->op_end());
2197  Operands[i] = Div;
2198  return getMulExpr(Operands);
2199  }
2200  }
2201  }
2202  // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
2203  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
2204  SmallVector<const SCEV *, 4> Operands;
2205  for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
2206  Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
2207  if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
2208  Operands.clear();
2209  for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2210  const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
2211  if (isa<SCEVUDivExpr>(Op) ||
2212  getMulExpr(Op, RHS) != A->getOperand(i))
2213  break;
2214  Operands.push_back(Op);
2215  }
2216  if (Operands.size() == A->getNumOperands())
2217  return getAddExpr(Operands);
2218  }
2219  }
2220 
2221  // Fold if both operands are constant.
2222  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
2223  Constant *LHSCV = LHSC->getValue();
2224  Constant *RHSCV = RHSC->getValue();
2225  return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
2226  RHSCV)));
2227  }
2228  }
2229  }
2230 
2231  FoldingSetNodeID ID;
2232  ID.AddInteger(scUDivExpr);
2233  ID.AddPointer(LHS);
2234  ID.AddPointer(RHS);
2235  void *IP = 0;
2236  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2237  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
2238  LHS, RHS);
2239  UniqueSCEVs.InsertNode(S, IP);
2240  return S;
2241 }
2242 
2243 
2244 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2245 /// Simplify the expression as much as possible.
2246 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
2247  const Loop *L,
2248  SCEV::NoWrapFlags Flags) {
2249  SmallVector<const SCEV *, 4> Operands;
2250  Operands.push_back(Start);
2251  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
2252  if (StepChrec->getLoop() == L) {
2253  Operands.append(StepChrec->op_begin(), StepChrec->op_end());
2254  return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
2255  }
2256 
2257  Operands.push_back(Step);
2258  return getAddRecExpr(Operands, L, Flags);
2259 }
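// A note on the flattening above: when the step is itself an addrec over
// the same loop, getAddRecExpr(X, {Y,+,Z}<L>, L) is rebuilt as the single
// recurrence {X,+,Y,+,Z}<L>, the canonical chains-of-recurrences form for
// a higher-degree induction.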
2260 
2261 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2262 /// Simplify the expression as much as possible.
2263 const SCEV *
2264 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2265  const Loop *L, SCEV::NoWrapFlags Flags) {
2266  if (Operands.size() == 1) return Operands[0];
2267 #ifndef NDEBUG
2268  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2269  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2270  assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2271  "SCEVAddRecExpr operand types don't match!");
2272  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2273  assert(isLoopInvariant(Operands[i], L) &&
2274  "SCEVAddRecExpr operand is not loop-invariant!");
2275 #endif
2276 
2277  if (Operands.back()->isZero()) {
2278  Operands.pop_back();
2279  return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
2280  }
2281 
2282  // It's tempting to call getMaxBackedgeTakenCount here and
2283  // use that information to infer NUW and NSW flags. However, computing a
2284  // BE count requires calling getAddRecExpr, so we may not yet have a
2285  // meaningful BE count at this point (and if we don't, we'd be stuck
2286  // with a SCEVCouldNotCompute as the cached BE count).
2287 
2288  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2289  // And vice-versa.
2290  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2291  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
2292  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
2293  bool All = true;
2294  for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
2295  E = Operands.end(); I != E; ++I)
2296  if (!isKnownNonNegative(*I)) {
2297  All = false;
2298  break;
2299  }
2300  if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2301  }
2302 
2303  // Canonicalize nested AddRecs by nesting them in order of loop depth.
2304  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2305  const Loop *NestedLoop = NestedAR->getLoop();
2306  if (L->contains(NestedLoop) ?
2307  (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
2308  (!NestedLoop->contains(L) &&
2309  DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
2310  SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2311  NestedAR->op_end());
2312  Operands[0] = NestedAR->getStart();
2313  // AddRecs require their operands be loop-invariant with respect to their
2314  // loops. Don't perform this transformation if it would break this
2315  // requirement.
2316  bool AllInvariant = true;
2317  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2318  if (!isLoopInvariant(Operands[i], L)) {
2319  AllInvariant = false;
2320  break;
2321  }
2322  if (AllInvariant) {
2323  // Create a recurrence for the outer loop with the same step size.
2324  //
2325  // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
2326  // inner recurrence has the same property.
2327  SCEV::NoWrapFlags OuterFlags =
2328  maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
2329 
2330  NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
2331  AllInvariant = true;
2332  for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
2333  if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
2334  AllInvariant = false;
2335  break;
2336  }
2337  if (AllInvariant) {
2338  // Ok, both add recurrences are valid after the transformation.
2339  //
2340  // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
2341  // the outer recurrence has the same property.
2342  SCEV::NoWrapFlags InnerFlags =
2343  maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
2344  return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
2345  }
2346  }
2347  // Reset Operands to its original state.
2348  Operands[0] = NestedAR;
2349  }
2350  }
2351 
2352  // Okay, it looks like we really DO need an addrec expr. Check to see if we
2353  // already have one, otherwise create a new one.
2354  FoldingSetNodeID ID;
2355  ID.AddInteger(scAddRecExpr);
2356  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2357  ID.AddPointer(Operands[i]);
2358  ID.AddPointer(L);
2359  void *IP = 0;
2360  SCEVAddRecExpr *S =
2361  static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2362  if (!S) {
2363  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2364  std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2365  S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2366  O, Operands.size(), L);
2367  UniqueSCEVs.InsertNode(S, IP);
2368  }
2369  S->setNoWrapFlags(Flags);
2370  return S;
2371 }
2372 
2373 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2374  const SCEV *RHS) {
2375  SmallVector<const SCEV *, 2> Ops;
2376  Ops.push_back(LHS);
2377  Ops.push_back(RHS);
2378  return getSMaxExpr(Ops);
2379 }
2380 
2381 const SCEV *
2383  assert(!Ops.empty() && "Cannot get empty smax!");
2384  if (Ops.size() == 1) return Ops[0];
2385 #ifndef NDEBUG
2386  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2387  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2388  assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2389  "SCEVSMaxExpr operand types don't match!");
2390 #endif
2391 
2392  // Sort by complexity, this groups all similar expression types together.
2393  GroupByComplexity(Ops, LI);
2394 
2395  // If there are any constants, fold them together.
2396  unsigned Idx = 0;
2397  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2398  ++Idx;
2399  assert(Idx < Ops.size());
2400  while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2401  // We found two constants, fold them together!
2402  ConstantInt *Fold = ConstantInt::get(getContext(),
2403  APIntOps::smax(LHSC->getValue()->getValue(),
2404  RHSC->getValue()->getValue()));
2405  Ops[0] = getConstant(Fold);
2406  Ops.erase(Ops.begin()+1); // Erase the folded element
2407  if (Ops.size() == 1) return Ops[0];
2408  LHSC = cast<SCEVConstant>(Ops[0]);
2409  }
2410 
2411  // If we are left with a constant minimum-int, strip it off.
2412  if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
2413  Ops.erase(Ops.begin());
2414  --Idx;
2415  } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
2416  // If we have an smax with a constant maximum-int, it will always be
2417  // maximum-int.
2418  return Ops[0];
2419  }
2420 
2421  if (Ops.size() == 1) return Ops[0];
2422  }
2423 
2424  // Find the first SMax
2425  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
2426  ++Idx;
2427 
2428  // Check to see if one of the operands is an SMax. If so, expand its operands
2429  // onto our operand list, and recurse to simplify.
2430  if (Idx < Ops.size()) {
2431  bool DeletedSMax = false;
2432  while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
2433  Ops.erase(Ops.begin()+Idx);
2434  Ops.append(SMax->op_begin(), SMax->op_end());
2435  DeletedSMax = true;
2436  }
2437 
2438  if (DeletedSMax)
2439  return getSMaxExpr(Ops);
2440  }
2441 
2442  // Okay, check to see if the same value occurs in the operand list twice. If
2443  // so, delete one. Since we sorted the list, these values are required to
2444  // be adjacent.
2445  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2446  // X smax Y smax Y --> X smax Y
2447  // X smax Y --> X, if X is always greater than or equal to Y
2448  if (Ops[i] == Ops[i+1] ||
2449  isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
2450  Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2451  --i; --e;
2452  } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
2453  Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2454  --i; --e;
2455  }
2456 
2457  if (Ops.size() == 1) return Ops[0];
2458 
2459  assert(!Ops.empty() && "Reduced smax down to nothing!");
2460 
2461  // Okay, it looks like we really DO need an smax expr. Check to see if we
2462  // already have one, otherwise create a new one.
2463  FoldingSetNodeID ID;
2464  ID.AddInteger(scSMaxExpr);
2465  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2466  ID.AddPointer(Ops[i]);
2467  void *IP = 0;
2468  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2469  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2470  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2471  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
2472  O, Ops.size());
2473  UniqueSCEVs.InsertNode(S, IP);
2474  return S;
2475 }
2476 
2477 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
2478  const SCEV *RHS) {
2479  SmallVector<const SCEV *, 2> Ops;
2480  Ops.push_back(LHS);
2481  Ops.push_back(RHS);
2482  return getUMaxExpr(Ops);
2483 }
2484 
2485 const SCEV *
2487  assert(!Ops.empty() && "Cannot get empty umax!");
2488  if (Ops.size() == 1) return Ops[0];
2489 #ifndef NDEBUG
2490  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2491  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2492  assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2493  "SCEVUMaxExpr operand types don't match!");
2494 #endif
2495 
2496  // Sort by complexity, this groups all similar expression types together.
2497  GroupByComplexity(Ops, LI);
2498 
2499  // If there are any constants, fold them together.
2500  unsigned Idx = 0;
2501  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2502  ++Idx;
2503  assert(Idx < Ops.size());
2504  while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2505  // We found two constants, fold them together!
2506  ConstantInt *Fold = ConstantInt::get(getContext(),
2507  APIntOps::umax(LHSC->getValue()->getValue(),
2508  RHSC->getValue()->getValue()));
2509  Ops[0] = getConstant(Fold);
2510  Ops.erase(Ops.begin()+1); // Erase the folded element
2511  if (Ops.size() == 1) return Ops[0];
2512  LHSC = cast<SCEVConstant>(Ops[0]);
2513  }
2514 
2515  // If we are left with a constant minimum-int, strip it off.
2516  if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
2517  Ops.erase(Ops.begin());
2518  --Idx;
2519  } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
2520  // If we have an umax with a constant maximum-int, it will always be
2521  // maximum-int.
2522  return Ops[0];
2523  }
2524 
2525  if (Ops.size() == 1) return Ops[0];
2526  }
2527 
2528  // Find the first UMax
2529  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
2530  ++Idx;
2531 
2532  // Check to see if one of the operands is a UMax. If so, expand its operands
2533  // onto our operand list, and recurse to simplify.
2534  if (Idx < Ops.size()) {
2535  bool DeletedUMax = false;
2536  while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
2537  Ops.erase(Ops.begin()+Idx);
2538  Ops.append(UMax->op_begin(), UMax->op_end());
2539  DeletedUMax = true;
2540  }
2541 
2542  if (DeletedUMax)
2543  return getUMaxExpr(Ops);
2544  }
2545 
2546  // Okay, check to see if the same value occurs in the operand list twice. If
2547  // so, delete one. Since we sorted the list, these values are required to
2548  // be adjacent.
2549  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2550  // X umax Y umax Y --> X umax Y
2551  // X umax Y --> X, if X is always greater than or equal to Y
2552  if (Ops[i] == Ops[i+1] ||
2553  isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
2554  Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2555  --i; --e;
2556  } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
2557  Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2558  --i; --e;
2559  }
2560 
2561  if (Ops.size() == 1) return Ops[0];
2562 
2563  assert(!Ops.empty() && "Reduced umax down to nothing!");
2564 
2565  // Okay, it looks like we really DO need a umax expr. Check to see if we
2566  // already have one, otherwise create a new one.
2567  FoldingSetNodeID ID;
2568  ID.AddInteger(scUMaxExpr);
2569  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2570  ID.AddPointer(Ops[i]);
2571  void *IP = 0;
2572  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2573  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2574  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2575  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
2576  O, Ops.size());
2577  UniqueSCEVs.InsertNode(S, IP);
2578  return S;
2579 }
2580 
2581 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2582  const SCEV *RHS) {
2583  // ~smax(~x, ~y) == smin(x, y).
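// The identity holds in two's complement because ~v == -v-1 reverses the
// signed order: e.g. for x = 3, y = 5, smax(~3, ~5) = smax(-4, -6) = -4,
// and ~(-4) = 3 = smin(3, 5).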
2584  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2585 }
2586 
2587 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2588  const SCEV *RHS) {
2589  // ~umax(~x, ~y) == umin(x, y)
2590  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2591 }
2592 
2593 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
2594  // If we have DataLayout, we can bypass creating a target-independent
2595  // constant expression and then folding it back into a ConstantInt.
2596  // This is just a compile-time optimization.
2597  if (TD)
2598  return getConstant(IntTy, TD->getTypeAllocSize(AllocTy));
2599 
2600  Constant *C = ConstantExpr::getSizeOf(AllocTy);
2601  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2602  if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
2603  C = Folded;
2604  Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2605  assert(Ty == IntTy && "Effective SCEV type doesn't match");
2606  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2607 }
2608 
2609 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
2610  StructType *STy,
2611  unsigned FieldNo) {
2612  // If we have DataLayout, we can bypass creating a target-independent
2613  // constant expression and then folding it back into a ConstantInt.
2614  // This is just a compile-time optimization.
2615  if (TD) {
2616  return getConstant(IntTy,
2617  TD->getStructLayout(STy)->getElementOffset(FieldNo));
2618  }
2619 
2620  Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
2621  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2622  if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
2623  C = Folded;
2624 
2625  Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
2626  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2627 }
2628 
2629 const SCEV *ScalarEvolution::getUnknown(Value *V) {
2630  // Don't attempt to do anything other than create a SCEVUnknown object
2631  // here. createSCEV only calls getUnknown after checking for all other
2632  // interesting possibilities, and any other code that calls getUnknown
2633  // is doing so in order to hide a value from SCEV canonicalization.
2634 
2635  FoldingSetNodeID ID;
2636  ID.AddInteger(scUnknown);
2637  ID.AddPointer(V);
2638  void *IP = 0;
2639  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
2640  assert(cast<SCEVUnknown>(S)->getValue() == V &&
2641  "Stale SCEVUnknown in uniquing map!");
2642  return S;
2643  }
2644  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
2645  FirstUnknown);
2646  FirstUnknown = cast<SCEVUnknown>(S);
2647  UniqueSCEVs.InsertNode(S, IP);
2648  return S;
2649 }
2650 
2651 //===----------------------------------------------------------------------===//
2652 // Basic SCEV Analysis and PHI Idiom Recognition Code
2653 //
2654 
2655 /// isSCEVable - Test if values of the given type are analyzable within
2656 /// the SCEV framework. This primarily includes integer types, and it
2657 /// can optionally include pointer types if the ScalarEvolution class
2658 /// has access to target-specific information.
2659 bool ScalarEvolution::isSCEVable(Type *Ty) const {
2660  // Integers and pointers are always SCEVable.
2661  return Ty->isIntegerTy() || Ty->isPointerTy();
2662 }
2663 
2664 /// getTypeSizeInBits - Return the size in bits of the specified type,
2665 /// for which isSCEVable must return true.
2666 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
2667  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2668 
2669  // If we have a DataLayout, use it!
2670  if (TD)
2671  return TD->getTypeSizeInBits(Ty);
2672 
2673  // Integer types have fixed sizes.
2674  if (Ty->isIntegerTy())
2675  return Ty->getPrimitiveSizeInBits();
2676 
2677  // The only other supported type is pointer. Without DataLayout, conservatively
2678  // assume pointers are 64-bit.
2679  assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
2680  return 64;
2681 }
2682 
2683 /// getEffectiveSCEVType - Return a type with the same bitwidth as
2684 /// the given type and which represents how SCEV will treat the given
2685 /// type, for which isSCEVable must return true. For pointer types,
2686 /// this is the pointer-sized integer type.
2687 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
2688  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2689 
2690  if (Ty->isIntegerTy()) {
2691  return Ty;
2692  }
2693 
2694  // The only other supported type is pointer.
2695  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
2696 
2697  if (TD)
2698  return TD->getIntPtrType(Ty);
2699 
2700  // Without DataLayout, conservatively assume pointers are 64-bit.
2701  return Type::getInt64Ty(getContext());
2702 }
2703 
2704 const SCEV *ScalarEvolution::getCouldNotCompute() {
2705  return &CouldNotCompute;
2706 }
2707 
2708 namespace {
2709  // Helper class working with SCEVTraversal to figure out if a SCEV contains
2710  // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
2711  // is set iff we find such a SCEVUnknown.
2712  //
2713  struct FindInvalidSCEVUnknown {
2714  bool FindOne;
2715  FindInvalidSCEVUnknown() { FindOne = false; }
2716  bool follow(const SCEV *S) {
2717  switch (S->getSCEVType()) {
2718  case scConstant:
2719  return false;
2720  case scUnknown:
2721  if (!cast<SCEVUnknown>(S)->getValue())
2722  FindOne = true;
2723  return false;
2724  default:
2725  return true;
2726  }
2727  }
2728  bool isDone() const { return FindOne; }
2729  };
2730 }
2731 
2732 bool ScalarEvolution::checkValidity(const SCEV *S) const {
2733  FindInvalidSCEVUnknown F;
2734  SCEVTraversal<FindInvalidSCEVUnknown> ST(F);
2735  ST.visitAll(S);
2736 
2737  return !F.FindOne;
2738 }
2739 
2740 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2741 /// expression and create a new one.
2742 const SCEV *ScalarEvolution::getSCEV(Value *V) {
2743  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2744 
2745  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
2746  if (I != ValueExprMap.end()) {
2747  const SCEV *S = I->second;
2748  if (checkValidity(S))
2749  return S;
2750  else
2751  ValueExprMap.erase(I);
2752  }
2753  const SCEV *S = createSCEV(V);
2754 
2755  // The process of creating a SCEV for V may have caused other SCEVs
2756  // to have been created, so it's necessary to insert the new entry
2757  // from scratch, rather than trying to remember the insert position
2758  // above.
2759  ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2760  return S;
2761 }
2762 
2763 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2764 ///
2765 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2766  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2767  return getConstant(
2768  cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2769 
2770  Type *Ty = V->getType();
2771  Ty = getEffectiveSCEVType(Ty);
2772  return getMulExpr(V,
2773  getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
2774 }
2775 
2776 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2777 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2778  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2779  return getConstant(
2780  cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2781 
2782  Type *Ty = V->getType();
2783  Ty = getEffectiveSCEVType(Ty);
2784  const SCEV *AllOnes =
2785  getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
2786  return getMinusSCEV(AllOnes, V);
2787 }
2788 
2789 /// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
2790 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
2791  SCEV::NoWrapFlags Flags) {
2792  assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
2793 
2794  // Fast path: X - X --> 0.
2795  if (LHS == RHS)
2796  return getConstant(LHS->getType(), 0);
2797 
2798  // X - Y --> X + -Y
2799  return getAddExpr(LHS, getNegativeSCEV(RHS), Flags);
2800 }
2801 
2802 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2803 /// input value to the specified type. If the type must be extended, it is zero
2804 /// extended.
2805 const SCEV *
2806 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
2807  Type *SrcTy = V->getType();
2808  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2809  (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2810  "Cannot truncate or zero extend with non-integer arguments!");
2811  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2812  return V; // No conversion
2813  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2814  return getTruncateExpr(V, Ty);
2815  return getZeroExtendExpr(V, Ty);
2816 }
2817 
2818 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2819 /// input value to the specified type. If the type must be extended, it is sign
2820 /// extended.
2821 const SCEV *
2822 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2823  Type *Ty) {
2824  Type *SrcTy = V->getType();
2825  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2826  (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2827  "Cannot truncate or zero extend with non-integer arguments!");
2828  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2829  return V; // No conversion
2830  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2831  return getTruncateExpr(V, Ty);
2832  return getSignExtendExpr(V, Ty);
2833 }
2834 
2835 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2836 /// input value to the specified type. If the type must be extended, it is zero
2837 /// extended. The conversion must not be narrowing.
2838 const SCEV *
2839 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
2840  Type *SrcTy = V->getType();
2841  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2842  (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2843  "Cannot noop or zero extend with non-integer arguments!");
2844  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2845  "getNoopOrZeroExtend cannot truncate!");
2846  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2847  return V; // No conversion
2848  return getZeroExtendExpr(V, Ty);
2849 }
2850 
2851 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2852 /// input value to the specified type. If the type must be extended, it is sign
2853 /// extended. The conversion must not be narrowing.
2854 const SCEV *
2855 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
2856  Type *SrcTy = V->getType();
2857  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2858  (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2859  "Cannot noop or sign extend with non-integer arguments!");
2860  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2861  "getNoopOrSignExtend cannot truncate!");
2862  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2863  return V; // No conversion
2864  return getSignExtendExpr(V, Ty);
2865 }
2866 
2867 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2868 /// the input value to the specified type. If the type must be extended,
2869 /// it is extended with unspecified bits. The conversion must not be
2870 /// narrowing.
2871 const SCEV *
2872 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
2873  Type *SrcTy = V->getType();
2874  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2875  (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2876  "Cannot noop or any extend with non-integer arguments!");
2877  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2878  "getNoopOrAnyExtend cannot truncate!");
2879  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2880  return V; // No conversion
2881  return getAnyExtendExpr(V, Ty);
2882 }
2883 
2884 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2885 /// input value to the specified type. The conversion must not be widening.
2886 const SCEV *
2887 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
2888  Type *SrcTy = V->getType();
2889  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2890  (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2891  "Cannot truncate or noop with non-integer arguments!");
2892  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2893  "getTruncateOrNoop cannot extend!");
2894  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2895  return V; // No conversion
2896  return getTruncateExpr(V, Ty);
2897 }
2898 
2899 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2900 /// the types using zero-extension, and then perform a umax operation
2901 /// with them.
2902 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2903  const SCEV *RHS) {
2904  const SCEV *PromotedLHS = LHS;
2905  const SCEV *PromotedRHS = RHS;
2906 
2907  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2908  PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2909  else
2910  PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2911 
2912  return getUMaxExpr(PromotedLHS, PromotedRHS);
2913 }
2914 
2915 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
2916 /// the types using zero-extension, and then perform a umin operation
2917 /// with them.
2918 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2919  const SCEV *RHS) {
2920  const SCEV *PromotedLHS = LHS;
2921  const SCEV *PromotedRHS = RHS;
2922 
2923  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2924  PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2925  else
2926  PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2927 
2928  return getUMinExpr(PromotedLHS, PromotedRHS);
2929 }
2930 
2931 /// getPointerBase - Transitively follow the chain of pointer-type operands
2932 /// until reaching a SCEV that does not have a single pointer operand. This
2933 /// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
2934 /// but corner cases do exist.
2935 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
2936  // A pointer operand may evaluate to a nonpointer expression, such as null.
2937  if (!V->getType()->isPointerTy())
2938  return V;
2939 
2940  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
2941  return getPointerBase(Cast->getOperand());
2942  }
2943  else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
2944  const SCEV *PtrOp = 0;
2945  for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
2946  I != E; ++I) {
2947  if ((*I)->getType()->isPointerTy()) {
2948  // Cannot find the base of an expression with multiple pointer operands.
2949  if (PtrOp)
2950  return V;
2951  PtrOp = *I;
2952  }
2953  }
2954  if (!PtrOp)
2955  return V;
2956  return getPointerBase(PtrOp);
2957  }
2958  return V;
2959 }
2960 
2961 /// PushDefUseChildren - Push users of the given Instruction
2962 /// onto the given Worklist.
2963 static void
2964 PushDefUseChildren(Instruction *I,
2965  SmallVectorImpl<Instruction *> &Worklist) {
2966  // Push the def-use children onto the Worklist stack.
2967  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2968  UI != UE; ++UI)
2969  Worklist.push_back(cast<Instruction>(*UI));
2970 }
2971 
2972 /// ForgetSymbolicName - This looks up computed SCEV values for all
2973 /// instructions that depend on the given instruction and removes them from
2974 /// the ValueExprMap if they reference SymName. This is used during PHI
2975 /// resolution.
2976 void
2977 ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
2979  PushDefUseChildren(PN, Worklist);
2980 
2981  SmallPtrSet<Instruction *, 8> Visited;
2982  Visited.insert(PN);
2983  while (!Worklist.empty()) {
2984  Instruction *I = Worklist.pop_back_val();
2985  if (!Visited.insert(I)) continue;
2986 
2987  ValueExprMapType::iterator It =
2988  ValueExprMap.find_as(static_cast<Value *>(I));
2989  if (It != ValueExprMap.end()) {
2990  const SCEV *Old = It->second;
2991 
2992  // Short-circuit the def-use traversal if the symbolic name
2993  // ceases to appear in expressions.
2994  if (Old != SymName && !hasOperand(Old, SymName))
2995  continue;
2996 
2997  // SCEVUnknown for a PHI either means that it has an unrecognized
2998  // structure, it's a PHI that's in the process of being computed
2999  // by createNodeForPHI, or it's a single-value PHI. In the first case,
3000  // additional loop trip count information isn't going to change anything.
3001  // In the second case, createNodeForPHI will perform the necessary
3002  // updates on its own when it gets to that point. In the third, we do
3003  // want to forget the SCEVUnknown.
3004  if (!isa<PHINode>(I) ||
3005  !isa<SCEVUnknown>(Old) ||
3006  (I != PN && Old == SymName)) {
3007  forgetMemoizedResults(Old);
3008  ValueExprMap.erase(It);
3009  }
3010  }
3011 
3012  PushDefUseChildren(I, Worklist);
3013  }
3014 }
3015 
3016 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
3017 /// a loop header, making it a potential recurrence, or it doesn't.
3018 ///
3019 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
3020  if (const Loop *L = LI->getLoopFor(PN->getParent()))
3021  if (L->getHeader() == PN->getParent()) {
3022  // The loop may have multiple entrances or multiple exits; we can analyze
3023  // this phi as an addrec if it has a unique entry value and a unique
3024  // backedge value.
3025  Value *BEValueV = 0, *StartValueV = 0;
3026  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
3027  Value *V = PN->getIncomingValue(i);
3028  if (L->contains(PN->getIncomingBlock(i))) {
3029  if (!BEValueV) {
3030  BEValueV = V;
3031  } else if (BEValueV != V) {
3032  BEValueV = 0;
3033  break;
3034  }
3035  } else if (!StartValueV) {
3036  StartValueV = V;
3037  } else if (StartValueV != V) {
3038  StartValueV = 0;
3039  break;
3040  }
3041  }
3042  if (BEValueV && StartValueV) {
3043  // While we are analyzing this PHI node, handle its value symbolically.
3044  const SCEV *SymbolicName = getUnknown(PN);
3045  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
3046  "PHI node already processed?");
3047  ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
3048 
3049  // Using this symbolic name for the PHI, analyze the value coming around
3050  // the back-edge.
3051  const SCEV *BEValue = getSCEV(BEValueV);
3052 
3053  // NOTE: If BEValue is loop invariant, we know that the PHI node just
3054  // has a special value for the first iteration of the loop.
3055 
3056  // If the value coming around the backedge is an add with the symbolic
3057  // value we just inserted, then we found a simple induction variable!
3058  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
3059  // If there is a single occurrence of the symbolic value, replace it
3060  // with a recurrence.
3061  unsigned FoundIndex = Add->getNumOperands();
3062  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
3063  if (Add->getOperand(i) == SymbolicName)
3064  if (FoundIndex == e) {
3065  FoundIndex = i;
3066  break;
3067  }
3068 
3069  if (FoundIndex != Add->getNumOperands()) {
3070  // Create an add with everything but the specified operand.
3071  SmallVector<const SCEV *, 8> Ops;
3072  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
3073  if (i != FoundIndex)
3074  Ops.push_back(Add->getOperand(i));
3075  const SCEV *Accum = getAddExpr(Ops);
3076 
3077  // This is not a valid addrec if the step amount is varying each
3078  // loop iteration, but is not itself an addrec in this loop.
3079  if (isLoopInvariant(Accum, L) ||
3080  (isa<SCEVAddRecExpr>(Accum) &&
3081  cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
3082  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
3083 
3084  // If the increment doesn't overflow, then neither the addrec nor
3085  // the post-increment will overflow.
3086  if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
3087  if (OBO->hasNoUnsignedWrap())
3088  Flags = setFlags(Flags, SCEV::FlagNUW);
3089  if (OBO->hasNoSignedWrap())
3090  Flags = setFlags(Flags, SCEV::FlagNSW);
3091  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
3092  // If the increment is an inbounds GEP, then we know the address
3093  // space cannot be wrapped around. We cannot make any guarantee
3094  // about signed or unsigned overflow because pointers are
3095  // unsigned but we may have a negative index from the base
3096  // pointer. We can guarantee that no unsigned wrap occurs if the
3097  // indices form a positive value.
3098  if (GEP->isInBounds()) {
3099  Flags = setFlags(Flags, SCEV::FlagNW);
3100 
3101  const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
3102  if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
3103  Flags = setFlags(Flags, SCEV::FlagNUW);
3104  }
3105  } else if (const SubOperator *OBO =
3106  dyn_cast<SubOperator>(BEValueV)) {
3107  if (OBO->hasNoUnsignedWrap())
3108  Flags = setFlags(Flags, SCEV::FlagNUW);
3109  if (OBO->hasNoSignedWrap())
3110  Flags = setFlags(Flags, SCEV::FlagNSW);
3111  }
3112 
3113  const SCEV *StartVal = getSCEV(StartValueV);
3114  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
3115 
3116  // Since the no-wrap flags are on the increment, they apply to the
3117  // post-incremented value as well.
3118  if (isLoopInvariant(Accum, L))
3119  (void)getAddRecExpr(getAddExpr(StartVal, Accum),
3120  Accum, L, Flags);
3121 
3122  // Okay, for the entire analysis of this edge we assumed the PHI
3123  // to be symbolic. We now need to go back and purge all of the
3124  // entries for the scalars that use the symbolic expression.
3125  ForgetSymbolicName(PN, SymbolicName);
3126  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
3127  return PHISCEV;
3128  }
3129  }
3130  } else if (const SCEVAddRecExpr *AddRec =
3131  dyn_cast<SCEVAddRecExpr>(BEValue)) {
3132  // Otherwise, this could be a loop like this:
3133  // i = 0; for (j = 1; ..; ++j) { .... i = j; }
3134  // In this case, j = {1,+,1} and BEValue is j.
3135  // Because the other in-value of i (0) fits the evolution of BEValue,
3136  // i really is an addrec evolution.
3137  if (AddRec->getLoop() == L && AddRec->isAffine()) {
3138  const SCEV *StartVal = getSCEV(StartValueV);
3139 
3140  // If StartVal = j.start - j.stride, we can use StartVal as the
3141  // initial value of the addrec evolution.
3142  if (StartVal == getMinusSCEV(AddRec->getOperand(0),
3143  AddRec->getOperand(1))) {
3144  // FIXME: For constant StartVal, we should be able to infer
3145  // no-wrap flags.
3146  const SCEV *PHISCEV =
3147  getAddRecExpr(StartVal, AddRec->getOperand(1), L,
3148  AddRec->getNoWrapFlags(SCEV::FlagNW));
3149 
3150  // Okay, for the entire analysis of this edge we assumed the PHI
3151  // to be symbolic. We now need to go back and purge all of the
3152  // entries for the scalars that use the symbolic expression.
3153  ForgetSymbolicName(PN, SymbolicName);
3154  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
3155  return PHISCEV;
3156  }
3157  }
3158  }
3159  }
3160  }
3161 
3162  // If the PHI has a single incoming value, follow that value, unless the
3163  // PHI's incoming blocks are in a different loop, in which case doing so
3164  // risks breaking LCSSA form. Instcombine would normally zap these, but
3165  // it doesn't have DominatorTree information, so it may miss cases.
3166  if (Value *V = SimplifyInstruction(PN, TD, TLI, DT))
3167  if (LI->replacementPreservesLCSSAForm(PN, V))
3168  return getSCEV(V);
3169 
3170  // If it's not a loop phi, we can't handle it yet.
3171  return getUnknown(PN);
3172 }
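// A worked sketch of the simple-induction case above, on hypothetical IR:
//
//   loop:
//     %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nsw i32 %iv, 1
//
// BEValue is (%iv + 1); removing the single symbolic occurrence of %iv
// leaves Accum = 1, which is loop-invariant, so the PHI is rewritten as the
// recurrence {0,+,1}<nsw> for this loop.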
3173 
3174 /// createNodeForGEP - Expand GEP instructions into add and multiply
3175 /// operations. This allows them to be analyzed by regular SCEV code.
3176 ///
3177 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
3178  Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
3179  Value *Base = GEP->getOperand(0);
3180  // Don't attempt to analyze GEPs over unsized objects.
3181  if (!Base->getType()->getPointerElementType()->isSized())
3182  return getUnknown(GEP);
3183 
3184  // Don't blindly transfer the inbounds flag from the GEP instruction to the
3185  // Add expression, because the Instruction may be guarded by control flow
3186  // and the no-overflow bits may not be valid for the expression in any
3187  // context.
3188  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3189 
3190  const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
3191  gep_type_iterator GTI = gep_type_begin(GEP);
3192  for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
3193  E = GEP->op_end();
3194  I != E; ++I) {
3195  Value *Index = *I;
3196  // Compute the (potentially symbolic) offset in bytes for this index.
3197  if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
3198  // For a struct, add the member offset.
3199  unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
3200  const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
3201 
3202  // Add the field offset to the running total offset.
3203  TotalOffset = getAddExpr(TotalOffset, FieldOffset);
3204  } else {
3205  // For an array, add the element offset, explicitly scaled.
3206  const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, *GTI);
3207  const SCEV *IndexS = getSCEV(Index);
3208  // Getelementptr indices are signed.
3209  IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
3210 
3211  // Multiply the index by the element size to compute the element offset.
3212  const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize, Wrap);
3213 
3214  // Add the element offset to the running total offset.
3215  TotalOffset = getAddExpr(TotalOffset, LocalOffset);
3216  }
3217  }
3218 
3219  // Get the SCEV for the GEP base.
3220  const SCEV *BaseS = getSCEV(Base);
3221 
3222  // Add the total offset from all the GEP indices to the base.
3223  return getAddExpr(BaseS, TotalOffset, Wrap);
3224 }
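// A worked sketch on hypothetical IR, assuming 4-byte i32 and a 64-bit
// pointer size:
//
//   %p = getelementptr inbounds [100 x i32]* %a, i64 0, i64 %i
//
// The first index is scaled by sizeof([100 x i32]) and contributes 0; the
// second is sign-extended and scaled by the element size, giving roughly
// (%a + 4 * %i), with wrap flags derived from `inbounds` as described above.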
3225 
3226 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
3227 /// guaranteed to end in (at every loop iteration). It is, at the same time,
3228 /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
3229 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
3230 uint32_t
3231 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
3232  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3233  return C->getValue()->getValue().countTrailingZeros();
3234 
3235  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
3236  return std::min(GetMinTrailingZeros(T->getOperand()),
3237  (uint32_t)getTypeSizeInBits(T->getType()));
3238 
3239  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
3240  uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3241  return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3242  getTypeSizeInBits(E->getType()) : OpRes;
3243  }
3244 
3245  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
3246  uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3247  return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3248  getTypeSizeInBits(E->getType()) : OpRes;
3249  }
3250 
3251  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
3252  // The result is the min of all the operands' results.
3253  uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
3254  for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
3255  MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
3256  return MinOpRes;
3257  }
3258 
3259  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
3260  // The result is the sum of all the operands' results.
3261  uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
3262  uint32_t BitWidth = getTypeSizeInBits(M->getType());
3263  for (unsigned i = 1, e = M->getNumOperands();
3264  SumOpRes != BitWidth && i != e; ++i)
3265  SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
3266  BitWidth);
3267  return SumOpRes;
3268  }
3269 
3270  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
3271  // The result is the min of all the operands' results.
3272  uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
3273  for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
3274  MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
3275  return MinOpRes;
3276  }
3277 
3278  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
3279  // The result is the min of all the operands' results.
3280  uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3281  for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3282  MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3283  return MinOpRes;
3284  }
3285 
3286  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
3287  // The result is the min of all the operands' results.
3288  uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3289  for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3290  MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3291  return MinOpRes;
3292  }
3293 
3294  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3295  // For a SCEVUnknown, ask ValueTracking.
3296  unsigned BitWidth = getTypeSizeInBits(U->getType());
3297  APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
3298  ComputeMaskedBits(U->getValue(), Zeros, Ones);
3299  return Zeros.countTrailingOnes();
3300  }
3301 
3302  // SCEVUDivExpr
3303  return 0;
3304 }
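// A worked example of the addrec case above: in {4,+,8}, the start has 2
// trailing zero bits and the step has 3, so the minimum over the operands is
// 2 -- every value the recurrence takes (4, 12, 20, ...) is divisible by 4.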
3305 
3306 /// getUnsignedRange - Determine the unsigned range for a particular SCEV.
3307 ///
3308 ConstantRange
3309 ScalarEvolution::getUnsignedRange(const SCEV *S) {
3310  // See if we've computed this range already.
3311  DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
3312  if (I != UnsignedRanges.end())
3313  return I->second;
3314 
3315  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3316  return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));
3317 
3318  unsigned BitWidth = getTypeSizeInBits(S->getType());
3319  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3320 
3321  // If the value has known zeros, the maximum unsigned value will have those
3322  // known zeros as well.
3323  uint32_t TZ = GetMinTrailingZeros(S);
3324  if (TZ != 0)
3325  ConservativeResult =
3326  ConstantRange(APInt::getMinValue(BitWidth),
3327  APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
3328 
3329  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3330  ConstantRange X = getUnsignedRange(Add->getOperand(0));
3331  for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3332  X = X.add(getUnsignedRange(Add->getOperand(i)));
3333  return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
3334  }
3335 
3336  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3337  ConstantRange X = getUnsignedRange(Mul->getOperand(0));
3338  for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3339  X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
3340  return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
3341  }
3342 
3343  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3344  ConstantRange X = getUnsignedRange(SMax->getOperand(0));
3345  for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3346  X = X.smax(getUnsignedRange(SMax->getOperand(i)));
3347  return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
3348  }
3349 
3350  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3351  ConstantRange X = getUnsignedRange(UMax->getOperand(0));
3352  for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3353  X = X.umax(getUnsignedRange(UMax->getOperand(i)));
3354  return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
3355  }
3356 
3357  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3358  ConstantRange X = getUnsignedRange(UDiv->getLHS());
3359  ConstantRange Y = getUnsignedRange(UDiv->getRHS());
3360  return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
3361  }
3362 
3363  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3364  ConstantRange X = getUnsignedRange(ZExt->getOperand());
3365  return setUnsignedRange(ZExt,
3366  ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
3367  }
3368 
3369  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3370  ConstantRange X = getUnsignedRange(SExt->getOperand());
3371  return setUnsignedRange(SExt,
3372  ConservativeResult.intersectWith(X.signExtend(BitWidth)));
3373  }
3374 
3375  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3376  ConstantRange X = getUnsignedRange(Trunc->getOperand());
3377  return setUnsignedRange(Trunc,
3378  ConservativeResult.intersectWith(X.truncate(BitWidth)));
3379  }
3380 
3381  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3382  // If there's no unsigned wrap, the value will never be less than its
3383  // initial value.
3384  if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
3385  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
3386  if (!C->getValue()->isZero())
3387  ConservativeResult =
3388  ConservativeResult.intersectWith(
3389  ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
3390 
3391  // TODO: non-affine addrec
3392  if (AddRec->isAffine()) {
3393  Type *Ty = AddRec->getType();
3394  const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3395  if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3396  getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3397  MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3398 
3399  const SCEV *Start = AddRec->getStart();
3400  const SCEV *Step = AddRec->getStepRecurrence(*this);
3401 
3402  ConstantRange StartRange = getUnsignedRange(Start);
3403  ConstantRange StepRange = getSignedRange(Step);
3404  ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3405  ConstantRange EndRange =
3406  StartRange.add(MaxBECountRange.multiply(StepRange));
3407 
3408  // Check for overflow. This must be done with ConstantRange arithmetic
3409  // because we could be called from within the ScalarEvolution overflow
3410  // checking code.
3411  ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
3412  ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3413  ConstantRange ExtMaxBECountRange =
3414  MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3415  ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
3416  if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3417  ExtEndRange)
3418  return setUnsignedRange(AddRec, ConservativeResult);
3419 
3420  APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
3421  EndRange.getUnsignedMin());
3422  APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
3423  EndRange.getUnsignedMax());
3424  if (Min.isMinValue() && Max.isMaxValue())
3425  return setUnsignedRange(AddRec, ConservativeResult);
3426  return setUnsignedRange(AddRec,
3427  ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
3428  }
3429  }
3430 
3431  return setUnsignedRange(AddRec, ConservativeResult);
3432  }
3433 
3434  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3435  // For a SCEVUnknown, ask ValueTracking.
3436  APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
3437  ComputeMaskedBits(U->getValue(), Zeros, Ones, TD);
3438  if (Ones == ~Zeros + 1)
3439  return setUnsignedRange(U, ConservativeResult);
3440  return setUnsignedRange(U,
3441  ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
3442  }
3443 
3444  return setUnsignedRange(S, ConservativeResult);
3445 }
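// A small worked example (hypothetical i8 value %x with no other known
// facts): for S = (zext i8 %x to i32), the operand's range is the full 8-bit
// set, so the zero-extend case above yields [0,256) at 32 bits, and the
// intersection with ConservativeResult can only tighten it further.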
3446 
3447 /// getSignedRange - Determine the signed range for a particular SCEV.
3448 ///
3449 ConstantRange
3450 ScalarEvolution::getSignedRange(const SCEV *S) {
3451  // See if we've computed this range already.
3452  DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
3453  if (I != SignedRanges.end())
3454  return I->second;
3455 
3456  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3457  return setSignedRange(C, ConstantRange(C->getValue()->getValue()));
3458 
3459  unsigned BitWidth = getTypeSizeInBits(S->getType());
3460  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3461 
3462  // If the value has known zeros, the maximum signed value will have those
3463  // known zeros as well.
3464  uint32_t TZ = GetMinTrailingZeros(S);
3465  if (TZ != 0)
3466  ConservativeResult =
3467  ConstantRange(APInt::getSignedMinValue(BitWidth),
3468  APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
3469 
3470  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3471  ConstantRange X = getSignedRange(Add->getOperand(0));
3472  for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3473  X = X.add(getSignedRange(Add->getOperand(i)));
3474  return setSignedRange(Add, ConservativeResult.intersectWith(X));
3475  }
3476 
3477  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3478  ConstantRange X = getSignedRange(Mul->getOperand(0));
3479  for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3480  X = X.multiply(getSignedRange(Mul->getOperand(i)));
3481  return setSignedRange(Mul, ConservativeResult.intersectWith(X));
3482  }
3483 
3484  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3485  ConstantRange X = getSignedRange(SMax->getOperand(0));
3486  for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3487  X = X.smax(getSignedRange(SMax->getOperand(i)));
3488  return setSignedRange(SMax, ConservativeResult.intersectWith(X));
3489  }
3490 
3491  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3492  ConstantRange X = getSignedRange(UMax->getOperand(0));
3493  for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3494  X = X.umax(getSignedRange(UMax->getOperand(i)));
3495  return setSignedRange(UMax, ConservativeResult.intersectWith(X));
3496  }
3497 
3498  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3499  ConstantRange X = getSignedRange(UDiv->getLHS());
3500  ConstantRange Y = getSignedRange(UDiv->getRHS());
3501  return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
3502  }
3503 
3504  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3505  ConstantRange X = getSignedRange(ZExt->getOperand());
3506  return setSignedRange(ZExt,
3507  ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
3508  }
3509 
3510  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3511  ConstantRange X = getSignedRange(SExt->getOperand());
3512  return setSignedRange(SExt,
3513  ConservativeResult.intersectWith(X.signExtend(BitWidth)));
3514  }
3515 
3516  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3517  ConstantRange X = getSignedRange(Trunc->getOperand());
3518  return setSignedRange(Trunc,
3519  ConservativeResult.intersectWith(X.truncate(BitWidth)));
3520  }
3521 
3522  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3523  // If there's no signed wrap, and all the operands have the same sign or
3524  // zero, the value won't ever change sign.
3525  if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
3526  bool AllNonNeg = true;
3527  bool AllNonPos = true;
3528  for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3529  if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
3530  if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
3531  }
3532  if (AllNonNeg)
3533  ConservativeResult = ConservativeResult.intersectWith(
3534  ConstantRange(APInt(BitWidth, 0),
3535  APInt::getSignedMinValue(BitWidth)));
3536  else if (AllNonPos)
3537  ConservativeResult = ConservativeResult.intersectWith(
3538  ConstantRange(APInt::getSignedMinValue(BitWidth),
3539  APInt(BitWidth, 1)));
3540  }
3541 
3542  // TODO: non-affine addrec
3543  if (AddRec->isAffine()) {
3544  Type *Ty = AddRec->getType();
3545  const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3546  if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3547  getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3548  MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3549 
3550  const SCEV *Start = AddRec->getStart();
3551  const SCEV *Step = AddRec->getStepRecurrence(*this);
3552 
3553  ConstantRange StartRange = getSignedRange(Start);
3554  ConstantRange StepRange = getSignedRange(Step);
3555  ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3556  ConstantRange EndRange =
3557  StartRange.add(MaxBECountRange.multiply(StepRange));
3558 
3559  // Check for overflow. This must be done with ConstantRange arithmetic
3560  // because we could be called from within the ScalarEvolution overflow
3561  // checking code.
3562  ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
3563  ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3564  ConstantRange ExtMaxBECountRange =
3565  MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3566  ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
3567  if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3568  ExtEndRange)
3569  return setSignedRange(AddRec, ConservativeResult);
3570 
3571  APInt Min = APIntOps::smin(StartRange.getSignedMin(),
3572  EndRange.getSignedMin());
3573  APInt Max = APIntOps::smax(StartRange.getSignedMax(),
3574  EndRange.getSignedMax());
3575  if (Min.isMinSignedValue() && Max.isMaxSignedValue())
3576  return setSignedRange(AddRec, ConservativeResult);
3577  return setSignedRange(AddRec,
3578  ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
3579  }
3580  }
3581 
3582  return setSignedRange(AddRec, ConservativeResult);
3583  }
3584 
3585  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3586  // For a SCEVUnknown, ask ValueTracking.
3587  if (!U->getValue()->getType()->isIntegerTy() && !TD)
3588  return setSignedRange(U, ConservativeResult);
3589  unsigned NS = ComputeNumSignBits(U->getValue(), TD);
3590  if (NS <= 1)
3591  return setSignedRange(U, ConservativeResult);
3592  return setSignedRange(U, ConservativeResult.intersectWith(
3593  ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
3594  APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
3595  }
3596 
3597  return setSignedRange(S, ConservativeResult);
3598 }
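// A worked instance of the affine addrec case above, with hypothetical
// numbers: for {0,+,1}<nsw> in a loop whose max backedge-taken count is the
// constant 9, StartRange = [0,1), StepRange = [1,2) and MaxBECountRange =
// [9,10), so EndRange = [0,1) + [9,10)*[1,2) = [9,10). The final range is
// [smin(0,9), smax(0,9)+1) = [0,10): the IV is known to stay within 0..9.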
3599 
3600 /// createSCEV - We know that there is no SCEV for the specified value.
3601 /// Analyze the expression.
3602 ///
3603 const SCEV *ScalarEvolution::createSCEV(Value *V) {
3604  if (!isSCEVable(V->getType()))
3605  return getUnknown(V);
3606 
3607  unsigned Opcode = Instruction::UserOp1;
3608  if (Instruction *I = dyn_cast<Instruction>(V)) {
3609  Opcode = I->getOpcode();
3610 
3611  // Don't attempt to analyze instructions in blocks that aren't
3612  // reachable. Such instructions don't matter, and they aren't required
3613  // to obey basic rules for definitions dominating uses which this
3614  // analysis depends on.
3615  if (!DT->isReachableFromEntry(I->getParent()))
3616  return getUnknown(V);
3617  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
3618  Opcode = CE->getOpcode();
3619  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3620  return getConstant(CI);
3621  else if (isa<ConstantPointerNull>(V))
3622  return getConstant(V->getType(), 0);
3623  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3624  return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
3625  else
3626  return getUnknown(V);
3627 
3628  Operator *U = cast<Operator>(V);
3629  switch (Opcode) {
3630  case Instruction::Add: {
3631  // The simple thing to do would be to just call getSCEV on both operands
3632  // and call getAddExpr with the result. However if we're looking at a
3633  // bunch of things all added together, this can be quite inefficient,
3634  // because it leads to N-1 getAddExpr calls for N ultimate operands.
3635  // Instead, gather up all the operands and make a single getAddExpr call.
3636  // LLVM IR canonical form means we need only traverse the left operands.
3637  //
3638  // Don't apply this instruction's NSW or NUW flags to the new
3639  // expression. The instruction may be guarded by control flow that the
3640  // no-wrap behavior depends on. Non-control-equivalent instructions can be
3641  // mapped to the same SCEV expression, and it would be incorrect to transfer
3642  // NSW/NUW semantics to those operations.
3643  SmallVector<const SCEV *, 4> AddOps;
3644  AddOps.push_back(getSCEV(U->getOperand(1)));
3645  for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
3646  unsigned Opcode = Op->getValueID() - Value::InstructionVal;
3647  if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
3648  break;
3649  U = cast<Operator>(Op);
3650  const SCEV *Op1 = getSCEV(U->getOperand(1));
3651  if (Opcode == Instruction::Sub)
3652  AddOps.push_back(getNegativeSCEV(Op1));
3653  else
3654  AddOps.push_back(Op1);
3655  }
3656  AddOps.push_back(getSCEV(U->getOperand(0)));
3657  return getAddExpr(AddOps);
3658  }
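// A sketch of the operand gathering above, on a hypothetical left-leaning
// chain:
//
//   %t0 = sub i32 %a, %b
//   %t1 = add i32 %t0, %c
//   %t2 = add i32 %t1, %d
//
// createSCEV(%t2) walks the left operands and collects {%d, %c, -%b, %a},
// issuing one getAddExpr call rather than one per instruction.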
3659  case Instruction::Mul: {
3660  // Don't transfer NSW/NUW for the same reason as AddExpr.
3661  SmallVector<const SCEV *, 4> MulOps;
3662  MulOps.push_back(getSCEV(U->getOperand(1)));
3663  for (Value *Op = U->getOperand(0);
3664  Op->getValueID() == Instruction::Mul + Value::InstructionVal;
3665  Op = U->getOperand(0)) {
3666  U = cast<Operator>(Op);
3667  MulOps.push_back(getSCEV(U->getOperand(1)));
3668  }
3669  MulOps.push_back(getSCEV(U->getOperand(0)));
3670  return getMulExpr(MulOps);
3671  }
3672  case Instruction::UDiv:
3673  return getUDivExpr(getSCEV(U->getOperand(0)),
3674  getSCEV(U->getOperand(1)));
3675  case Instruction::Sub:
3676  return getMinusSCEV(getSCEV(U->getOperand(0)),
3677  getSCEV(U->getOperand(1)));
3678  case Instruction::And:
3679  // For an expression like x&255 that merely masks off the high bits,
3680  // use zext(trunc(x)) as the SCEV expression.
3681  if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3682  if (CI->isNullValue())
3683  return getSCEV(U->getOperand(1));
3684  if (CI->isAllOnesValue())
3685  return getSCEV(U->getOperand(0));
3686  const APInt &A = CI->getValue();
3687 
3688  // Instcombine's ShrinkDemandedConstant may strip bits out of
3689  // constants, obscuring what would otherwise be a low-bits mask.
3690  // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
3691  // knew about to reconstruct a low-bits mask value.
3692  unsigned LZ = A.countLeadingZeros();
3693  unsigned BitWidth = A.getBitWidth();
3694  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3695  ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD);
3696 
3697  APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
3698 
3699  if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
3700  return
3701  getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
3702  IntegerType::get(getContext(), BitWidth - LZ)),
3703  U->getType());
3704  }
3705  break;
3706 
3707  case Instruction::Or:
3708  // If the RHS of the Or is a constant, we may have something like:
3709  // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
3710  // optimizations will transparently handle this case.
3711  //
3712  // In order for this transformation to be safe, the LHS must be of the
3713  // form X*(2^n) and the Or constant must be less than 2^n.
3714  if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3715  const SCEV *LHS = getSCEV(U->getOperand(0));
3716  const APInt &CIVal = CI->getValue();
3717  if (GetMinTrailingZeros(LHS) >=
3718  (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
3719  // Build a plain add SCEV.
3720  const SCEV *S = getAddExpr(LHS, getSCEV(CI));
3721  // If the LHS of the add was an addrec and it has no-wrap flags,
3722  // transfer the no-wrap flags, since an or won't introduce a wrap.
3723  if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
3724  const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
3725  const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
3726  OldAR->getNoWrapFlags());
3727  }
3728  return S;
3729  }
3730  }
3731  break;
3732  case Instruction::Xor:
3733  if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3734  // If the RHS of the xor is a signbit, then this is just an add.
3735  // Instcombine turns add of signbit into xor as a strength reduction step.
3736  if (CI->getValue().isSignBit())
3737  return getAddExpr(getSCEV(U->getOperand(0)),
3738  getSCEV(U->getOperand(1)));
3739 
3740  // If the RHS of xor is -1, then this is a not operation.
3741  if (CI->isAllOnesValue())
3742  return getNotSCEV(getSCEV(U->getOperand(0)));
3743 
3744  // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
3745  // This is a variant of the check for xor with -1, and it handles
3746  // the case where instcombine has trimmed non-demanded bits out
3747  // of an xor with -1.
3748  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3749  if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3750  if (BO->getOpcode() == Instruction::And &&
3751  LCI->getValue() == CI->getValue())
3752  if (const SCEVZeroExtendExpr *Z =
3753  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
3754  Type *UTy = U->getType();
3755  const SCEV *Z0 = Z->getOperand();
3756  Type *Z0Ty = Z0->getType();
3757  unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3758 
3759  // If C is a low-bits mask, the zero extend is serving to
3760  // mask off the high bits. Complement the operand and
3761  // re-apply the zext.
3762  if (APIntOps::isMask(Z0TySize, CI->getValue()))
3763  return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3764 
3765  // If C is a single bit, it may be in the sign-bit position
3766  // before the zero-extend. In this case, represent the xor
3767  // using an add, which is equivalent, and re-apply the zext.
3768  APInt Trunc = CI->getValue().trunc(Z0TySize);
3769  if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3770  Trunc.isSignBit())
3771  return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3772  UTy);
3773  }
3774  }
3775  break;
3776 
3777  case Instruction::Shl:
3778  // Turn shift left of a constant amount into a multiply.
3779  if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3780  uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3781 
3782  // If the shift count is not less than the bitwidth, the result of
3783  // the shift is undefined. Don't try to analyze it, because the
3784  // resolution chosen here may differ from the resolution chosen in
3785  // other parts of the compiler.
3786  if (SA->getValue().uge(BitWidth))
3787  break;
3788 
3789  Constant *X = ConstantInt::get(getContext(),
3790  APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
3791  return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3792  }
3793  break;
3794 
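// A small example of the rewrite above, on hypothetical IR: `%y = shl i32
// %x, 3` becomes the SCEV (8 * %x); a shift count of 32 or more is left
// unanalyzed because the IR result is undefined.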
3795  case Instruction::LShr:
3796  // Turn logical shift right of a constant into an unsigned divide.
3797  if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3798  uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3799 
3800  // If the shift count is not less than the bitwidth, the result of
3801  // the shift is undefined. Don't try to analyze it, because the
3802  // resolution chosen here may differ from the resolution chosen in
3803  // other parts of the compiler.
3804  if (SA->getValue().uge(BitWidth))
3805  break;
3806 
3807  Constant *X = ConstantInt::get(getContext(),
3808  APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
3809  return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3810  }
3811  break;
3812 
3813  case Instruction::AShr:
3814  // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
3815  if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3816  if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
3817  if (L->getOpcode() == Instruction::Shl &&
3818  L->getOperand(1) == U->getOperand(1)) {
3819  uint64_t BitWidth = getTypeSizeInBits(U->getType());
3820 
3821  // If the shift count is not less than the bitwidth, the result of
3822  // the shift is undefined. Don't try to analyze it, because the
3823  // resolution chosen here may differ from the resolution chosen in
3824  // other parts of the compiler.
3825  if (CI->getValue().uge(BitWidth))
3826  break;
3827 
3828  uint64_t Amt = BitWidth - CI->getZExtValue();
3829  if (Amt == BitWidth)
3830  return getSCEV(L->getOperand(0)); // shift by zero --> noop
3831  return
3832  getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3833  IntegerType::get(getContext(),
3834  Amt)),
3835  U->getType());
3836  }
3837  break;
3838 
3839  case Instruction::Trunc:
3840  return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3841 
3842  case Instruction::ZExt:
3843  return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3844 
3845  case Instruction::SExt:
3846  return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3847 
3848  case Instruction::BitCast:
3849  // BitCasts are no-op casts so we just eliminate the cast.
3850  if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
3851  return getSCEV(U->getOperand(0));
3852  break;
3853 
3854  // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
3855  // lead to pointer expressions which cannot safely be expanded to GEPs,
3856  // because ScalarEvolution doesn't respect the GEP aliasing rules when
3857  // simplifying integer expressions.
3858 
3859  case Instruction::GetElementPtr:
3860  return createNodeForGEP(cast<GEPOperator>(U));
3861 
3862  case Instruction::PHI:
3863  return createNodeForPHI(cast<PHINode>(U));
3864 
3865  case Instruction::Select:
3866  // This could be a smax or umax that was lowered earlier.
3867  // Try to recover it.
3868  if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
3869  Value *LHS = ICI->getOperand(0);
3870  Value *RHS = ICI->getOperand(1);
3871  switch (ICI->getPredicate()) {
3872  case ICmpInst::ICMP_SLT:
3873  case ICmpInst::ICMP_SLE:
3874  std::swap(LHS, RHS);
3875  // fall through
3876  case ICmpInst::ICMP_SGT:
3877  case ICmpInst::ICMP_SGE:
3878  // a >s b ? a+x : b+x -> smax(a, b)+x
3879  // a >s b ? b+x : a+x -> smin(a, b)+x
3880  if (LHS->getType() == U->getType()) {
3881  const SCEV *LS = getSCEV(LHS);
3882  const SCEV *RS = getSCEV(RHS);
3883  const SCEV *LA = getSCEV(U->getOperand(1));
3884  const SCEV *RA = getSCEV(U->getOperand(2));
3885  const SCEV *LDiff = getMinusSCEV(LA, LS);
3886  const SCEV *RDiff = getMinusSCEV(RA, RS);
3887  if (LDiff == RDiff)
3888  return getAddExpr(getSMaxExpr(LS, RS), LDiff);
3889  LDiff = getMinusSCEV(LA, RS);
3890  RDiff = getMinusSCEV(RA, LS);
3891  if (LDiff == RDiff)
3892  return getAddExpr(getSMinExpr(LS, RS), LDiff);
3893  }
3894  break;
3895  case ICmpInst::ICMP_ULT:
3896  case ICmpInst::ICMP_ULE:
3897  std::swap(LHS, RHS);
3898  // fall through
3899  case ICmpInst::ICMP_UGT:
3900  case ICmpInst::ICMP_UGE:
3901  // a >u b ? a+x : b+x -> umax(a, b)+x
3902  // a >u b ? b+x : a+x -> umin(a, b)+x
3903  if (LHS->getType() == U->getType()) {
3904  const SCEV *LS = getSCEV(LHS);
3905  const SCEV *RS = getSCEV(RHS);
3906  const SCEV *LA = getSCEV(U->getOperand(1));
3907  const SCEV *RA = getSCEV(U->getOperand(2));
3908  const SCEV *LDiff = getMinusSCEV(LA, LS);
3909  const SCEV *RDiff = getMinusSCEV(RA, RS);
3910  if (LDiff == RDiff)
3911  return getAddExpr(getUMaxExpr(LS, RS), LDiff);
3912  LDiff = getMinusSCEV(LA, RS);
3913  RDiff = getMinusSCEV(RA, LS);
3914  if (LDiff == RDiff)
3915  return getAddExpr(getUMinExpr(LS, RS), LDiff);
3916  }
3917  break;
3918  case ICmpInst::ICMP_NE:
3919  // n != 0 ? n+x : 1+x -> umax(n, 1)+x
3920  if (LHS->getType() == U->getType() &&
3921  isa<ConstantInt>(RHS) &&
3922  cast<ConstantInt>(RHS)->isZero()) {
3923  const SCEV *One = getConstant(LHS->getType(), 1);
3924  const SCEV *LS = getSCEV(LHS);
3925  const SCEV *LA = getSCEV(U->getOperand(1));
3926  const SCEV *RA = getSCEV(U->getOperand(2));
3927  const SCEV *LDiff = getMinusSCEV(LA, LS);
3928  const SCEV *RDiff = getMinusSCEV(RA, One);
3929  if (LDiff == RDiff)
3930  return getAddExpr(getUMaxExpr(One, LS), LDiff);
3931  }
3932  break;
3933  case ICmpInst::ICMP_EQ:
3934  // n == 0 ? 1+x : n+x -> umax(n, 1)+x
3935  if (LHS->getType() == U->getType() &&
3936  isa<ConstantInt>(RHS) &&
3937  cast<ConstantInt>(RHS)->isZero()) {
3938  const SCEV *One = getConstant(LHS->getType(), 1);
3939  const SCEV *LS = getSCEV(LHS);
3940  const SCEV *LA = getSCEV(U->getOperand(1));
3941  const SCEV *RA = getSCEV(U->getOperand(2));
3942  const SCEV *LDiff = getMinusSCEV(LA, One);
3943  const SCEV *RDiff = getMinusSCEV(RA, LS);
3944  if (LDiff == RDiff)
3945  return getAddExpr(getUMaxExpr(One, LS), LDiff);
3946  }
3947  break;
3948  default:
3949  break;
3950  }
3951  }
3952 
3953  default: // We cannot analyze this expression.
3954  break;
3955  }
3956 
3957  return getUnknown(V);
3958 }
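// A small example of the select recovery above, on hypothetical IR:
//
//   %c = icmp sgt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b
//
// Here LA - LS == RA - RS == 0, so createSCEV(%s) returns smax(%a, %b).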
3959 
3960 
3961 
3962 //===----------------------------------------------------------------------===//
3963 // Iteration Count Computation Code
3964 //
3965 
3966 /// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
3967 /// normal unsigned value. Returns 0 if the trip count is unknown or not
3968 /// constant. Will also return 0 if the maximum trip count is very large (>=
3969 /// 2^32).
3970 ///
3971 /// This "trip count" assumes that control exits via ExitingBlock. More
3972 /// precisely, it is the number of times that control may reach ExitingBlock
3973 /// before taking the branch. For loops with multiple exits, it may not be the
3974 /// number of times that the loop header executes because the loop may exit
3975 /// prematurely via another branch.
3976 ///
3977 /// FIXME: We conservatively call getBackedgeTakenCount(L) instead of
3978 /// getExitCount(L, ExitingBlock) to compute a safe trip count considering all
3979 /// loop exits. getExitCount() may return an exact count for this branch
3980 /// assuming no-signed-wrap. The number of well-defined iterations may actually
3981 /// be higher than this trip count if this exit test is skipped and the loop
3982 /// exits via a different branch. Ideally, getExitCount() would know whether it
3983 /// depends on a NSW assumption, and we would only fall back to a conservative
3984 /// trip count in that case.
3985 unsigned ScalarEvolution::
3986 getSmallConstantTripCount(Loop *L, BasicBlock * /*ExitingBlock*/) {
3987  const SCEVConstant *ExitCount =
3988  dyn_cast<SCEVConstant>(getBackedgeTakenCount(L));
3989  if (!ExitCount)
3990  return 0;
3991 
3992  ConstantInt *ExitConst = ExitCount->getValue();
3993 
3994  // Guard against huge trip counts.
3995  if (ExitConst->getValue().getActiveBits() > 32)
3996  return 0;
3997 
3998  // In case of integer overflow, this returns 0, which is correct.
3999  return ((unsigned)ExitConst->getZExtValue()) + 1;
4000 }
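// A worked example with hypothetical numbers: for a loop equivalent to
// `for (i = 0; i != 10; ++i)`, the backedge-taken count is the constant 9,
// so this returns 9 + 1 = 10; a symbolic or >= 2^32 count yields 0 instead.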
4001 
4002 /// getSmallConstantTripMultiple - Returns the largest constant divisor of the
4003 /// trip count of this loop as a normal unsigned value, if possible. This
4004 /// means that the actual trip count is always a multiple of the returned
4005 /// value (don't forget the trip count could very well be zero as well!).
4006 ///
4007 /// Returns 1 if the trip count is unknown or not guaranteed to be the
4008 /// multiple of a constant (which is also the case if the trip count is simply
4009 /// constant; use getSmallConstantTripCount for that case). It will also return 1
4010 /// if the trip count is very large (>= 2^32).
4011 ///
4012 /// As explained in the comments for getSmallConstantTripCount, this assumes
4013 /// that control exits the loop via ExitingBlock.
4014 unsigned ScalarEvolution::
4015 getSmallConstantTripMultiple(Loop *L, BasicBlock * /*ExitingBlock*/) {
4016  const SCEV *ExitCount = getBackedgeTakenCount(L);
4017  if (ExitCount == getCouldNotCompute())
4018  return 1;
4019 
4020  // Get the trip count from the BE count by adding 1.
4021  const SCEV *TCMul = getAddExpr(ExitCount,
4022  getConstant(ExitCount->getType(), 1));
4023  // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
4024  // to factor simple cases.
4025  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
4026  TCMul = Mul->getOperand(0);
4027 
4028  const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
4029  if (!MulC)
4030  return 1;
4031 
4032  ConstantInt *Result = MulC->getValue();
4033 
4034  // Guard against huge trip counts (this requires checking
4035  // for zero to handle the case where the trip count == -1 and the
4036  // addition wraps).
4037  if (!Result || Result->getValue().getActiveBits() > 32 ||
4038  Result->getValue().getActiveBits() == 0)
4039  return 1;
4040 
4041  return (unsigned)Result->getZExtValue();
4042 }
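// A worked example with hypothetical values: if the backedge-taken count is
// (-1 + 4 * %n), adding 1 folds the trip count to (4 * %n); the constant
// operand of the mul is extracted, so this returns 4 -- the trip count is
// always a multiple of 4 (possibly zero).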
4043 
4044 /// getExitCount - Get the expression for the number of loop iterations for which
4045 /// this loop is guaranteed not to exit via ExitingBlock. Otherwise return
4046 /// SCEVCouldNotCompute.
4047 const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
4048  return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
4049 }
4050 
4051 /// getBackedgeTakenCount - If the specified loop has a predictable
4052 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
4053 /// object. The backedge-taken count is the number of times the loop header
4054 /// will be branched to from within the loop. This is one less than the
4055 /// trip count of the loop, since it doesn't count the first iteration,
4056 /// when the header is branched to from outside the loop.
4057 ///
4058 /// Note that it is not valid to call this method on a loop without a
4059 /// loop-invariant backedge-taken count (see
4060 /// hasLoopInvariantBackedgeTakenCount).
4061 ///
4062 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
4063  return getBackedgeTakenInfo(L).getExact(this);
4064 }
4065 
4066 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
4067 /// return the least SCEV value that is known never to be less than the
4068 /// actual backedge taken count.
4069 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
4070  return getBackedgeTakenInfo(L).getMax(this);
4071 }
4072 
4073 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
4074 /// onto the given Worklist.
4075 static void
4076 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
4077  BasicBlock *Header = L->getHeader();
4078 
4079  // Push all Loop-header PHIs onto the Worklist stack.
4080  for (BasicBlock::iterator I = Header->begin();
4081  PHINode *PN = dyn_cast<PHINode>(I); ++I)
4082  Worklist.push_back(PN);
4083 }
4084 
4085 const ScalarEvolution::BackedgeTakenInfo &
4086 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
4087  // Initially insert an invalid entry for this loop. If the insertion
4088  // succeeds, proceed to actually compute a backedge-taken count and
4089  // update the value. The temporary CouldNotCompute value tells SCEV
4090  // code elsewhere that it shouldn't attempt to request a new
4091  // backedge-taken count, which could result in infinite recursion.
4092  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
4093  BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
4094  if (!Pair.second)
4095  return Pair.first->second;
4096 
4097  // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
4098  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
4099  // must be cleared in this scope.
4100  BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);
4101 
4102  if (Result.getExact(this) != getCouldNotCompute()) {
4103  assert(isLoopInvariant(Result.getExact(this), L) &&
4104  isLoopInvariant(Result.getMax(this), L) &&
4105  "Computed backedge-taken count isn't loop invariant for loop!");
4106  ++NumTripCountsComputed;
4107  }
4108  else if (Result.getMax(this) == getCouldNotCompute() &&
4109  isa<PHINode>(L->getHeader()->begin())) {
4110  // Only count loops that have phi nodes as not being computable.
4111  ++NumTripCountsNotComputed;
4112  }
4113 
4114  // Now that we know more about the trip count for this loop, forget any
4115  // existing SCEV values for PHI nodes in this loop since they are only
4116  // conservative estimates made without the benefit of trip count
4117  // information. This is similar to the code in forgetLoop, except that
4118  // it handles SCEVUnknown PHI nodes specially.
4119  if (Result.hasAnyInfo()) {
4120  SmallVector<Instruction *, 16> Worklist;
4121  PushLoopPHIs(L, Worklist);
4122 
4123  SmallPtrSet<Instruction *, 8> Visited;
4124  while (!Worklist.empty()) {
4125  Instruction *I = Worklist.pop_back_val();
4126  if (!Visited.insert(I)) continue;
4127 
4128  ValueExprMapType::iterator It =
4129  ValueExprMap.find_as(static_cast<Value *>(I));
4130  if (It != ValueExprMap.end()) {
4131  const SCEV *Old = It->second;
4132 
4133  // SCEVUnknown for a PHI either means that it has an unrecognized
4134  // structure, or it's a PHI that's in the process of being computed
4135  // by createNodeForPHI. In the former case, additional loop trip
4136  // count information isn't going to change anything. In the latter
4137  // case, createNodeForPHI will perform the necessary updates on its
4138  // own when it gets to that point.
4139  if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
4140  forgetMemoizedResults(Old);
4141  ValueExprMap.erase(It);
4142  }
4143  if (PHINode *PN = dyn_cast<PHINode>(I))
4144  ConstantEvolutionLoopExitValue.erase(PN);
4145  }
4146 
4147  PushDefUseChildren(I, Worklist);
4148  }
4149  }
4150 
4151  // Re-lookup the insert position, since the call to
4152  // ComputeBackedgeTakenCount above could result in a
4153  // recursive call to getBackedgeTakenInfo (on a different
4154  // loop), which would invalidate the iterator computed
4155  // earlier.
4156  return BackedgeTakenCounts.find(L)->second = Result;
4157 }
4158 
4159 /// forgetLoop - This method should be called by the client when it has
4160 /// changed a loop in a way that may affect ScalarEvolution's ability to
4161 /// compute a trip count, or if the loop is deleted.
4162 void ScalarEvolution::forgetLoop(const Loop *L) {
4163  // Drop any stored trip count value.
4164  DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
4165  BackedgeTakenCounts.find(L);
4166  if (BTCPos != BackedgeTakenCounts.end()) {
4167  BTCPos->second.clear();
4168  BackedgeTakenCounts.erase(BTCPos);
4169  }
4170 
4171  // Drop information about expressions based on loop-header PHIs.
4172  SmallVector<Instruction *, 16> Worklist;
4173  PushLoopPHIs(L, Worklist);
4174 
4175  SmallPtrSet<Instruction *, 8> Visited;
4176  while (!Worklist.empty()) {
4177  Instruction *I = Worklist.pop_back_val();
4178  if (!Visited.insert(I)) continue;
4179 
4180  ValueExprMapType::iterator It =
4181  ValueExprMap.find_as(static_cast<Value *>(I));
4182  if (It != ValueExprMap.end()) {
4183  forgetMemoizedResults(It->second);
4184  ValueExprMap.erase(It);
4185  if (PHINode *PN = dyn_cast<PHINode>(I))
4186  ConstantEvolutionLoopExitValue.erase(PN);
4187  }
4188 
4189  PushDefUseChildren(I, Worklist);
4190  }
4191 
4192  // Forget all contained loops too, to avoid dangling entries in the
4193  // ValuesAtScopes map.
4194  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4195  forgetLoop(*I);
4196 }
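// A minimal client sketch (assumes an in-scope ScalarEvolution &SE and a
// transform that has just rewritten the exit condition of loop L):
//
//   SE.forgetLoop(L);                               // drop cached counts
//   const SCEV *BTC = SE.getBackedgeTakenCount(L);  // recomputed fresh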
4197 
4198 /// forgetValue - This method should be called by the client when it has
4199 /// changed a value in a way that may affect its value, or which may
4200 /// disconnect it from a def-use chain linking it to a loop.
4201 void ScalarEvolution::forgetValue(Value *V) {
4202  Instruction *I = dyn_cast<Instruction>(V);
4203  if (!I) return;
4204 
4205  // Drop information about expressions based on loop-header PHIs.
4206  SmallVector<Instruction *, 16> Worklist;
4207  Worklist.push_back(I);
4208 
4209  SmallPtrSet<Instruction *, 8> Visited;
4210  while (!Worklist.empty()) {
4211  I = Worklist.pop_back_val();
4212  if (!Visited.insert(I)) continue;
4213 
4214  ValueExprMapType::iterator It =
4215  ValueExprMap.find_as(static_cast<Value *>(I));
4216  if (It != ValueExprMap.end()) {
4217  forgetMemoizedResults(It->second);
4218  ValueExprMap.erase(It);
4219  if (PHINode *PN = dyn_cast<PHINode>(I))
4220  ConstantEvolutionLoopExitValue.erase(PN);
4221  }
4222 
4223  PushDefUseChildren(I, Worklist);
4224  }
4225 }
4226 
4227 /// getExact - Get the exact loop backedge taken count considering all loop
4228 /// exits. A computable result can only be returned for loops with a single exit.
4229 /// Returning the minimum taken count among all exits is incorrect because one
4230 /// of the loop's exit limits may have been skipped. HowFarToZero assumes that
4231 /// the limit of each loop test is never skipped. This is a valid assumption as
4232 /// long as the loop exits via that test. For precise results, it is the
4233 /// caller's responsibility to specify the relevant loop exit using
4234 /// getExact(ExitingBlock, SE).
4235 const SCEV *
4236 ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
4237  // If any exits were not computable, the loop is not computable.
4238  if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
4239 
4240  // We need exactly one computable exit.
4241  if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
4242  assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
4243 
4244  const SCEV *BECount = 0;
4245  for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4246  ENT != 0; ENT = ENT->getNextExit()) {
4247 
4248  assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
4249 
4250  if (!BECount)
4251  BECount = ENT->ExactNotTaken;
4252  else if (BECount != ENT->ExactNotTaken)
4253  return SE->getCouldNotCompute();
4254  }
4255  assert(BECount && "Invalid not taken count for loop exit");
4256  return BECount;
4257 }
4258 
4259 /// getExact - Get the exact not taken count for this loop exit.
4260 const SCEV *
4261 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
4262  ScalarEvolution *SE) const {
4263  for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4264  ENT != 0; ENT = ENT->getNextExit()) {
4265 
4266  if (ENT->ExitingBlock == ExitingBlock)
4267  return ENT->ExactNotTaken;
4268  }
4269  return SE->getCouldNotCompute();
4270 }
4271 
4272 /// getMax - Get the max backedge taken count for the loop.
4273 const SCEV *
4274 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
4275  return Max ? Max : SE->getCouldNotCompute();
4276 }
4277 
4278 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
4279  ScalarEvolution *SE) const {
4280  if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S))
4281  return true;
4282 
4283  if (!ExitNotTaken.ExitingBlock)
4284  return false;
4285 
4286  for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4287  ENT != 0; ENT = ENT->getNextExit()) {
4288 
4289  if (ENT->ExactNotTaken != SE->getCouldNotCompute()
4290  && SE->hasOperand(ENT->ExactNotTaken, S)) {
4291  return true;
4292  }
4293  }
4294  return false;
4295 }
4296 
4297 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
4298 /// computable exit into a persistent ExitNotTakenInfo array.
4299 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
4300  SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
4301  bool Complete, const SCEV *MaxCount) : Max(MaxCount) {
4302 
4303  if (!Complete)
4304  ExitNotTaken.setIncomplete();
4305 
4306  unsigned NumExits = ExitCounts.size();
4307  if (NumExits == 0) return;
4308 
4309  ExitNotTaken.ExitingBlock = ExitCounts[0].first;
4310  ExitNotTaken.ExactNotTaken = ExitCounts[0].second;
4311  if (NumExits == 1) return;
4312 
4313  // Handle the rare case of multiple computable exits.
4314  ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1];
4315 
4316  ExitNotTakenInfo *PrevENT = &ExitNotTaken;
4317  for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) {
4318  PrevENT->setNextExit(ENT);
4319  ENT->ExitingBlock = ExitCounts[i].first;
4320  ENT->ExactNotTaken = ExitCounts[i].second;
4321  }
4322 }
4323 
4324 /// clear - Invalidate this result and free the ExitNotTakenInfo array.
4325 void ScalarEvolution::BackedgeTakenInfo::clear() {
4326  ExitNotTaken.ExitingBlock = 0;
4327  ExitNotTaken.ExactNotTaken = 0;
4328  delete[] ExitNotTaken.getNextExit();
4329 }
4330 
4331 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
4332 /// of the specified loop will execute.
4333 ScalarEvolution::BackedgeTakenInfo
4334 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
4335  SmallVector<BasicBlock *, 8> ExitingBlocks;
4336  L->getExitingBlocks(ExitingBlocks);
4337 
4338  // Examine all exits and pick the most conservative values.
4339  const SCEV *MaxBECount = getCouldNotCompute();
4340  bool CouldComputeBECount = true;
4341  SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
4342  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
4343  ExitLimit EL = ComputeExitLimit(L, ExitingBlocks[i]);
4344  if (EL.Exact == getCouldNotCompute())
4345  // We couldn't compute an exact value for this exit, so
4346  // we won't be able to compute an exact value for the loop.
4347  CouldComputeBECount = false;
4348  else
4349  ExitCounts.push_back(std::make_pair(ExitingBlocks[i], EL.Exact));
4350 
4351  if (MaxBECount == getCouldNotCompute())
4352  MaxBECount = EL.Max;
4353  else if (EL.Max != getCouldNotCompute()) {
4354  // We cannot take the "min" MaxBECount, because non-unit stride loops may
4355  // skip some loop tests. Taking the max over the exits is sufficiently
4356  // conservative. TODO: We could do better taking into consideration
4357  // that (1) the loop has unit stride (2) the last loop test is
4358  // less-than/greater-than (3) any loop test is less-than/greater-than AND
4359  // falls through some constant number of times less than the other tests.
4360  MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, EL.Max);
4361  }
4362  }
4363 
4364  return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
4365 }
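// A sketch with hypothetical counts: if one exit's not-taken count is 7 and
// another's is %n, the exact backedge-taken count is SCEVCouldNotCompute
// (the exits disagree), while the max counts of the exits are combined with
// getUMaxFromMismatchedTypes; as noted above, taking the minimum could be
// wrong when a non-unit stride skips one exit's test.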
4366 
4367 /// ComputeExitLimit - Compute the number of times the backedge of the specified
4368 /// loop will execute if it exits via the specified block.
4369 ScalarEvolution::ExitLimit
4370 ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
4371 
4372  // Okay, we've chosen an exiting block. See what condition causes us to
4373  // exit at this block.
4374  //
4375  // FIXME: we should be able to handle switch instructions (with a single exit)
4376  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
4377  if (ExitBr == 0) return getCouldNotCompute();
4378  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
4379 
4380  // At this point, we know we have a conditional branch that determines whether
4381  // the loop is exited. However, we don't know if the branch is executed each
4382  // time through the loop. If not, then the execution count of the branch will
4383  // not be equal to the trip count of the loop.
4384  //
4385  // Currently we check for this by checking to see if the Exit branch goes to
4386  // the loop header. If so, we know it will always execute the same number of
4387  // times as the loop. We also handle the case where the exit block *is* the
4388  // loop header. This is common for un-rotated loops.
4389  //
4390  // If both of those tests fail, walk up the unique predecessor chain to the
4391  // header, stopping if there is an edge that doesn't exit the loop. If the
4392  // header is reached, the execution count of the branch will be equal to the
4393  // trip count of the loop.
4394  //
4395  // More extensive analysis could be done to handle more cases here.
4396  //
4397  if (ExitBr->getSuccessor(0) != L->getHeader() &&
4398  ExitBr->getSuccessor(1) != L->getHeader() &&
4399  ExitBr->getParent() != L->getHeader()) {
4400  // The simple checks failed, try climbing the unique predecessor chain
4401  // up to the header.
4402  bool Ok = false;
4403  for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
4404  BasicBlock *Pred = BB->getUniquePredecessor();
4405  if (!Pred)
4406  return getCouldNotCompute();
4407  TerminatorInst *PredTerm = Pred->getTerminator();
4408  for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
4409  BasicBlock *PredSucc = PredTerm->getSuccessor(i);
4410  if (PredSucc == BB)
4411  continue;
4412  // If the predecessor has a successor that isn't BB and isn't
4413  // outside the loop, assume the worst.
4414  if (L->contains(PredSucc))
4415  return getCouldNotCompute();
4416  }
4417  if (Pred == L->getHeader()) {
4418  Ok = true;
4419  break;
4420  }
4421  BB = Pred;
4422  }
4423  if (!Ok)
4424  return getCouldNotCompute();
4425  }
4426 
4427  // Proceed to the next level to examine the exit condition expression.
4428  return ComputeExitLimitFromCond(L, ExitBr->getCondition(),
4429  ExitBr->getSuccessor(0),
4430  ExitBr->getSuccessor(1),
4431  /*IsSubExpr=*/false);
4432 }
4433 
4434 /// ComputeExitLimitFromCond - Compute the number of times the
4435 /// backedge of the specified loop will execute if its exit condition
4436 /// were a conditional branch on ExitCond with successors TBB and FBB.
4437 ///
4438 /// @param IsSubExpr is true if ExitCond does not directly control the exit
4439 /// branch. In this case, we cannot assume that the loop only exits when the
4440 /// condition is true and cannot infer that failing to meet the condition prior
4441 /// to integer wraparound results in undefined behavior.
4442 ScalarEvolution::ExitLimit
4443 ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
4444  Value *ExitCond,
4445  BasicBlock *TBB,
4446  BasicBlock *FBB,
4447  bool IsSubExpr) {
4448  // Check if the controlling expression for this loop is an And or Or.
4449  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
4450  if (BO->getOpcode() == Instruction::And) {
4451  // Recurse on the operands of the and.
4452  bool EitherMayExit = L->contains(TBB);
4453  ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
4454  IsSubExpr || EitherMayExit);
4455  ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
4456  IsSubExpr || EitherMayExit);
4457  const SCEV *BECount = getCouldNotCompute();
4458  const SCEV *MaxBECount = getCouldNotCompute();
4459  if (EitherMayExit) {
4460  // Both conditions must be true for the loop to continue executing.
4461  // Choose the less conservative count.
4462  if (EL0.Exact == getCouldNotCompute() ||
4463  EL1.Exact == getCouldNotCompute())
4464  BECount = getCouldNotCompute();
4465  else
4466  BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
4467  if (EL0.Max == getCouldNotCompute())
4468  MaxBECount = EL1.Max;
4469  else if (EL1.Max == getCouldNotCompute())
4470  MaxBECount = EL0.Max;
4471  else
4472  MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
4473  } else {
4474  // Both conditions must be true at the same time for the loop to exit.
4475  // For now, be conservative.
4476  assert(L->contains(FBB) && "Loop block has no successor in loop!");
4477  if (EL0.Max == EL1.Max)
4478  MaxBECount = EL0.Max;
4479  if (EL0.Exact == EL1.Exact)
4480  BECount = EL0.Exact;
4481  }
4482 
4483  return ExitLimit(BECount, MaxBECount);
4484  }
4485  if (BO->getOpcode() == Instruction::Or) {
4486  // Recurse on the operands of the or.
4487  bool EitherMayExit = L->contains(FBB);
4488  ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
4489  IsSubExpr || EitherMayExit);
4490  ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
4491  IsSubExpr || EitherMayExit);
4492  const SCEV *BECount = getCouldNotCompute();
4493  const SCEV *MaxBECount = getCouldNotCompute();
4494  if (EitherMayExit) {
4495  // Both conditions must be false for the loop to continue executing.
4496  // Choose the less conservative count.
4497  if (EL0.Exact == getCouldNotCompute() ||
4498  EL1.Exact == getCouldNotCompute())
4499  BECount = getCouldNotCompute();
4500  else
4501  BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
4502  if (EL0.Max == getCouldNotCompute())
4503  MaxBECount = EL1.Max;
4504  else if (EL1.Max == getCouldNotCompute())
4505  MaxBECount = EL0.Max;
4506  else
4507  MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
4508  } else {
4509  // Both conditions must be false at the same time for the loop to exit.
4510  // For now, be conservative.
4511  assert(L->contains(TBB) && "Loop block has no successor in loop!");
4512  if (EL0.Max == EL1.Max)
4513  MaxBECount = EL0.Max;
4514  if (EL0.Exact == EL1.Exact)
4515  BECount = EL0.Exact;
4516  }
4517 
4518  return ExitLimit(BECount, MaxBECount);
4519  }
4520  }
4521 
4522  // With an icmp, it may be feasible to compute an exact backedge-taken count.
4523  // Proceed to the next level to examine the icmp.
4524  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
4525  return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, IsSubExpr);
4526 
4527  // Check for a constant condition. These are normally stripped out by
4528  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
4529  // preserve the CFG and is temporarily leaving constant conditions
4530  // in place.
4531  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
4532  if (L->contains(FBB) == !CI->getZExtValue())
4533  // The backedge is always taken.
4534  return getCouldNotCompute();
4535  else
4536  // The backedge is never taken.
4537  return getConstant(CI->getType(), 0);
4538  }
4539 
4540  // If it's not an integer or pointer comparison then compute it the hard way.
4541  return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
4542 }
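// ---- Concrete instance (standalone sketch) of the umin rule used for an
// And exit condition above: a loop that continues while both i < n and
// i < m runs exactly min(n, m) iterations, so the two sub-conditions'
// counts are merged with an unsigned min, as EL0 and EL1 are merged above.
#include <algorithm>
#include <cassert>
#include <cstdint>

static uint64_t tripCountAnd(uint64_t n, uint64_t m) {
  uint64_t Trips = 0;
  for (uint64_t i = 0; i < n && i < m; ++i) // exits when either test fails
    ++Trips;
  return Trips;
}

int main() {
  assert(tripCountAnd(5, 9) == std::min<uint64_t>(5, 9));
  assert(tripCountAnd(7, 3) == std::min<uint64_t>(7, 3));
  return 0;
}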
4543 
4544 /// ComputeExitLimitFromICmp - Compute the number of times the
4545 /// backedge of the specified loop will execute if its exit condition
4546 /// were a conditional branch on the ICmpInst ExitCond with successors TBB and FBB.
4547 ScalarEvolution::ExitLimit
4548 ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L,
4549  ICmpInst *ExitCond,
4550  BasicBlock *TBB,
4551  BasicBlock *FBB,
4552  bool IsSubExpr) {
4553 
4554  // If the condition was exit on true, convert the condition to exit on false
4555  ICmpInst::Predicate Cond;
4556  if (!L->contains(FBB))
4557  Cond = ExitCond->getPredicate();
4558  else
4559  Cond = ExitCond->getInversePredicate();
4560 
4561  // Handle common loops like: for (X = "string"; *X; ++X)
4562  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
4563  if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
4564  ExitLimit ItCnt =
4565  ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond);
4566  if (ItCnt.hasAnyInfo())
4567  return ItCnt;
4568  }
4569 
4570  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
4571  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
4572 
4573  // Try to evaluate any dependencies out of the loop.
4574  LHS = getSCEVAtScope(LHS, L);
4575  RHS = getSCEVAtScope(RHS, L);
4576 
4577  // At this point, we would like to compute how many iterations of the
4578  // loop the predicate will return true for these inputs.
4579  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
4580  // If there is a loop-invariant, force it into the RHS.
4581  std::swap(LHS, RHS);
4582  Cond = ICmpInst::getSwappedPredicate(Cond);
4583  }
4584 
4585  // Simplify the operands before analyzing them.
4586  (void)SimplifyICmpOperands(Cond, LHS, RHS);
4587 
4588  // If we have a comparison of a chrec against a constant, try to use value
4589  // ranges to answer this query.
4590  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
4591  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
4592  if (AddRec->getLoop() == L) {
4593  // Form the constant range.
4594  ConstantRange CompRange(
4595  ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
4596 
4597  const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
4598  if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
4599  }
4600 
4601  switch (Cond) {
4602  case ICmpInst::ICMP_NE: { // while (X != Y)
4603  // Convert to: while (X-Y != 0)
4604  ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, IsSubExpr);
4605  if (EL.hasAnyInfo()) return EL;
4606  break;
4607  }
4608  case ICmpInst::ICMP_EQ: { // while (X == Y)
4609  // Convert to: while (X-Y == 0)
4610  ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
4611  if (EL.hasAnyInfo()) return EL;
4612  break;
4613  }
4614  case ICmpInst::ICMP_SLT:
4615  case ICmpInst::ICMP_ULT: { // while (X < Y)
4616  bool IsSigned = Cond == ICmpInst::ICMP_SLT;
4617  ExitLimit EL = HowManyLessThans(LHS, RHS, L, IsSigned, IsSubExpr);
4618  if (EL.hasAnyInfo()) return EL;
4619  break;
4620  }
4621  case ICmpInst::ICMP_SGT:
4622  case ICmpInst::ICMP_UGT: { // while (X > Y)
4623  bool IsSigned = Cond == ICmpInst::ICMP_SGT;
4624  ExitLimit EL = HowManyGreaterThans(LHS, RHS, L, IsSigned, IsSubExpr);
4625  if (EL.hasAnyInfo()) return EL;
4626  break;
4627  }
4628  default:
4629 #if 0
4630  dbgs() << "ComputeBackedgeTakenCount ";
4631  if (ExitCond->getOperand(0)->getType()->isUnsigned())
4632  dbgs() << "[unsigned] ";
4633  dbgs() << *LHS << " "
4634  << Instruction::getOpcodeName(Instruction::ICmp)
4635  << " " << *RHS << "\n";
4636 #endif
4637  break;
4638  }
4639  return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
4640 }
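// ---- The ICMP_NE case above rewrites "while (X != Y)" as
// "while (X-Y != 0)" and asks HowFarToZero for the first iteration at
// which the difference reaches zero; with a unit stride in wrapping
// (mod 2^BW) arithmetic that is simply Y - X. A standalone check of the
// identity with 8-bit wraparound:
#include <cassert>
#include <cstdint>

static unsigned tripCountNE(uint8_t Start, uint8_t End) {
  unsigned Trips = 0;
  for (uint8_t X = Start; X != End; ++X) // unit stride, wraps mod 256
    ++Trips;
  return Trips;
}

int main() {
  // The body executes End - Start times in mod-256 arithmetic, even when
  // the induction variable wraps past 255.
  assert(tripCountNE(10, 14) == uint8_t(14 - 10));
  assert(tripCountNE(250, 3) == uint8_t(3 - 250)); // 9 iterations
  return 0;
}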
4641 
4642 static ConstantInt *
4643 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
4644  ScalarEvolution &SE) {
4645  const SCEV *InVal = SE.getConstant(C);
4646  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
4647  assert(isa<SCEVConstant>(Val) &&
4648  "Evaluation of SCEV at constant didn't fold correctly?");
4649  return cast<SCEVConstant>(Val)->getValue();
4650 }
4651 
4652 /// ComputeLoadConstantCompareExitLimit - Given an exit condition of
4653 /// 'icmp op load X, cst', try to see if we can compute the backedge
4654 /// execution count.
4655 ScalarEvolution::ExitLimit
4656 ScalarEvolution::ComputeLoadConstantCompareExitLimit(
4657  LoadInst *LI,
4658  Constant *RHS,
4659  const Loop *L,
4660  ICmpInst::Predicate predicate) {
4661 
4662  if (LI->isVolatile()) return getCouldNotCompute();
4663 
4664  // Check to see if the loaded pointer is a getelementptr of a global.
4665  // TODO: Use SCEV instead of manually grubbing with GEPs.
4666  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
4667  if (!GEP) return getCouldNotCompute();
4668 
4669  // Make sure that it is really a constant global we are gepping, with an
4670  // initializer, and make sure the first IDX is really 0.
4671  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
4672  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
4673  GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
4674  !cast<Constant>(GEP->getOperand(1))->isNullValue())
4675  return getCouldNotCompute();
4676 
4677  // Okay, we allow one non-constant index into the GEP instruction.
4678  Value *VarIdx = 0;
4679  std::vector<Constant*> Indexes;
4680  unsigned VarIdxNum = 0;
4681  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
4682  if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
4683  Indexes.push_back(CI);
4684  } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
4685  if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
4686  VarIdx = GEP->getOperand(i);
4687  VarIdxNum = i-2;
4688  Indexes.push_back(0);
4689  }
4690 
4691  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
4692  if (!VarIdx)
4693  return getCouldNotCompute();
4694 
4695  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
4696  // Check to see if X is a loop variant variable value now.
4697  const SCEV *Idx = getSCEV(VarIdx);
4698  Idx = getSCEVAtScope(Idx, L);
4699 
4700  // We can only recognize very limited forms of loop index expressions, in
4701  // particular, only affine AddRec's like {C1,+,C2}.
4702  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
4703  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
4704  !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
4705  !isa<SCEVConstant>(IdxExpr->getOperand(1)))
4706  return getCouldNotCompute();
4707 
4708  unsigned MaxSteps = MaxBruteForceIterations;
4709  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
4710  ConstantInt *ItCst = ConstantInt::get(
4711  cast<IntegerType>(IdxExpr->getType()), IterationNum);
4712  ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
4713 
4714  // Form the GEP offset.
4715  Indexes[VarIdxNum] = Val;
4716 
4717  Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
4718  Indexes);
4719  if (Result == 0) break; // Cannot compute!
4720 
4721  // Evaluate the condition for this iteration.
4722  Result = ConstantExpr::getICmp(predicate, Result, RHS);
4723  if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
4724  if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
4725 #if 0
4726  dbgs() << "\n***\n*** Computed loop count " << *ItCst
4727  << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
4728  << "***\n";
4729 #endif
4730  ++NumArrayLenItCounts;
4731  return getConstant(ItCst); // Found terminating iteration!
4732  }
4733  }
4734  return getCouldNotCompute();
4735 }
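// ---- Minimal model (standalone sketch) of the brute-force search above:
// with a constant global array and an affine index {0,+,1}, evaluate the
// exit comparison at successive iteration numbers until it first fails.
// That is exactly how "for (X = \"string\"; *X; ++X)" gets its count.
#include <cassert>
#include <cstddef>

static const char Str[] = "string"; // stands in for the constant global GV

static size_t bruteForceTripCount(size_t MaxSteps) {
  for (size_t It = 0; It != MaxSteps; ++It)
    if (Str[It] == 0)  // evaluate the condition for this iteration
      return It;       // found the terminating iteration
  return ~size_t(0);   // analysis limit reached: "could not compute"
}

int main() {
  assert(bruteForceTripCount(100) == 6); // strlen("string")
  return 0;
}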
4736 
4737 
4738 /// CanConstantFold - Return true if we can constant fold an instruction of the
4739 /// specified type, assuming that all operands were constants.
4740 static bool CanConstantFold(const Instruction *I) {
4741  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
4742  isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
4743  isa<LoadInst>(I))
4744  return true;
4745 
4746  if (const CallInst *CI = dyn_cast<CallInst>(I))
4747  if (const Function *F = CI->getCalledFunction())
4748  return canConstantFoldCallTo(F);
4749  return false;
4750 }
4751 
4752 /// Determine whether this instruction can constant evolve within this loop
4753 /// assuming its operands can all constant evolve.
4754 static bool canConstantEvolve(Instruction *I, const Loop *L) {
4755  // An instruction outside of the loop can't be derived from a loop PHI.
4756  if (!L->contains(I)) return false;
4757 
4758  if (isa<PHINode>(I)) {
4759  if (L->getHeader() == I->getParent())
4760  return true;
4761  else
4762  // We don't currently keep track of the control flow needed to evaluate
4763  // PHIs, so we cannot handle PHIs inside of loops.
4764  return false;
4765  }
4766 
4767  // If we won't be able to constant fold this expression even if the operands
4768  // are constants, bail early.
4769  return CanConstantFold(I);
4770 }
4771 
4772 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
4773 /// recursing through each instruction operand until reaching a loop header phi.
4774 static PHINode *
4775 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
4776  DenseMap<Instruction *, PHINode *> &PHIMap) {
4777 
4778  // Otherwise, we can evaluate this instruction if all of its operands are
4779  // constant or derived from a PHI node themselves.
4780  PHINode *PHI = 0;
4781  for (Instruction::op_iterator OpI = UseInst->op_begin(),
4782  OpE = UseInst->op_end(); OpI != OpE; ++OpI) {
4783 
4784  if (isa<Constant>(*OpI)) continue;
4785 
4786  Instruction *OpInst = dyn_cast<Instruction>(*OpI);
4787  if (!OpInst || !canConstantEvolve(OpInst, L)) return 0;
4788 
4789  PHINode *P = dyn_cast<PHINode>(OpInst);
4790  if (!P)
4791  // If this operand is already visited, reuse the prior result.
4792  // We may have P != PHI if this is the deepest point at which the
4793  // inconsistent paths meet.
4794  P = PHIMap.lookup(OpInst);
4795  if (!P) {
4796  // Recurse and memoize the results, whether a phi is found or not.
4797  // This recursive call invalidates pointers into PHIMap.
4798  P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
4799  PHIMap[OpInst] = P;
4800  }
4801  if (P == 0) return 0; // Not evolving from PHI
4802  if (PHI && PHI != P) return 0; // Evolving from multiple different PHIs.
4803  PHI = P;
4804  }
4805  // This is an expression evolving from a constant PHI!
4806  return PHI;
4807 }
4808 
4809 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
4810 /// in the loop that V is derived from. We allow arbitrary operations along the
4811 /// way, but the operands of an operation must either be constants or a value
4812 /// derived from a constant PHI. If this expression does not fit with these
4813 /// constraints, return null.
4814 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
4815  Instruction *I = dyn_cast<Instruction>(V);
4816  if (I == 0 || !canConstantEvolve(I, L)) return 0;
4817 
4818  if (PHINode *PN = dyn_cast<PHINode>(I)) {
4819  return PN;
4820  }
4821 
4822  // Record non-constant instructions contained by the loop.
4823  DenseMap<Instruction *, PHINode *> PHIMap;
4824  return getConstantEvolvingPHIOperands(I, L, PHIMap);
4825 }
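// ---- Shape of value that getConstantEvolvingPHI accepts, as real C++
// (a standalone sketch): every operand chain bottoms out in constants or
// in the single header PHI, modeled here by the variable i, so the value
// can be evaluated one iteration at a time from a constant start.
#include <cassert>

static int iterationsWhileCondHolds() {
  int Trips = 0;
  for (int i = 0; i * 3 + 1 <= 40; i += 2) // i plays the header-PHI role
    ++Trips;                               // i*3+1 evolves from i alone
  return Trips;
}

int main() {
  // i = 0,2,...,12 keep 3*i+1 <= 40; i = 14 gives 43 and exits, so the
  // condition holds for 7 iterations.
  assert(iterationsWhileCondHolds() == 7);
  return 0;
}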
4826 
4827 /// EvaluateExpression - Given an expression that passes the
4828 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
4829 /// in the loop has the value PHIVal. If we can't fold this expression for some
4830 /// reason, return null.
4831 static Constant *EvaluateExpression(Value *V, const Loop *L,
4832  DenseMap<Instruction *, Constant *> &Vals,
4833  const DataLayout *TD,
4834  const TargetLibraryInfo *TLI) {
4835  // Convenient constant check, but redundant for recursive calls.
4836  if (Constant *C = dyn_cast<Constant>(V)) return C;
4837  Instruction *I = dyn_cast<Instruction>(V);
4838  if (!I) return 0;
4839 
4840  if (Constant *C = Vals.lookup(I)) return C;
4841 
4842  // An instruction inside the loop depends on a value outside the loop that we
4843  // weren't given a mapping for, or a value such as a call inside the loop.
4844  if (!canConstantEvolve(I, L)) return 0;
4845 
4846  // An unmapped PHI can be due to a branch or another loop inside this loop,
4847  // or due to this not being the initial iteration through a loop where we
4848  // couldn't compute the evolution of this particular PHI last time.
4849  if (isa<PHINode>(I)) return 0;
4850 
4851  std::vector<Constant*> Operands(I->getNumOperands());
4852 
4853  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4854  Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
4855  if (!Operand) {
4856  Operands[i] = dyn_cast<Constant>(I->getOperand(i));
4857  if (!Operands[i]) return 0;
4858  continue;
4859  }
4860  Constant *C = EvaluateExpression(Operand, L, Vals, TD, TLI);
4861  Vals[Operand] = C;
4862  if (!C) return 0;
4863  Operands[i] = C;
4864  }
4865 
4866  if (CmpInst *CI = dyn_cast<CmpInst>(I))
4867  return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
4868  Operands[1], TD, TLI);
4869  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
4870  if (!LI->isVolatile())
4871  return ConstantFoldLoadFromConstPtr(Operands[0], TD);
4872  }
4873  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD,
4874  TLI);
4875 }
4876 
4877 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
4878 /// in the header of its containing loop, we know the loop executes a
4879 /// constant number of times, and the PHI node is just a recurrence
4880 /// involving constants, fold it.
4881 Constant *
4882 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
4883  const APInt &BEs,
4884  const Loop *L) {
4885  DenseMap<PHINode*, Constant*>::const_iterator I =
4886  ConstantEvolutionLoopExitValue.find(PN);
4887  if (I != ConstantEvolutionLoopExitValue.end())
4888  return I->second;
4889 
4890  if (BEs.ugt(MaxBruteForceIterations))
4891  return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
4892 
4893  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
4894 
4895  DenseMap<Instruction *, Constant *> CurrentIterVals;
4896  BasicBlock *Header = L->getHeader();
4897  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
4898 
4899  // Since the loop is canonicalized, the PHI node must have two entries. One
4900  // entry must be a constant (coming in from outside of the loop), and the
4901  // second must be derived from the same PHI.
4902  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4903  PHINode *PHI = 0;
4904  for (BasicBlock::iterator I = Header->begin();
4905  (PHI = dyn_cast<PHINode>(I)); ++I) {
4906  Constant *StartCST =
4907  dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
4908  if (StartCST == 0) continue;
4909  CurrentIterVals[PHI] = StartCST;
4910  }
4911  if (!CurrentIterVals.count(PN))
4912  return RetVal = 0;
4913 
4914  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4915 
4916  // Execute the loop symbolically to determine the exit value.
4917  if (BEs.getActiveBits() >= 32)
4918  return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
4919 
4920  unsigned NumIterations = BEs.getZExtValue(); // must be in range
4921  unsigned IterationNum = 0;
4922  for (; ; ++IterationNum) {
4923  if (IterationNum == NumIterations)
4924  return RetVal = CurrentIterVals[PN]; // Got exit value!
4925 
4926  // Compute the value of the PHIs for the next iteration.
4927  // EvaluateExpression adds non-phi values to the CurrentIterVals map.
4928  DenseMap<Instruction *, Constant *> NextIterVals;
4929  Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD,
4930  TLI);
4931  if (NextPHI == 0)
4932  return 0; // Couldn't evaluate!
4933  NextIterVals[PN] = NextPHI;
4934 
4935  bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
4936 
4937  // Also evaluate the other PHI nodes. However, we don't get to stop if we
4938  // cease to be able to evaluate one of them or if they stop evolving,
4939  // because that doesn't necessarily prevent us from computing PN.
4940  SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
4941  for (DenseMap<Instruction *, Constant *>::const_iterator
4942  I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
4943  PHINode *PHI = dyn_cast<PHINode>(I->first);
4944  if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
4945  PHIsToCompute.push_back(std::make_pair(PHI, I->second));
4946  }
4947  // We use two distinct loops because EvaluateExpression may invalidate any
4948  // iterators into CurrentIterVals.
4949  for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
4950  I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
4951  PHINode *PHI = I->first;
4952  Constant *&NextPHI = NextIterVals[PHI];
4953  if (!NextPHI) { // Not already computed.
4954  Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
4955  NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
4956  }
4957  if (NextPHI != I->second)
4958  StoppedEvolving = false;
4959  }
4960 
4961  // If all entries in CurrentIterVals == NextIterVals then we can stop
4962  // iterating, the loop can't continue to change.
4963  if (StoppedEvolving)
4964  return RetVal = CurrentIterVals[PN];
4965 
4966  CurrentIterVals.swap(NextIterVals);
4967  }
4968 }
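// ---- The symbolic execution above with plain integers (a standalone
// sketch): given a constant backedge-taken count BEs, iterate the
// recurrence the header PHI describes and return the value it holds on
// exit. A "+3" backedge expression and a constant start stand in for the
// general PHI map.
#include <cassert>
#include <cstdint>

static int64_t constantEvolutionExitValue(int64_t Start, uint64_t BEs) {
  int64_t PN = Start;              // CurrentIterVals[PN] = StartCST
  for (uint64_t It = 0; It != BEs; ++It)
    PN += 3;                       // NextPHI = EvaluateExpression(BEValue)
  return PN;                       // value after BEs backedge traversals
}

int main() {
  // {5,+,3} taken around the backedge 10 times exits holding 5 + 3*10.
  assert(constantEvolutionExitValue(5, 10) == 35);
  return 0;
}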
4969 
4970 /// ComputeExitCountExhaustively - If the loop is known to execute a
4971 /// constant number of times (the condition evolves only from constants),
4972 /// try to evaluate a few iterations of the loop until the exit
4973 /// condition gets a value of ExitWhen (true or false). If we cannot
4974 /// evaluate the trip count of the loop, return getCouldNotCompute().
4975 const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
4976  Value *Cond,
4977  bool ExitWhen) {
4978  PHINode *PN = getConstantEvolvingPHI(Cond, L);
4979  if (PN == 0) return getCouldNotCompute();
4980 
4981  // If the loop is canonicalized, the PHI will have exactly two entries.
4982  // That's the only form we support here.
4983  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
4984 
4985  DenseMap<Instruction *, Constant *> CurrentIterVals;
4986  BasicBlock *Header = L->getHeader();
4987  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
4988 
4989  // One entry must be a constant (coming in from outside of the loop), and the
4990  // second must be derived from the same PHI.
4991  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4992  PHINode *PHI = 0;
4993  for (BasicBlock::iterator I = Header->begin();
4994  (PHI = dyn_cast<PHINode>(I)); ++I) {
4995  Constant *StartCST =
4996  dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
4997  if (StartCST == 0) continue;
4998  CurrentIterVals[PHI] = StartCST;
4999  }
5000  if (!CurrentIterVals.count(PN))
5001  return getCouldNotCompute();
5002 
5003  // Okay, we found a PHI node that defines the trip count of this loop. Execute
5004  // the loop symbolically to determine when the condition gets a value of
5005  // "ExitWhen".
5006 
5007  unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
5008  for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
5009  ConstantInt *CondVal =
5010  dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
5011  TD, TLI));
5012 
5013  // Couldn't symbolically evaluate.
5014  if (!CondVal) return getCouldNotCompute();
5015 
5016  if (CondVal->getValue() == uint64_t(ExitWhen)) {
5017  ++NumBruteForceTripCountsComputed;
5018  return getConstant(Type::getInt32Ty(getContext()), IterationNum);
5019  }
5020 
5021  // Update all the PHI nodes for the next iteration.
5022  DenseMap<Instruction *, Constant *> NextIterVals;
5023 
5024  // Create a list of which PHIs we need to compute. We want to do this before
5025  // calling EvaluateExpression on them because that may invalidate iterators
5026  // into CurrentIterVals.
5027  SmallVector<PHINode *, 8> PHIsToCompute;
5028  for (DenseMap<Instruction *, Constant *>::const_iterator
5029  I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
5030  PHINode *PHI = dyn_cast<PHINode>(I->first);
5031  if (!PHI || PHI->getParent() != Header) continue;
5032  PHIsToCompute.push_back(PHI);
5033  }
5034  for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
5035  E = PHIsToCompute.end(); I != E; ++I) {
5036  PHINode *PHI = *I;
5037  Constant *&NextPHI = NextIterVals[PHI];
5038  if (NextPHI) continue; // Already computed!
5039 
5040  Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
5041  NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
5042  }
5043  CurrentIterVals.swap(NextIterVals);
5044  }
5045 
5046  // Too many iterations were needed to evaluate.
5047  return getCouldNotCompute();
5048 }
5049 
5050 /// getSCEVAtScope - Return a SCEV expression for the specified value
5051 /// at the specified scope in the program. The L value specifies the loop
5052 /// nest in which to evaluate the expression: null means the top level, and
5053 /// a specified loop means the point immediately inside of that loop.
5054 ///
5055 /// This method can be used to compute the exit value for a variable defined
5056 /// in a loop by querying what the value will hold in the parent loop.
5057 ///
5058 /// In the case that a relevant loop exit value cannot be computed, the
5059 /// original value V is returned.
5060 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
5061  // Check to see if we've folded this expression at this loop before.
5062  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = ValuesAtScopes[V];
5063  for (unsigned u = 0; u < Values.size(); u++) {
5064  if (Values[u].first == L)
5065  return Values[u].second ? Values[u].second : V;
5066  }
5067  Values.push_back(std::make_pair(L, static_cast<const SCEV *>(0)));
5068  // Otherwise compute it.
5069  const SCEV *C = computeSCEVAtScope(V, L);
5070  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values2 = ValuesAtScopes[V];
5071  for (unsigned u = Values2.size(); u > 0; u--) {
5072  if (Values2[u - 1].first == L) {
5073  Values2[u - 1].second = C;
5074  break;
5075  }
5076  }
5077  return C;
5078 }
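// ---- The caching protocol above in miniature (standalone sketch, with a
// pair of ints as a hypothetical stand-in for the (SCEV, Loop) key): a
// null placeholder is recorded before computing, so a recursive query for
// the same pair returns the value unchanged instead of recursing forever;
// the placeholder is then patched with the computed result.
#include <cassert>
#include <map>
#include <optional>
#include <utility>

static std::map<std::pair<int, int>, std::optional<int>> Cache;

static int computeAtScope(int V, int L); // the real work, stubbed below

static int getAtScope(int V, int L) {
  auto It = Cache.find({V, L});
  if (It != Cache.end())
    return It->second ? *It->second : V; // placeholder => return V as-is
  Cache[{V, L}] = std::nullopt;          // placeholder guards recursion
  int C = computeAtScope(V, L);
  Cache[{V, L}] = C;                     // patch in the real result
  return C;
}

static int computeAtScope(int V, int L) { return V + L; } // toy stand-in

int main() {
  assert(getAtScope(4, 2) == 6);
  assert(getAtScope(4, 2) == 6); // second query is served from the cache
  return 0;
}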
5079 
5080 /// This builds up a Constant using the ConstantExpr interface. That way, we
5081 /// will return Constants for objects which aren't represented by a
5082 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
5083 /// Returns NULL if the SCEV isn't representable as a Constant.
5084 static Constant *BuildConstantFromSCEV(const SCEV *V) {
5085  switch (V->getSCEVType()) {
5086  default: // TODO: smax, umax.
5087  case scCouldNotCompute:
5088  case scAddRecExpr:
5089  break;
5090  case scConstant:
5091  return cast<SCEVConstant>(V)->getValue();
5092  case scUnknown:
5093  return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
5094  case scSignExtend: {
5095  const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
5096  if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
5097  return ConstantExpr::getSExt(CastOp, SS->getType());
5098  break;
5099  }
5100  case scZeroExtend: {
5101  const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
5102  if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
5103  return ConstantExpr::getZExt(CastOp, SZ->getType());
5104  break;
5105  }
5106  case scTruncate: {
5107  const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
5108  if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
5109  return ConstantExpr::getTrunc(CastOp, ST->getType());
5110  break;
5111  }
5112  case scAddExpr: {
5113  const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
5114  if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
5115  if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
5116  unsigned AS = PTy->getAddressSpace();
5117  Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
5118  C = ConstantExpr::getBitCast(C, DestPtrTy);
5119  }
5120  for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
5121  Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
5122  if (!C2) return 0;
5123 
5124  // First pointer!
5125  if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
5126  unsigned AS = C2->getType()->getPointerAddressSpace();
5127  std::swap(C, C2);
5128  Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
5129  // The offsets have been converted to bytes. We can add bytes to an
5130  // i8* by GEP with the byte count in the first index.
5131  C = ConstantExpr::getBitCast(C, DestPtrTy);
5132  }
5133 
5134  // Don't bother trying to sum two pointers. We probably can't
5135  // statically compute a load that results from it anyway.
5136  if (C2->getType()->isPointerTy())
5137  return 0;
5138 
5139  if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
5140  if (PTy->getElementType()->isStructTy())
5141  C2 = ConstantExpr::getIntegerCast(
5142  C2, Type::getInt32Ty(C->getContext()), true);
5143  C = ConstantExpr::getGetElementPtr(C, C2);
5144  } else
5145  C = ConstantExpr::getAdd(C, C2);
5146  }
5147  return C;
5148  }
5149  break;
5150  }
5151  case scMulExpr: {
5152  const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
5153  if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
5154  // Don't bother with pointers at all.
5155  if (C->getType()->isPointerTy()) return 0;
5156  for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
5157  Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
5158  if (!C2 || C2->getType()->isPointerTy()) return 0;
5159  C = ConstantExpr::getMul(C, C2);
5160  }
5161  return C;
5162  }
5163  break;
5164  }
5165  case scUDivExpr: {
5166  const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
5167  if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
5168  if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
5169  if (LHS->getType() == RHS->getType())
5170  return ConstantExpr::getUDiv(LHS, RHS);
5171  break;
5172  }
5173  }
5174  return 0;
5175 }
5176 
5177 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
5178  if (isa<SCEVConstant>(V)) return V;
5179 
5180  // If this instruction is evolved from a constant-evolving PHI, compute the
5181  // exit value from the loop without using SCEVs.
5182  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
5183  if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
5184  const Loop *LI = (*this->LI)[I->getParent()];
5185  if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
5186  if (PHINode *PN = dyn_cast<PHINode>(I))
5187  if (PN->getParent() == LI->getHeader()) {
5188  // Okay, there is no closed form solution for the PHI node. Check
5189  // to see if the loop that contains it has a known backedge-taken
5190  // count. If so, we may be able to force computation of the exit
5191  // value.
5192  const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
5193  if (const SCEVConstant *BTCC =
5194  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
5195  // Okay, we know how many times the containing loop executes. If
5196  // this is a constant evolving PHI node, get the final value at
5197  // the specified iteration number.
5198  Constant *RV = getConstantEvolutionLoopExitValue(PN,
5199  BTCC->getValue()->getValue(),
5200  LI);
5201  if (RV) return getSCEV(RV);
5202  }
5203  }
5204 
5205  // Okay, this is an expression that we cannot symbolically evaluate
5206  // into a SCEV. Check to see if it's possible to symbolically evaluate
5207  // the arguments into constants, and if so, try to constant propagate the
5208  // result. This is particularly useful for computing loop exit values.
5209  if (CanConstantFold(I)) {
5210  SmallVector<Constant *, 4> Operands;
5211  bool MadeImprovement = false;
5212  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
5213  Value *Op = I->getOperand(i);
5214  if (Constant *C = dyn_cast<Constant>(Op)) {
5215  Operands.push_back(C);
5216  continue;
5217  }
5218 
5219  // If any of the operands is non-constant and if they are
5220  // non-integer and non-pointer, don't even try to analyze them
5221  // with scev techniques.
5222  if (!isSCEVable(Op->getType()))
5223  return V;
5224 
5225  const SCEV *OrigV = getSCEV(Op);
5226  const SCEV *OpV = getSCEVAtScope(OrigV, L);
5227  MadeImprovement |= OrigV != OpV;
5228 
5229  Constant *C = BuildConstantFromSCEV(OpV);
5230  if (!C) return V;
5231  if (C->getType() != Op->getType())
5232  C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
5233  Op->getType(),
5234  false),
5235  C, Op->getType());
5236  Operands.push_back(C);
5237  }
5238 
5239  // Check to see if getSCEVAtScope actually made an improvement.
5240  if (MadeImprovement) {
5241  Constant *C = 0;
5242  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
5243  C = ConstantFoldCompareInstOperands(CI->getPredicate(),
5244  Operands[0], Operands[1], TD,
5245  TLI);
5246  else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
5247  if (!LI->isVolatile())
5248  C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
5249  } else
5250  C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
5251  Operands, TD, TLI);
5252  if (!C) return V;
5253  return getSCEV(C);
5254  }
5255  }
5256  }
5257 
5258  // This is some other type of SCEVUnknown, just return it.
5259  return V;
5260  }
5261 
5262  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
5263  // Avoid performing the look-up in the common case where the specified
5264  // expression has no loop-variant portions.
5265  for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
5266  const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
5267  if (OpAtScope != Comm->getOperand(i)) {
5268  // Okay, at least one of these operands is loop variant but might be
5269  // foldable. Build a new instance of the folded commutative expression.
5270  SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
5271  Comm->op_begin()+i);
5272  NewOps.push_back(OpAtScope);
5273 
5274  for (++i; i != e; ++i) {
5275  OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
5276  NewOps.push_back(OpAtScope);
5277  }
5278  if (isa<SCEVAddExpr>(Comm))
5279  return getAddExpr(NewOps);
5280  if (isa<SCEVMulExpr>(Comm))
5281  return getMulExpr(NewOps);
5282  if (isa<SCEVSMaxExpr>(Comm))
5283  return getSMaxExpr(NewOps);
5284  if (isa<SCEVUMaxExpr>(Comm))
5285  return getUMaxExpr(NewOps);
5286  llvm_unreachable("Unknown commutative SCEV type!");
5287  }
5288  }
5289  // If we got here, all operands are loop invariant.
5290  return Comm;
5291  }
5292 
5293  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
5294  const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
5295  const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
5296  if (LHS == Div->getLHS() && RHS == Div->getRHS())
5297  return Div; // must be loop invariant
5298  return getUDivExpr(LHS, RHS);
5299  }
5300 
5301  // If this is a loop recurrence for a loop that does not contain L, then we
5302  // are dealing with the final value computed by the loop.
5303  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
5304  // First, attempt to evaluate each operand.
5305  // Avoid performing the look-up in the common case where the specified
5306  // expression has no loop-variant portions.
5307  for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
5308  const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
5309  if (OpAtScope == AddRec->getOperand(i))
5310  continue;
5311 
5312  // Okay, at least one of these operands is loop variant but might be
5313  // foldable. Build a new instance of the folded commutative expression.
5314  SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
5315  AddRec->op_begin()+i);
5316  NewOps.push_back(OpAtScope);
5317  for (++i; i != e; ++i)
5318  NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
5319 
5320  const SCEV *FoldedRec =
5321  getAddRecExpr(NewOps, AddRec->getLoop(),
5322  AddRec->getNoWrapFlags(SCEV::FlagNW));
5323  AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
5324  // The addrec may be folded to a nonrecurrence, for example, if the
5325  // induction variable is multiplied by zero after constant folding. Go
5326  // ahead and return the folded value.
5327  if (!AddRec)
5328  return FoldedRec;
5329  break;
5330  }
5331 
5332  // If the scope is outside the addrec's loop, evaluate it by using the
5333  // loop exit value of the addrec.
5334  if (!AddRec->getLoop()->contains(L)) {
5335  // To evaluate this recurrence, we need to know how many times the AddRec
5336  // loop iterates. Compute this now.
5337  const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
5338  if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
5339 
5340  // Then, evaluate the AddRec.
5341  return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
5342  }
5343 
5344  return AddRec;
5345  }
5346 
5347  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
5348  const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
5349  if (Op == Cast->getOperand())
5350  return Cast; // must be loop invariant
5351  return getZeroExtendExpr(Op, Cast->getType());
5352  }
5353 
5354  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
5355  const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
5356  if (Op == Cast->getOperand())
5357  return Cast; // must be loop invariant
5358  return getSignExtendExpr(Op, Cast->getType());
5359  }
5360 
5361  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
5362  const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
5363  if (Op == Cast->getOperand())
5364  return Cast; // must be loop invariant
5365  return getTruncateExpr(Op, Cast->getType());
5366  }
5367 
5368  llvm_unreachable("Unknown SCEV type!");
5369 }
5370 
5371 /// getSCEVAtScope - This is a convenience function which does
5372 /// getSCEVAtScope(getSCEV(V), L).
5373 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
5374  return getSCEVAtScope(getSCEV(V), L);
5375 }
5376 
5377 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
5378 /// following equation:
5379 ///
5380 /// A * X = B (mod N)
5381 ///
5382 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
5383 /// A and B isn't important.
5384 ///
5385 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
5386 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
5387  ScalarEvolution &SE) {
5388  uint32_t BW = A.getBitWidth();
5389  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
5390  assert(A != 0 && "A must be non-zero.");
5391 
5392  // 1. D = gcd(A, N)
5393  //
5394  // The gcd of A and N may have only one prime factor: 2. The number of
5395  // trailing zeros in A is its multiplicity.
5396  uint32_t Mult2 = A.countTrailingZeros();
5397  // D = 2^Mult2
5398 
5399  // 2. Check if B is divisible by D.
5400  //
5401  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
5402  // is not less than the multiplicity of this prime factor in D.
5403  if (B.countTrailingZeros() < Mult2)
5404  return SE.getCouldNotCompute();
5405 
5406  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
5407  // modulo (N / D).
5408  //
5409  // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
5410  // bit width during computations.
5411  APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
5412  APInt Mod(BW + 1, 0);
5413  Mod.setBit(BW - Mult2); // Mod = N / D
5414  APInt I = AD.multiplicativeInverse(Mod);
5415 
5416  // 4. Compute the minimum unsigned root of the equation:
5417  // I * (B / D) mod (N / D)
5418  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
5419 
5420  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
5421  // bits.
5422  return SE.getConstant(Result.trunc(BW));
5423 }
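// ---- The same computation with 64-bit machine arithmetic (a standalone
// sketch assuming BW == 64 and C++20 <bit>): solve A*X == B (mod 2^64)
// for the minimum unsigned X. Newton's iteration doubles the number of
// correct low bits of the inverse per step, so five steps cover 64 bits.
#include <bit>
#include <cassert>
#include <cstdint>

static uint64_t inverseOdd(uint64_t A) { // odd A is invertible mod 2^64
  uint64_t X = A;                        // A*A == 1 (mod 8): 3 good bits
  for (int i = 0; i < 5; ++i)
    X *= 2 - A * X;                      // 3 -> 6 -> 12 -> 24 -> 48 -> 96
  return X;
}

static bool solveLinEq(uint64_t A, uint64_t B, uint64_t &X) {
  assert(A != 0 && "A must be non-zero.");
  unsigned Mult2 = std::countr_zero(A);      // D = gcd(A, 2^64) = 2^Mult2
  if (unsigned(std::countr_zero(B)) < Mult2) // B must be divisible by D
    return false;
  uint64_t AD  = A >> Mult2;                 // A/D, odd by construction
  uint64_t Mod = Mult2 ? ((1ULL << (64 - Mult2)) - 1) : ~0ULL;
  X = (inverseOdd(AD) * (B >> Mult2)) & Mod; // minimum unsigned root
  return true;
}

int main() {
  uint64_t X;
  assert(solveLinEq(6, 10, X) && 6 * X == 10); // product wraps mod 2^64
  assert(!solveLinEq(4, 2, X));                // 2^2 does not divide 2
  return 0;
}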
5424 
5425 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
5426 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
5427 /// might be the same) or two SCEVCouldNotCompute objects.
5428 ///
5429 static std::pair<const SCEV *,const SCEV *>
5430 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
5431  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
5432  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
5433  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
5434  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
5435 
5436  // We currently can only solve this if the coefficients are constants.
5437  if (!LC || !MC || !NC) {
5438  const SCEV *CNC = SE.getCouldNotCompute();
5439  return std::make_pair(CNC, CNC);
5440  }
5441 
5442  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
5443  const APInt &L = LC->getValue()->getValue();
5444  const APInt &M = MC->getValue()->getValue();
5445  const APInt &N = NC->getValue()->getValue();
5446  APInt Two(BitWidth, 2);
5447  APInt Four(BitWidth, 4);
5448 
5449  {
5450  using namespace APIntOps;
5451  const APInt& C = L;
5452  // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
5453  // The B coefficient is M-N/2
5454  APInt B(M);
5455  B -= sdiv(N,Two);
5456 
5457  // The A coefficient is N/2
5458  APInt A(N.sdiv(Two));
5459 
5460  // Compute the B^2-4ac term.
5461  APInt SqrtTerm(B);
5462  SqrtTerm *= B;
5463  SqrtTerm -= Four * (A * C);
5464 
5465  if (SqrtTerm.isNegative()) {
5466  // The loop is provably infinite.
5467  const SCEV *CNC = SE.getCouldNotCompute();
5468  return std::make_pair(CNC, CNC);
5469  }
5470 
5471  // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
5472  // integer value or else APInt::sqrt() will assert.
5473  APInt SqrtVal(SqrtTerm.sqrt());
5474 
5475  // Compute the two solutions for the quadratic formula.
5476  // The divisions must be performed as signed divisions.
5477  APInt NegB(-B);
5478  APInt TwoA(A << 1);
5479  if (TwoA.isMinValue()) {
5480  const SCEV *CNC = SE.getCouldNotCompute();
5481  return std::make_pair(CNC, CNC);
5482  }
5483 
5484  LLVMContext &Context = SE.getContext();
5485 
5486  ConstantInt *Solution1 =
5487  ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
5488  ConstantInt *Solution2 =
5489  ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
5490 
5491  return std::make_pair(SE.getConstant(Solution1),
5492  SE.getConstant(Solution2));
5493  } // end APIntOps namespace
5494 }
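// ---- Numeric check (standalone sketch) of the coefficient conversion
// used above: a chrec {L,+,M,+,N} at iteration x equals
// L + M*x + N*x*(x-1)/2, which matches A*x^2 + B*x + C with A = N/2,
// B = M - N/2, C = L (exact for even N, mirroring the sdiv by Two above).
#include <cassert>
#include <cstdint>

static int64_t chrecAt(int64_t L, int64_t M, int64_t N, int64_t x) {
  return L + M * x + N * x * (x - 1) / 2; // closed form of the chrec
}

static int64_t polyAt(int64_t L, int64_t M, int64_t N, int64_t x) {
  const int64_t A = N / 2, B = M - N / 2, C = L;
  return A * x * x + B * x + C;
}

int main() {
  // {-4,+,1,+,2} folds to x^2 - 4: both forms agree, and x = 2 is the
  // nonnegative root SolveQuadraticEquation would report.
  for (int64_t x = 0; x < 8; ++x)
    assert(chrecAt(-4, 1, 2, x) == polyAt(-4, 1, 2, x));
  assert(chrecAt(-4, 1, 2, 2) == 0);
  return 0;
}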
5495 
5496 /// HowFarToZero - Return the number of times a backedge comparing the specified
5497 /// value to zero will execute. If not computable, return CouldNotCompute.
5498 ///
5499 /// This is only used for loops with an "x != y" exit test. The exit condition is
5500 /// now expressed as a single expression, V = x-y. So the exit test is
5501 /// effectively V != 0. We know and take advantage of the fact that this
5502 /// expression is only used in a comparison-with-zero context.
5503 ScalarEvolution::ExitLimit
5504 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr) {
5505  // If the value is a constant
5506  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
5507  // If the value is already zero, the branch will execute zero times.
5508  if (C->getValue()->isZero()) return C;
5509  return getCouldNotCompute(); // Otherwise it will loop infinitely.
5510  }
5511 
5512  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
5513  if (!AddRec || AddRec->getLoop() != L)
5514  return getCouldNotCompute();
5515 
5516  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
5517  // the quadratic equation to solve it.
5518  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
5519  std::pair<const SCEV *,const SCEV *> Roots =
5520  SolveQuadraticEquation(AddRec, *this);
5521  const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
5522  const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
5523  if (R1 && R2) {
5524 #if 0
5525  dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
5526  << " sol#2: " << *R2 << "\n";
5527 #endif
5528  // Pick the smallest positive root value.
5529  if (ConstantInt *CB =
5530  dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
5531  R1->getValue(),
5532  R2->getValue()))) {
5533  if (CB->getZExtValue() == false)
5534  std::swap(R1, R2); // R1 is the minimum root now.
5535 
5536  // We can only use this value if the chrec ends up with an exact zero
5537  // value at this index. When solving for "X*X != 5", for example, we
5538  // should not accept a root of 2.
5539  const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
5540  if (Val->isZero())
5541  return R1; // We found a quadratic root!
5542  }
5543  }
5544  return getCouldNotCompute();
5545  }
5546 
5547  // Otherwise we can only handle this if it is affine.
5548  if (!AddRec->isAffine())
5549  return getCouldNotCompute();
5550 
5551  // If this is an affine expression, the execution count of this branch is
5552  // the minimum unsigned root of the following equation:
5553  //
5554  // Start + Step*N = 0 (mod 2^BW)
5555  //
5556  // equivalent to:
5557  //
5558  // Step*N = -Start (mod 2^BW)
5559  //
5560  // where BW is the common bit width of Start and Step.
5561 
5562  // Get the initial value for the loop.
5563  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
5564  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
5565 
5566  // For now we handle only constant steps.
5567  //
5568  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
5569  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
5570  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
5571  // We have not yet seen any such cases.
5572  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
5573  if (StepC == 0 || StepC->getValue()->equalsInt(0))
5574  return getCouldNotCompute();
5575 
5576  // For positive steps (counting up until unsigned overflow):
5577  // N = -Start/Step (as unsigned)
5578  // For negative steps (counting down to zero):
5579  // N = Start/-Step
5580  // First compute the unsigned distance from zero in the direction of Step.
5581  bool CountDown = StepC->getValue()->getValue().isNegative();
5582  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
5583 
5584  // Handle unitary steps, which cannot wraparound.
5585  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
5586  // N = Distance (as unsigned)
5587  if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
5588  ConstantRange CR = getUnsignedRange(Start);
5589  const SCEV *MaxBECount;
5590  if (!CountDown && CR.getUnsignedMin().isMinValue())
5591  // When counting up, the worst starting value is 1, not 0.
5592  MaxBECount = CR.getUnsignedMax().isMinValue()
5593  ? getConstant(APInt::getMinValue(CR.getBitWidth()))
5594  : getConstant(APInt::getMaxValue(CR.getBitWidth()));
5595  else
5596  MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
5597  : -CR.getUnsignedMin());
5598  return ExitLimit(Distance, MaxBECount);
5599  }
5600 
5601  // If the recurrence is known not to wraparound, unsigned divide computes the
5602  // back edge count. (Ideally we would have an "isexact" bit for udiv). We know
5603  // that the value will either become zero (and thus the loop terminates), that
5604  // the loop will terminate through some other exit condition first, or that
5605  // the loop has undefined behavior. This means we can't "miss" the exit
5606  // value, even with nonunit stride.
5607  //
5608  // This is only valid for expressions that directly compute the loop exit. It
5609  // is invalid for subexpressions in which the loop may exit through this
5610  // branch even if this subexpression is false. In that case, the trip count
5611  // computed by this udiv could be smaller than the number of well-defined
5612  // iterations.
5613  if (!IsSubExpr && AddRec->getNoWrapFlags(SCEV::FlagNW))
5614  return getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
5615 
5616  // Then, try to solve the above equation provided that Start is constant.
5617  if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
5618  return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
5619  -StartC->getValue()->getValue(),
5620  *this);
5621  return getCouldNotCompute();
5622 }
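// ---- The unitary-step case in machine arithmetic (standalone sketch):
// with stride -1 the unsigned distance to zero *is* the backedge-taken
// count, with no wraparound concern, matching "N = Distance" above.
#include <cassert>
#include <cstdint>

static uint32_t stepsToZero(uint32_t Start) {
  uint32_t N = 0;
  for (uint32_t X = Start; X != 0; --X) // the chrec {Start,+,-1}
    ++N;
  return N;
}

int main() {
  assert(stepsToZero(17) == 17); // Distance == Start when counting down
  return 0;
}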
5623 
5624 /// HowFarToNonZero - Return the number of times a backedge checking the
5625 /// specified value for nonzero will execute. If not computable, return
5626 /// CouldNotCompute.
5627 ScalarEvolution::ExitLimit
5628 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
5629  // Loops that look like: while (X == 0) are very strange indeed. We don't
5630  // handle them yet except for the trivial case. This could be expanded in the
5631  // future as needed.
5632 
5633  // If the value is a constant, check to see if it is known to be non-zero
5634  // already. If so, the backedge will execute zero times.
5635  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
5636  if (!C->getValue()->isNullValue())
5637  return getConstant(C->getType(), 0);
5638  return getCouldNotCompute(); // Otherwise it will loop infinitely.
5639  }
5640 
5641  // We could implement others, but I really doubt anyone writes loops like
5642  // this, and if they did, they would already be constant folded.
5643  return getCouldNotCompute();
5644 }
5645 
5646 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
5647 /// (which may not be an immediate predecessor) that has exactly one
5648 /// successor from which BB is reachable, paired with that successor;
5649 /// the pair is null if no such block is found.
5650 ///
5651 std::pair<BasicBlock *, BasicBlock *>
5652 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
5653  // If the block has a unique predecessor, then there is no path from the
5654  // predecessor to the block that does not go through the direct edge
5655  // from the predecessor to the block.
5656  if (BasicBlock *Pred = BB->getSinglePredecessor())
5657  return std::make_pair(Pred, BB);
5658 
5659  // A loop's header is defined to be a block that dominates the loop.
5660  // If the header has a unique predecessor outside the loop, it must be
5661  // a block that has exactly one successor that can reach the loop.
5662  if (Loop *L = LI->getLoopFor(BB))
5663  return std::make_pair(L->getLoopPredecessor(), L->getHeader());
5664 
5665  return std::pair<BasicBlock *, BasicBlock *>();
5666 }
5667 
5668 /// HasSameValue - SCEV structural equivalence is usually sufficient for
5669 /// testing whether two expressions are equal, however for the purposes of
5670 /// looking for a condition guarding a loop, it can be useful to be a little
5671 /// more general, since a front-end may have replicated the controlling
5672 /// expression.
5673 ///
5674 static bool HasSameValue(const SCEV *A, const SCEV *B) {
5675  // Quick check to see if they are the same SCEV.
5676  if (A == B) return true;
5677 
5678  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
5679  // two different instructions with the same value. Check for this case.
5680  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
5681  if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
5682  if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
5683  if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
5684  if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
5685  return true;
5686 
5687  // Otherwise assume they may have a different value.
5688  return false;
5689 }
5690 
5691 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
5692 /// predicate Pred. Return true iff any changes were made.
5693 ///
5694 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
5695  const SCEV *&LHS, const SCEV *&RHS,
5696  unsigned Depth) {
5697  bool Changed = false;
5698 
5699  // If we hit the max recursion limit bail out.
5700  if (Depth >= 3)
5701  return false;
5702 
5703  // Canonicalize a constant to the right side.
5704  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
5705  // Check for both operands constant.
5706  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
5707  if (ConstantExpr::getICmp(Pred,
5708  LHSC->getValue(),
5709  RHSC->getValue())->isNullValue())
5710  goto trivially_false;
5711  else
5712  goto trivially_true;
5713  }
5714  // Otherwise swap the operands to put the constant on the right.
5715  std::swap(LHS, RHS);
5716  Pred = ICmpInst::getSwappedPredicate(Pred);
5717  Changed = true;
5718  }
5719 
5720  // If we're comparing an addrec with a value which is loop-invariant in the
5721  // addrec's loop, put the addrec on the left. Also make a dominance check,
5722  // as both operands could be addrecs loop-invariant in each other's loop.
5723  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
5724  const Loop *L = AR->getLoop();
5725  if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
5726  std::swap(LHS, RHS);
5727  Pred = ICmpInst::getSwappedPredicate(Pred);
5728  Changed = true;
5729  }
5730  }
5731 
5732  // If there's a constant operand, canonicalize comparisons with boundary
5733  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
5734  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
5735  const APInt &RA = RC->getValue()->getValue();
5736  switch (Pred) {
5737  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5738  case ICmpInst::ICMP_EQ:
5739  case ICmpInst::ICMP_NE:
5740  // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
5741  if (!RA)
5742  if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
5743  if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
5744  if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
5745  ME->getOperand(0)->isAllOnesValue()) {
5746  RHS = AE->getOperand(1);
5747  LHS = ME->getOperand(1);
5748  Changed = true;
5749  }
5750  break;
5751  case ICmpInst::ICMP_UGE:
5752  if ((RA - 1).isMinValue()) {
5753  Pred = ICmpInst::ICMP_NE;
5754  RHS = getConstant(RA - 1);
5755  Changed = true;
5756  break;
5757  }
5758  if (RA.isMaxValue()) {
5759  Pred = ICmpInst::ICMP_EQ;
5760  Changed = true;
5761  break;
5762  }
5763  if (RA.isMinValue()) goto trivially_true;
5764 
5765  Pred = ICmpInst::ICMP_UGT;
5766  RHS = getConstant(RA - 1);
5767  Changed = true;
5768  break;
5769  case ICmpInst::ICMP_ULE:
5770  if ((RA + 1).isMaxValue()) {
5771  Pred = ICmpInst::ICMP_NE;
5772  RHS = getConstant(RA + 1);
5773  Changed = true;
5774  break;
5775  }
5776  if (RA.isMinValue()) {
5777  Pred = ICmpInst::ICMP_EQ;
5778  Changed = true;
5779  break;
5780  }
5781  if (RA.isMaxValue()) goto trivially_true;
5782 
5783  Pred = ICmpInst::ICMP_ULT;
5784  RHS = getConstant(RA + 1);
5785  Changed = true;
5786  break;
5787  case ICmpInst::ICMP_SGE:
5788  if ((RA - 1).isMinSignedValue()) {
5789  Pred = ICmpInst::ICMP_NE;
5790  RHS = getConstant(RA - 1);
5791  Changed = true;
5792  break;
5793  }
5794  if (RA.isMaxSignedValue()) {
5795  Pred = ICmpInst::ICMP_EQ;
5796  Changed = true;
5797  break;
5798  }
5799  if (RA.isMinSignedValue()) goto trivially_true;
5800 
5801  Pred = ICmpInst::ICMP_SGT;
5802  RHS = getConstant(RA - 1);
5803  Changed = true;
5804  break;
5805  case ICmpInst::ICMP_SLE:
5806  if ((RA + 1).isMaxSignedValue()) {
5807  Pred = ICmpInst::ICMP_NE;
5808  RHS = getConstant(RA + 1);
5809  Changed = true;
5810  break;
5811  }
5812  if (RA.isMinSignedValue()) {
5813  Pred = ICmpInst::ICMP_EQ;
5814  Changed = true;
5815  break;
5816  }
5817  if (RA.isMaxSignedValue()) goto trivially_true;
5818 
5819  Pred = ICmpInst::ICMP_SLT;
5820  RHS = getConstant(RA + 1);
5821  Changed = true;
5822  break;
5823  case ICmpInst::ICMP_UGT:
5824  if (RA.isMinValue()) {
5825  Pred = ICmpInst::ICMP_NE;
5826  Changed = true;
5827  break;
5828  }
5829  if ((RA + 1).isMaxValue()) {
5830  Pred = ICmpInst::ICMP_EQ;
5831  RHS = getConstant(RA + 1);
5832  Changed = true;
5833  break;
5834  }
5835  if (RA.isMaxValue()) goto trivially_false;
5836  break;
5837  case ICmpInst::ICMP_ULT:
5838  if (RA.isMaxValue()) {
5839  Pred = ICmpInst::ICMP_NE;
5840  Changed = true;
5841  break;
5842  }
5843  if ((RA - 1).isMinValue()) {
5844  Pred = ICmpInst::ICMP_EQ;
5845  RHS = getConstant(RA - 1);
5846  Changed = true;
5847  break;
5848  }
5849  if (RA.isMinValue()) goto trivially_false;
5850  break;
5851  case ICmpInst::ICMP_SGT:
5852  if (RA.isMinSignedValue()) {
5853  Pred = ICmpInst::ICMP_NE;
5854  Changed = true;
5855  break;
5856  }
5857  if ((RA + 1).isMaxSignedValue()) {
5858  Pred = ICmpInst::ICMP_EQ;
5859  RHS = getConstant(RA + 1);
5860  Changed = true;
5861  break;
5862  }
5863  if (RA.isMaxSignedValue()) goto trivially_false;
5864  break;
5865  case ICmpInst::ICMP_SLT:
5866  if (RA.isMaxSignedValue()) {
5867  Pred = ICmpInst::ICMP_NE;
5868  Changed = true;
5869  break;
5870  }
5871  if ((RA - 1).isMinSignedValue()) {
5872  Pred = ICmpInst::ICMP_EQ;
5873  RHS = getConstant(RA - 1);
5874  Changed = true;
5875  break;
5876  }
5877  if (RA.isMinSignedValue()) goto trivially_false;
5878  break;
5879  }
5880  }
5881 
5882  // Check for obvious equality.
5883  if (HasSameValue(LHS, RHS)) {
5884  if (ICmpInst::isTrueWhenEqual(Pred))
5885  goto trivially_true;
5886  if (ICmpInst::isFalseWhenEqual(Pred))
5887  goto trivially_false;
5888  }
5889 
5890  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
5891  // adding or subtracting 1 from one of the operands.
5892  switch (Pred) {
5893  case ICmpInst::ICMP_SLE:
5894  if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
5895  RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5896  SCEV::FlagNSW);
5897  Pred = ICmpInst::ICMP_SLT;
5898  Changed = true;
5899  } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
5900  LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5901  SCEV::FlagNSW);
5902  Pred = ICmpInst::ICMP_SLT;
5903  Changed = true;
5904  }
5905  break;
5906  case ICmpInst::ICMP_SGE:
5907  if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
5908  RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5909  SCEV::FlagNSW);
5910  Pred = ICmpInst::ICMP_SGT;
5911  Changed = true;
5912  } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
5913  LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5914  SCEV::FlagNSW);
5915  Pred = ICmpInst::ICMP_SGT;
5916  Changed = true;
5917  }
5918  break;
5919  case ICmpInst::ICMP_ULE:
5920  if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
5921  RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5922  SCEV::FlagNUW);
5923  Pred = ICmpInst::ICMP_ULT;
5924  Changed = true;
5925  } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
5926  LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5927  SCEV::FlagNUW);
5928  Pred = ICmpInst::ICMP_ULT;
5929  Changed = true;
5930  }
5931  break;
5932  case ICmpInst::ICMP_UGE:
5933  if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
5934  RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5935  SCEV::FlagNUW);
5936  Pred = ICmpInst::ICMP_UGT;
5937  Changed = true;
5938  } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
5939  LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5940  SCEV::FlagNUW);
5941  Pred = ICmpInst::ICMP_UGT;
5942  Changed = true;
5943  }
5944  break;
5945  default:
5946  break;
5947  }
5948 
5949  // TODO: More simplifications are possible here.
5950 
5951  // Recursively simplify until we either hit a recursion limit or nothing
5952  // changes.
5953  if (Changed)
5954  return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
5955 
5956  return Changed;
5957 
5958 trivially_true:
5959  // Return 0 == 0.
5960  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
5961  Pred = ICmpInst::ICMP_EQ;
5962  return true;
5963 
5964 trivially_false:
5965  // Return 0 != 0.
5966  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
5967  Pred = ICmpInst::ICMP_NE;
5968  return true;
5969 }
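// A short example of the rewrites above: the comparison (5 uge %x) is first
// swapped to (%x ule 5) to move the constant to the right-hand side; since
// 5 + 1 cannot wrap, the *-or-equal form is then strengthened to (%x ult 6),
// and the recursive call finds nothing further to rewrite.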
5970 
5971 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
5972  return getSignedRange(S).getSignedMax().isNegative();
5973 }
5974 
5975 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
5976  return getSignedRange(S).getSignedMin().isStrictlyPositive();
5977 }
5978 
5979 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
5980  return !getSignedRange(S).getSignedMin().isNegative();
5981 }
5982 
5983 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
5984  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
5985 }
5986 
5987 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
5988  return isKnownNegative(S) || isKnownPositive(S);
5989 }
5990 
5991 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
5992  const SCEV *LHS, const SCEV *RHS) {
5993  // Canonicalize the inputs first.
5994  (void)SimplifyICmpOperands(Pred, LHS, RHS);
5995 
5996  // If LHS or RHS is an addrec, check to see if the condition is true in
5997  // every iteration of the loop.
5998  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
5999  if (isLoopEntryGuardedByCond(
6000  AR->getLoop(), Pred, AR->getStart(), RHS) &&
6001  isLoopBackedgeGuardedByCond(
6002  AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
6003  return true;
6004  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
6005  if (isLoopEntryGuardedByCond(
6006  AR->getLoop(), Pred, LHS, AR->getStart()) &&
6007  isLoopBackedgeGuardedByCond(
6008  AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
6009  return true;
6010 
6011  // Otherwise see what can be done with known constant ranges.
6012  return isKnownPredicateWithRanges(Pred, LHS, RHS);
6013 }
6014 
6015 bool
6016 ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
6017  const SCEV *LHS, const SCEV *RHS) {
6018  if (HasSameValue(LHS, RHS))
6019  return ICmpInst::isTrueWhenEqual(Pred);
6020 
6021  // This code is split out from isKnownPredicate because it is called from
6022  // within isLoopEntryGuardedByCond.
6023  switch (Pred) {
6024  default:
6025  llvm_unreachable("Unexpected ICmpInst::Predicate value!");
6026  case ICmpInst::ICMP_SGT:
6027  Pred = ICmpInst::ICMP_SLT;
6028  std::swap(LHS, RHS);
6029  case ICmpInst::ICMP_SLT: {
6030  ConstantRange LHSRange = getSignedRange(LHS);
6031  ConstantRange RHSRange = getSignedRange(RHS);
6032  if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
6033  return true;
6034  if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
6035  return false;
6036  break;
6037  }
6038  case ICmpInst::ICMP_SGE:
6039  Pred = ICmpInst::ICMP_SLE;
6040  std::swap(LHS, RHS);
6041  case ICmpInst::ICMP_SLE: {
6042  ConstantRange LHSRange = getSignedRange(LHS);
6043  ConstantRange RHSRange = getSignedRange(RHS);
6044  if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
6045  return true;
6046  if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
6047  return false;
6048  break;
6049  }
6050  case ICmpInst::ICMP_UGT:
6051  Pred = ICmpInst::ICMP_ULT;
6052  std::swap(LHS, RHS);
6053  case ICmpInst::ICMP_ULT: {
6054  ConstantRange LHSRange = getUnsignedRange(LHS);
6055  ConstantRange RHSRange = getUnsignedRange(RHS);
6056  if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
6057  return true;
6058  if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
6059  return false;
6060  break;
6061  }
6062  case ICmpInst::ICMP_UGE:
6063  Pred = ICmpInst::ICMP_ULE;
6064  std::swap(LHS, RHS);
6065  case ICmpInst::ICMP_ULE: {
6066  ConstantRange LHSRange = getUnsignedRange(LHS);
6067  ConstantRange RHSRange = getUnsignedRange(RHS);
6068  if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
6069  return true;
6070  if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
6071  return false;
6072  break;
6073  }
6074  case ICmpInst::ICMP_NE: {
6075  if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
6076  return true;
6077  if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
6078  return true;
6079 
6080  const SCEV *Diff = getMinusSCEV(LHS, RHS);
6081  if (isKnownNonZero(Diff))
6082  return true;
6083  break;
6084  }
6085  case ICmpInst::ICMP_EQ:
6086  // The check at the top of the function catches the case where
6087  // the values are known to be equal.
6088  break;
6089  }
6090  return false;
6091 }
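// For example, if getUnsignedRange(LHS) is [0, 10) and getUnsignedRange(RHS)
// is [10, 20), then for ICMP_ULT the maximum of LHS (9) is ult the minimum of
// RHS (10), so the predicate is known to hold. When the two ranges overlap,
// neither test fires and we fall through to return false, which means "not
// known", not "known false".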
6092 
6093 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
6094 /// protected by a conditional between LHS and RHS. This is used to
6095 /// eliminate casts.
6096 bool
6097 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
6098  ICmpInst::Predicate Pred,
6099  const SCEV *LHS, const SCEV *RHS) {
6100  // Interpret a null as meaning no loop, where there is obviously no guard
6101  // (interprocedural conditions notwithstanding).
6102  if (!L) return true;
6103 
6104  BasicBlock *Latch = L->getLoopLatch();
6105  if (!Latch)
6106  return false;
6107 
6108  BranchInst *LoopContinuePredicate =
6109  dyn_cast<BranchInst>(Latch->getTerminator());
6110  if (!LoopContinuePredicate ||
6111  LoopContinuePredicate->isUnconditional())
6112  return false;
6113 
6114  return isImpliedCond(Pred, LHS, RHS,
6115  LoopContinuePredicate->getCondition(),
6116  LoopContinuePredicate->getSuccessor(0) != L->getHeader());
6117 }
6118 
6119 /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
6120 /// by a conditional between LHS and RHS. This is used to help avoid max
6121 /// expressions in loop trip counts, and to eliminate casts.
6122 bool
6123 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
6124  ICmpInst::Predicate Pred,
6125  const SCEV *LHS, const SCEV *RHS) {
6126  // Interpret a null as meaning no loop, where there is obviously no guard
6127  // (interprocedural conditions notwithstanding).
6128  if (!L) return false;
6129 
6130  // Starting at the loop predecessor, climb up the predecessor chain, as long
6131  // as there are predecessors that can be found that have unique successors
6132  // leading to the original header.
6133  for (std::pair<BasicBlock *, BasicBlock *>
6134  Pair(L->getLoopPredecessor(), L->getHeader());
6135  Pair.first;
6136  Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
6137 
6138  BranchInst *LoopEntryPredicate =
6139  dyn_cast<BranchInst>(Pair.first->getTerminator());
6140  if (!LoopEntryPredicate ||
6141  LoopEntryPredicate->isUnconditional())
6142  continue;
6143 
6144  if (isImpliedCond(Pred, LHS, RHS,
6145  LoopEntryPredicate->getCondition(),
6146  LoopEntryPredicate->getSuccessor(0) != Pair.second))
6147  return true;
6148  }
6149 
6150  return false;
6151 }
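// As an illustration, given the (sketched) CFG
//
//   entry:  br i1 (icmp sgt i32 %n, 0), label %ph, label %exit
//   ph:     br label %header
//
// a query isLoopEntryGuardedByCond(L, ICMP_SGT, %n, 0) starts at the loop
// predecessor %ph, climbs to %entry through the unique-successor chain, and
// proves the predicate from %entry's conditional branch, whose true edge is
// the only way to reach the loop.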
6152 
6153 /// RAII wrapper to prevent recursive application of isImpliedCond.
6154 /// ScalarEvolution's PendingLoopPredicates set must be empty unless we are
6155 /// currently evaluating isImpliedCond.
6156 struct MarkPendingLoopPredicate {
6157  Value *Cond;
6158  DenseSet<Value*> &LoopPreds;
6159  bool Pending;
6160 
6161  MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP)
6162  : Cond(C), LoopPreds(LP) {
6163  Pending = !LoopPreds.insert(Cond).second;
6164  }
6165  ~MarkPendingLoopPredicate() {
6166  if (!Pending)
6167  LoopPreds.erase(Cond);
6168  }
6169 };
6170 
6171 /// isImpliedCond - Test whether the condition described by Pred, LHS,
6172 /// and RHS is true whenever the given Cond value evaluates to true.
6173 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
6174  const SCEV *LHS, const SCEV *RHS,
6175  Value *FoundCondValue,
6176  bool Inverse) {
6177  MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates);
6178  if (Mark.Pending)
6179  return false;
6180 
6181  // Recursively handle And and Or conditions.
6182  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
6183  if (BO->getOpcode() == Instruction::And) {
6184  if (!Inverse)
6185  return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
6186  isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
6187  } else if (BO->getOpcode() == Instruction::Or) {
6188  if (Inverse)
6189  return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
6190  isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
6191  }
6192  }
6193 
6194  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
6195  if (!ICI) return false;
6196 
6197  // Bail if the ICmp's operands' types are wider than the needed type
6198  // before attempting to call getSCEV on them. This avoids infinite
6199  // recursion, since the analysis of widening casts can require loop
6200  // exit condition information for overflow checking, which would
6201  // lead back here.
6202  if (getTypeSizeInBits(LHS->getType()) <
6203  getTypeSizeInBits(ICI->getOperand(0)->getType()))
6204  return false;
6205 
6206  // We have now found a conditional branch that dominates the loop or controls
6207  // the loop latch. Check to see if it is the comparison we are looking for.
6208  ICmpInst::Predicate FoundPred;
6209  if (Inverse)
6210  FoundPred = ICI->getInversePredicate();
6211  else
6212  FoundPred = ICI->getPredicate();
6213 
6214  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
6215  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
6216 
6217  // Balance the types. The case where FoundLHS' type is wider than
6218  // LHS' type is checked for above.
6219  if (getTypeSizeInBits(LHS->getType()) >
6220  getTypeSizeInBits(FoundLHS->getType())) {
6221  if (CmpInst::isSigned(Pred)) {
6222  FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
6223  FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
6224  } else {
6225  FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
6226  FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
6227  }
6228  }
6229 
6230  // Canonicalize the query to match the way instcombine will have
6231  // canonicalized the comparison.
6232  if (SimplifyICmpOperands(Pred, LHS, RHS))
6233  if (LHS == RHS)
6234  return CmpInst::isTrueWhenEqual(Pred);
6235  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
6236  if (FoundLHS == FoundRHS)
6237  return CmpInst::isFalseWhenEqual(FoundPred);
6238 
6239  // Check to see if we can make the LHS or RHS match.
6240  if (LHS == FoundRHS || RHS == FoundLHS) {
6241  if (isa<SCEVConstant>(RHS)) {
6242  std::swap(FoundLHS, FoundRHS);
6243  FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
6244  } else {
6245  std::swap(LHS, RHS);
6246  Pred = ICmpInst::getSwappedPredicate(Pred);
6247  }
6248  }
6249 
6250  // Check whether the found predicate is the same as the desired predicate.
6251  if (FoundPred == Pred)
6252  return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
6253 
6254  // Check whether swapping the found predicate makes it the same as the
6255  // desired predicate.
6256  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
6257  if (isa<SCEVConstant>(RHS))
6258  return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
6259  else
6260  return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
6261  RHS, LHS, FoundLHS, FoundRHS);
6262  }
6263 
6264  // Check whether the actual condition is beyond sufficient.
6265  if (FoundPred == ICmpInst::ICMP_EQ)
6266  if (ICmpInst::isTrueWhenEqual(Pred))
6267  if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
6268  return true;
6269  if (Pred == ICmpInst::ICMP_NE)
6270  if (!ICmpInst::isTrueWhenEqual(FoundPred))
6271  if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
6272  return true;
6273 
6274  // Otherwise assume the worst.
6275  return false;
6276 }
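// For example, if the dominating branch tests (%a ugt %b) and the query asks
// whether (%b ult %a) holds, the operand and predicate swaps above line the
// two comparisons up so that isImpliedCondOperands sees identical operands
// under the same predicate. Likewise a branch establishing (%a == %b) is
// beyond sufficient for any *-or-equal query over the same operands.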
6277 
6278 /// isImpliedCondOperands - Test whether the condition described by Pred,
6279 /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
6280 /// and FoundRHS is true.
6281 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
6282  const SCEV *LHS, const SCEV *RHS,
6283  const SCEV *FoundLHS,
6284  const SCEV *FoundRHS) {
6285  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
6286  FoundLHS, FoundRHS) ||
6287  // ~x < ~y --> x > y
6288  isImpliedCondOperandsHelper(Pred, LHS, RHS,
6289  getNotSCEV(FoundRHS),
6290  getNotSCEV(FoundLHS));
6291 }
6292 
6293 /// isImpliedCondOperandsHelper - Test whether the condition described by
6294 /// Pred, LHS, and RHS is true whenever the condition described by Pred,
6295 /// FoundLHS, and FoundRHS is true.
6296 bool
6297 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
6298  const SCEV *LHS, const SCEV *RHS,
6299  const SCEV *FoundLHS,
6300  const SCEV *FoundRHS) {
6301  switch (Pred) {
6302  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
6303  case ICmpInst::ICMP_EQ:
6304  case ICmpInst::ICMP_NE:
6305  if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
6306  return true;
6307  break;
6308  case ICmpInst::ICMP_SLT:
6309  case ICmpInst::ICMP_SLE:
6310  if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
6311  isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
6312  return true;
6313  break;
6314  case ICmpInst::ICMP_SGT:
6315  case ICmpInst::ICMP_SGE:
6316  if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
6317  isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
6318  return true;
6319  break;
6320  case ICmpInst::ICMP_ULT:
6321  case ICmpInst::ICMP_ULE:
6322  if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
6323  isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
6324  return true;
6325  break;
6326  case ICmpInst::ICMP_UGT:
6327  case ICmpInst::ICMP_UGE:
6328  if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
6329  isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
6330  return true;
6331  break;
6332  }
6333 
6334  return false;
6335 }
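// For instance, in the ICMP_SLT/ICMP_SLE case: knowing FoundLHS slt FoundRHS,
// the queried LHS slt RHS follows whenever LHS sle FoundLHS and
// RHS sge FoundRHS, because then LHS <= FoundLHS < FoundRHS <= RHS. The EQ/NE
// cases only accept an exact operand match, as equality provides no ordering
// that could be relaxed.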
6336 
6337 // Verify if a linear IV with positive stride can overflow when in a
6338 // less-than comparison, knowing the invariant term of the comparison, the
6339 // stride, and the NSW/NUW flags on the recurrence.
6340 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
6341  bool IsSigned, bool NoWrap) {
6342  if (NoWrap) return false;
6343 
6344  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
6345  const SCEV *One = getConstant(Stride->getType(), 1);
6346 
6347  if (IsSigned) {
6348  APInt MaxRHS = getSignedRange(RHS).getSignedMax();
6349  APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
6350  APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
6351  .getSignedMax();
6352 
6353  // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
6354  return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
6355  }
6356 
6357  APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
6358  APInt MaxValue = APInt::getMaxValue(BitWidth);
6359  APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
6360  .getUnsignedMax();
6361 
6362  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
6363  return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
6364 }
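// A worked unsigned example: with i8 operands MaxValue is 255. If the range
// of RHS gives MaxRHS = 250 and the stride is the constant 10 (so
// MaxStrideMinusOne = 9), then MaxValue - MaxStrideMinusOne = 246 ult 250 and
// we report possible overflow: the IV could step from 249 (still below RHS)
// to 259 mod 256 = 3, wrapping past the less-than exit test.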
6365 
6366 // Verify if a linear IV with negative stride can overflow when in a
6367 // greater-than comparison, knowing the invariant term of the comparison,
6368 // the stride, and the NSW/NUW flags on the recurrence.
6369 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
6370  bool IsSigned, bool NoWrap) {
6371  if (NoWrap) return false;
6372 
6373  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
6374  const SCEV *One = getConstant(Stride->getType(), 1);
6375 
6376  if (IsSigned) {
6377  APInt MinRHS = getSignedRange(RHS).getSignedMin();
6378  APInt MinValue = APInt::getSignedMinValue(BitWidth);
6379  APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
6380  .getSignedMax();
6381 
6382  // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
6383  return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
6384  }
6385 
6386  APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
6387  APInt MinValue = APInt::getMinValue(BitWidth);
6388  APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
6389  .getUnsignedMax();
6390 
6391  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
6392  return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
6393 }
6394 
6395 // Compute the backedge taken count knowing the interval difference, the
6396 // stride and presence of the equality in the comparison.
6397 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
6398  bool Equality) {
6399  const SCEV *One = getConstant(Step->getType(), 1);
6400  Delta = Equality ? getAddExpr(Delta, Step)
6401  : getAddExpr(Delta, getMinusSCEV(Step, One));
6402  return getUDivExpr(Delta, Step);
6403 }
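// For example, with Delta = 10 and Step = 3: without equality the count is
// (10 + (3 - 1)) /u 3 = 4, i.e. the ceiling of Delta / Step; with equality
// (a <= rather than < exit test) it is (10 + 3) /u 3 = 4. With Delta = 9 the
// two variants differ: (9 + 2) /u 3 = 3 versus (9 + 3) /u 3 = 4.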
6404 
6405 /// HowManyLessThans - Return the number of times a backedge containing the
6406 /// specified less-than comparison will execute. If not computable, return
6407 /// CouldNotCompute.
6408 ///
6409 /// @param IsSubExpr is true when the LHS < RHS condition does not directly
6410 /// control the branch. In this case, we can only compute an iteration count for
6411 /// a subexpression that cannot overflow before evaluating true.
6412 ScalarEvolution::ExitLimit
6413 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
6414  const Loop *L, bool IsSigned,
6415  bool IsSubExpr) {
6416  // We handle only IV < Invariant
6417  if (!isLoopInvariant(RHS, L))
6418  return getCouldNotCompute();
6419 
6420  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
6421 
6422  // Avoid weird loops
6423  if (!IV || IV->getLoop() != L || !IV->isAffine())
6424  return getCouldNotCompute();
6425 
6426  bool NoWrap = !IsSubExpr &&
6427  IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
6428 
6429  const SCEV *Stride = IV->getStepRecurrence(*this);
6430 
6431  // Avoid negative or zero stride values
6432  if (!isKnownPositive(Stride))
6433  return getCouldNotCompute();
6434 
6435  // Avoid proven overflow cases: this will ensure that the backedge taken
6436  // count will not generate any unsigned overflow. Relaxed no-overflow
6437  // conditions exploit NoWrapFlags, allowing us to optimize in the presence
6438  // of undefined behavior, as in C.
6439  if (!Stride->isOne() && doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
6440  return getCouldNotCompute();
6441 
6442  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
6443  : ICmpInst::ICMP_ULT;
6444  const SCEV *Start = IV->getStart();
6445  const SCEV *End = RHS;
6446  if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
6447  End = IsSigned ? getSMaxExpr(RHS, Start)
6448  : getUMaxExpr(RHS, Start);
6449 
6450  const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
6451 
6452  APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin()
6453  : getUnsignedRange(Start).getUnsignedMin();
6454 
6455  APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
6456  : getUnsignedRange(Stride).getUnsignedMin();
6457 
6458  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
6459  APInt Limit = IsSigned ? APInt::getSignedMaxValue(BitWidth) - (MinStride - 1)
6460  : APInt::getMaxValue(BitWidth) - (MinStride - 1);
6461 
6462  // Although End can be a MAX expression, we estimate MaxEnd considering only
6463  // the case End = RHS. This is safe because in the other case (End - Start)
6464  // is zero, leading to a zero maximum backedge taken count.
6465  APInt MaxEnd =
6466  IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit)
6467  : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit);
6468 
6469  const SCEV *MaxBECount = getCouldNotCompute();
6470  if (isa<SCEVConstant>(BECount))
6471  MaxBECount = BECount;
6472  else
6473  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
6474  getConstant(MinStride), false);
6475 
6476  if (isa<SCEVCouldNotCompute>(MaxBECount))
6477  MaxBECount = BECount;
6478 
6479  return ExitLimit(BECount, MaxBECount);
6480 }
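// As an illustration, for a loop shaped like
//   for (i = Start; i < n; i += s)    // s known positive
// the code above yields BECount = ceil((End - Start) / s) with End = n, or
// End = max(n, Start) when the entry guard cannot be proven, and MaxBECount
// re-evaluates the same formula with the extreme values of the ranges of
// Start, Stride and RHS.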
6481 
6482 ScalarEvolution::ExitLimit
6483 ScalarEvolution::HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
6484  const Loop *L, bool IsSigned,
6485  bool IsSubExpr) {
6486  // We handle only IV > Invariant
6487  if (!isLoopInvariant(RHS, L))
6488  return getCouldNotCompute();
6489 
6490  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
6491 
6492  // Avoid weird loops
6493  if (!IV || IV->getLoop() != L || !IV->isAffine())
6494  return getCouldNotCompute();
6495 
6496  bool NoWrap = !IsSubExpr &&
6497  IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
6498 
6499  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
6500 
6501  // Avoid negative or zero stride values
6502  if (!isKnownPositive(Stride))
6503  return getCouldNotCompute();
6504 
6505  // Avoid proven overflow cases: this will ensure that the backedge taken
6506  // count will not generate any unsigned overflow. Relaxed no-overflow
6507  // conditions exploit NoWrapFlags, allowing us to optimize in the presence
6508  // of undefined behavior, as in C.
6509  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
6510  return getCouldNotCompute();
6511 
6512  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
6513  : ICmpInst::ICMP_UGT;
6514 
6515  const SCEV *Start = IV->getStart();
6516  const SCEV *End = RHS;
6517  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
6518  End = IsSigned ? getSMinExpr(RHS, Start)
6519  : getUMinExpr(RHS, Start);
6520 
6521  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
6522 
6523  APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax()
6524  : getUnsignedRange(Start).getUnsignedMax();
6525 
6526  APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
6527  : getUnsignedRange(Stride).getUnsignedMin();
6528 
6529  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
6530  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
6531  : APInt::getMinValue(BitWidth) + (MinStride - 1);
6532 
6533  // Although End can be a MIN expression, we estimate MinEnd considering only
6534  // the case End = RHS. This is safe because in the other case (Start - End)
6535  // is zero, leading to a zero maximum backedge taken count.
6536  APInt MinEnd =
6537  IsSigned ? APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit)
6538  : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit);
6539 
6540 
6541  const SCEV *MaxBECount = getCouldNotCompute();
6542  if (isa<SCEVConstant>(BECount))
6543  MaxBECount = BECount;
6544  else
6545  MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
6546  getConstant(MinStride), false);
6547 
6548  if (isa<SCEVCouldNotCompute>(MaxBECount))
6549  MaxBECount = BECount;
6550 
6551  return ExitLimit(BECount, MaxBECount);
6552 }
6553 
6554 /// getNumIterationsInRange - Return the number of iterations of this loop that
6555 /// produce values in the specified constant range. Another way of looking at
6556 /// this is that it returns the first iteration number where the value is not
6557 /// in the range, thus computing the exit count. If the iteration count can't
6558 /// be computed, an instance of SCEVCouldNotCompute is returned.
6559 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
6560  ScalarEvolution &SE) const {
6561  if (Range.isFullSet()) // Infinite loop.
6562  return SE.getCouldNotCompute();
6563 
6564  // If the start is a non-zero constant, shift the range to simplify things.
6565  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
6566  if (!SC->getValue()->isZero()) {
6567  SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
6568  Operands[0] = SE.getConstant(SC->getType(), 0);
6569  const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
6570  FlagAnyWrap);
6571  if (const SCEVAddRecExpr *ShiftedAddRec =
6572  dyn_cast<SCEVAddRecExpr>(Shifted))
6573  return ShiftedAddRec->getNumIterationsInRange(
6574  Range.subtract(SC->getValue()->getValue()), SE);
6575  // This is strange and shouldn't happen.
6576  return SE.getCouldNotCompute();
6577  }
6578 
6579  // The only time we can solve this is when we have all constant indices.
6580  // Otherwise, we cannot determine the overflow conditions.
6581  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6582  if (!isa<SCEVConstant>(getOperand(i)))
6583  return SE.getCouldNotCompute();
6584 
6585 
6586  // Okay at this point we know that all elements of the chrec are constants and
6587  // that the start element is zero.
6588 
6589  // First check to see if the range contains zero. If not, the first
6590  // iteration exits.
6591  unsigned BitWidth = SE.getTypeSizeInBits(getType());
6592  if (!Range.contains(APInt(BitWidth, 0)))
6593  return SE.getConstant(getType(), 0);
6594 
6595  if (isAffine()) {
6596  // If this is an affine expression then we have this situation:
6597  // Solve {0,+,A} in Range === Ax in Range
6598 
6599  // We know that zero is in the range. If A is positive then we know that
6600  // the upper value of the range must be the first possible exit value.
6601  // If A is negative then the lower of the range is the last possible loop
6602  // value. Also note that we already checked for a full range.
6603  APInt One(BitWidth,1);
6604  APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
6605  APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
6606 
6607  // The exit value should be (End+A)/A.
6608  APInt ExitVal = (End + A).udiv(A);
6609  ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
6610 
6611  // Evaluate at the exit value. If we really did fall out of the valid
6612  // range, then we computed our trip count, otherwise wrap around or other
6613  // things must have happened.
6614  ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
6615  if (Range.contains(Val->getValue()))
6616  return SE.getCouldNotCompute(); // Something strange happened
6617 
6618  // Ensure that the previous value is in the range. This is a sanity check.
6619  assert(Range.contains(
6620  EvaluateConstantChrecAtConstant(this,
6621  ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
6622  "Linear scev computation is off in a bad way!");
6623  return SE.getConstant(ExitValue);
6624  } else if (isQuadratic()) {
6625  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
6626  // quadratic equation to solve it. To do this, we must frame our problem in
6627  // terms of figuring out when zero is crossed, instead of when
6628  // Range.getUpper() is crossed.
6629  SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
6630  NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
6631  const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
6632  // getNoWrapFlags(FlagNW)
6633  FlagAnyWrap);
6634 
6635  // Next, solve the constructed addrec
6636  std::pair<const SCEV *,const SCEV *> Roots =
6637  SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
6638  const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
6639  const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
6640  if (R1) {
6641  // Pick the smallest positive root value.
6642  if (ConstantInt *CB =
6643  dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
6644  R1->getValue(), R2->getValue()))) {
6645  if (CB->getZExtValue() == false)
6646  std::swap(R1, R2); // R1 is the minimum root now.
6647 
6648  // Make sure the root is not off by one. The returned iteration should
6649  // not be in the range, but the previous one should be. When solving
6650  // for "X*X < 5", for example, we should not return a root of 2.
6651  ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
6652  R1->getValue(),
6653  SE);
6654  if (Range.contains(R1Val->getValue())) {
6655  // The next iteration must be out of the range...
6656  ConstantInt *NextVal =
6657  ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
6658 
6659  R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
6660  if (!Range.contains(R1Val->getValue()))
6661  return SE.getConstant(NextVal);
6662  return SE.getCouldNotCompute(); // Something strange happened
6663  }
6664 
6665  // If R1 was not in the range, then it is a good return value. Make
6666  // sure that R1-1 WAS in the range though, just in case.
6667  ConstantInt *NextVal =
6668  ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
6669  R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
6670  if (Range.contains(R1Val->getValue()))
6671  return R1;
6672  return SE.getCouldNotCompute(); // Something strange happened
6673  }
6674  }
6675  }
6676 
6677  return SE.getCouldNotCompute();
6678 }
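// Affine example for the code above: solving {0,+,3} against the range
// [0, 10) gives A = 3 (positive), End = 10 - 1 = 9, and
// ExitVal = (9 + 3) /u 3 = 4. The chrec evaluates to 12 at iteration 4,
// outside the range, and to 9 at iteration 3, inside it, so 4 is returned as
// the exit count.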
6679 
6680 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
6681  APInt A = C1->getValue()->getValue().abs();
6682  APInt B = C2->getValue()->getValue().abs();
6683  uint32_t ABW = A.getBitWidth();
6684  uint32_t BBW = B.getBitWidth();
6685 
6686  if (ABW > BBW)
6687  B = B.zext(ABW);
6688  else if (ABW < BBW)
6689  A = A.zext(BBW);
6690 
6691  return APIntOps::GreatestCommonDivisor(A, B);
6692 }
6693 
6694 static const APInt srem(const SCEVConstant *C1, const SCEVConstant *C2) {
6695  APInt A = C1->getValue()->getValue();
6696  APInt B = C2->getValue()->getValue();
6697  uint32_t ABW = A.getBitWidth();
6698  uint32_t BBW = B.getBitWidth();
6699 
6700  if (ABW > BBW)
6701  B = B.sext(ABW);
6702  else if (ABW < BBW)
6703  A = A.sext(BBW);
6704 
6705  return APIntOps::srem(A, B);
6706 }
6707 
6708 static const APInt sdiv(const SCEVConstant *C1, const SCEVConstant *C2) {
6709  APInt A = C1->getValue()->getValue();
6710  APInt B = C2->getValue()->getValue();
6711  uint32_t ABW = A.getBitWidth();
6712  uint32_t BBW = B.getBitWidth();
6713 
6714  if (ABW > BBW)
6715  B = B.sext(ABW);
6716  else if (ABW < BBW)
6717  A = A.sext(BBW);
6718 
6719  return APIntOps::sdiv(A, B);
6720 }
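// These three helpers only normalize bit widths before applying the APInt
// operation. For example, gcd on the constants -12 (i32) and 8 (i64) takes
// absolute values (12 and 8), zero-extends the narrower one to 64 bits, and
// returns 4; srem and sdiv instead sign-extend so that signed values survive
// the widening.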
6721 
6722 namespace {
6723 struct SCEVGCD : public SCEVVisitor<SCEVGCD, const SCEV *> {
6724 public:
6725  // Pattern match Step into Start. When Step is a multiply expression, find
6726  // the largest subexpression of Step that appears in Start. When Start is an
6727  // add expression, try to match Step in the subexpressions of Start;
6728  // non-matching subexpressions are returned under Remainder.
6729  static const SCEV *findGCD(ScalarEvolution &SE, const SCEV *Start,
6730  const SCEV *Step, const SCEV **Remainder) {
6731  assert(Remainder && "Remainder should not be NULL");
6732  SCEVGCD R(SE, Step, SE.getConstant(Step->getType(), 0));
6733  const SCEV *Res = R.visit(Start);
6734  *Remainder = R.Remainder;
6735  return Res;
6736  }
6737 
6738  SCEVGCD(ScalarEvolution &S, const SCEV *G, const SCEV *R)
6739  : SE(S), GCD(G), Remainder(R) {
6740  Zero = SE.getConstant(GCD->getType(), 0);
6741  One = SE.getConstant(GCD->getType(), 1);
6742  }
6743 
6744  const SCEV *visitConstant(const SCEVConstant *Constant) {
6745  if (GCD == Constant || Constant == Zero)
6746  return GCD;
6747 
6748  if (const SCEVConstant *CGCD = dyn_cast<SCEVConstant>(GCD)) {
6749  const SCEV *Res = SE.getConstant(gcd(Constant, CGCD));
6750  if (Res != One)
6751  return Res;
6752 
6753  Remainder = SE.getConstant(srem(Constant, CGCD));
6754  Constant = cast<SCEVConstant>(SE.getMinusSCEV(Constant, Remainder));
6755  Res = SE.getConstant(gcd(Constant, CGCD));
6756  return Res;
6757  }
6758 
6759  // When GCD is not a constant, it could be that the GCD is an Add, Mul,
6760  // AddRec, etc., in which case we want to find out how many times the
6761  // Constant divides the GCD: we then return that as the new GCD.
6762  const SCEV *Rem = Zero;
6763  const SCEV *Res = findGCD(SE, GCD, Constant, &Rem);
6764 
6765  if (Res == One || Rem != Zero) {
6766  Remainder = Constant;
6767  return One;
6768  }
6769 
6770  assert(isa<SCEVConstant>(Res) && "Res should be a constant");
6771  Remainder = SE.getConstant(srem(Constant, cast<SCEVConstant>(Res)));
6772  return Res;
6773  }
6774 
6775  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
6776  if (GCD != Expr)
6777  Remainder = Expr;
6778  return GCD;
6779  }
6780 
6781  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
6782  if (GCD != Expr)
6783  Remainder = Expr;
6784  return GCD;
6785  }
6786 
6787  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
6788  if (GCD != Expr)
6789  Remainder = Expr;
6790  return GCD;
6791  }
6792 
6793  const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
6794  if (GCD == Expr)
6795  return GCD;
6796 
6797  for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6798  const SCEV *Rem = Zero;
6799  const SCEV *Res = findGCD(SE, Expr->getOperand(e - 1 - i), GCD, &Rem);
6800 
6801  // FIXME: There may be ambiguous situations: for instance,
6802  // GCD(-4 + (3 * %m), 2 * %m) where 2 divides -4 and %m divides (3 * %m).
6803  // The order in which the AddExpr is traversed computes a different GCD
6804  // and Remainder.
6805  if (Res != One)
6806  GCD = Res;
6807  if (Rem != Zero)
6808  Remainder = SE.getAddExpr(Remainder, Rem);
6809  }
6810 
6811  return GCD;
6812  }
6813 
6814  const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
6815  if (GCD == Expr)
6816  return GCD;
6817 
6818  for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6819  if (Expr->getOperand(i) == GCD)
6820  return GCD;
6821  }
6822 
6823  // If we have not returned yet, it means that GCD is not part of Expr.
6824  const SCEV *PartialGCD = One;
6825  for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6826  const SCEV *Rem = Zero;
6827  const SCEV *Res = findGCD(SE, Expr->getOperand(i), GCD, &Rem);
6828  if (Rem != Zero)
6829  // GCD does not divide Expr->getOperand(i).
6830  continue;
6831 
6832  if (Res == GCD)
6833  return GCD;
6834  PartialGCD = SE.getMulExpr(PartialGCD, Res);
6835  if (PartialGCD == GCD)
6836  return GCD;
6837  }
6838 
6839  if (PartialGCD != One)
6840  return PartialGCD;
6841 
6842  Remainder = Expr;
6843  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(GCD);
6844  if (!Mul)
6845  return PartialGCD;
6846 
6847  // When the GCD is a multiply expression, try to decompose it:
6848  // this occurs when Step does not divide the Start expression
6849  // as in: {(-4 + (3 * %m)),+,(2 * %m)}
6850  for (int i = 0, e = Mul->getNumOperands(); i < e; ++i) {
6851  const SCEV *Rem = Zero;
6852  const SCEV *Res = findGCD(SE, Expr, Mul->getOperand(i), &Rem);
6853  if (Rem == Zero) {
6854  Remainder = Rem;
6855  return Res;
6856  }
6857  }
6858 
6859  return PartialGCD;
6860  }
6861 
6862  const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
6863  if (GCD != Expr)
6864  Remainder = Expr;
6865  return GCD;
6866  }
6867 
6868  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
6869  if (GCD == Expr)
6870  return GCD;
6871 
6872  if (!Expr->isAffine()) {
6873  Remainder = Expr;
6874  return GCD;
6875  }
6876 
6877  const SCEV *Rem = Zero;
6878  const SCEV *Res = findGCD(SE, Expr->getOperand(0), GCD, &Rem);
6879  if (Rem != Zero)
6880  Remainder = SE.getAddExpr(Remainder, Rem);
6881 
6882  Rem = Zero;
6883  Res = findGCD(SE, Expr->getOperand(1), Res, &Rem);
6884  if (Rem != Zero) {
6885  Remainder = Expr;
6886  return GCD;
6887  }
6888 
6889  return Res;
6890  }
6891 
6892  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
6893  if (GCD != Expr)
6894  Remainder = Expr;
6895  return GCD;
6896  }
6897 
6898  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
6899  if (GCD != Expr)
6900  Remainder = Expr;
6901  return GCD;
6902  }
6903 
6904  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
6905  if (GCD != Expr)
6906  Remainder = Expr;
6907  return GCD;
6908  }
6909 
6910  const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
6911  return One;
6912  }
6913 
6914 private:
6915  ScalarEvolution &SE;
6916  const SCEV *GCD, *Remainder, *Zero, *One;
6917 };
6918 
6919 struct SCEVDivision : public SCEVVisitor<SCEVDivision, const SCEV *> {
6920 public:
6921  // Remove from Start all multiples of Step.
6922  static const SCEV *divide(ScalarEvolution &SE, const SCEV *Start,
6923  const SCEV *Step) {
6924  SCEVDivision D(SE, Step);
6925  const SCEV *Rem = D.Zero;
6926  (void)Rem;
6927  // The division is guaranteed to succeed: Step should divide Start with no
6928  // remainder.
6929  assert(Step == SCEVGCD::findGCD(SE, Start, Step, &Rem) && Rem == D.Zero &&
6930  "Step should divide Start with no remainder.");
6931  return D.visit(Start);
6932  }
6933 
6934  SCEVDivision(ScalarEvolution &S, const SCEV *G) : SE(S), GCD(G) {
6935  Zero = SE.getConstant(GCD->getType(), 0);
6936  One = SE.getConstant(GCD->getType(), 1);
6937  }
6938 
6939  const SCEV *visitConstant(const SCEVConstant *Constant) {
6940  if (GCD == Constant)
6941  return One;
6942 
6943  if (const SCEVConstant *CGCD = dyn_cast<SCEVConstant>(GCD))
6944  return SE.getConstant(sdiv(Constant, CGCD));
6945  return Constant;
6946  }
6947 
6948  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
6949  if (GCD == Expr)
6950  return One;
6951  return Expr;
6952  }
6953 
6954  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
6955  if (GCD == Expr)
6956  return One;
6957  return Expr;
6958  }
6959 
6960  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
6961  if (GCD == Expr)
6962  return One;
6963  return Expr;
6964  }
6965 
6966  const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
6967  if (GCD == Expr)
6968  return One;
6969 
6970  SmallVector<const SCEV *, 2> Operands;
6971  for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
6972  Operands.push_back(divide(SE, Expr->getOperand(i), GCD));
6973 
6974  if (Operands.size() == 1)
6975  return Operands[0];
6976  return SE.getAddExpr(Operands);
6977  }
6978 
6979  const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
6980  if (GCD == Expr)
6981  return One;
6982 
6983  bool FoundGCDTerm = false;
6984  for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
6985  if (Expr->getOperand(i) == GCD)
6986  FoundGCDTerm = true;
6987 
6988  SmallVector<const SCEV *, 2> Operands;
6989  if (FoundGCDTerm) {
6990  FoundGCDTerm = false;
6991  for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6992  if (FoundGCDTerm)
6993  Operands.push_back(Expr->getOperand(i));
6994  else if (Expr->getOperand(i) == GCD)
6995  FoundGCDTerm = true;
6996  else
6997  Operands.push_back(Expr->getOperand(i));
6998  }
6999  } else {
7000  FoundGCDTerm = false;
7001  const SCEV *PartialGCD = One;
7002  for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
7003  if (PartialGCD == GCD) {
7004  Operands.push_back(Expr->getOperand(i));
7005  continue;
7006  }
7007 
7008  const SCEV *Rem = Zero;
7009  const SCEV *Res = SCEVGCD::findGCD(SE, Expr->getOperand(i), GCD, &Rem);
7010  if (Rem == Zero) {
7011  PartialGCD = SE.getMulExpr(PartialGCD, Res);
7012  Operands.push_back(divide(SE, Expr->getOperand(i), GCD));
7013  } else {
7014  Operands.push_back(Expr->getOperand(i));
7015  }
7016  }
7017  }
7018 
7019  if (Operands.size() == 1)
7020  return Operands[0];
7021  return SE.getMulExpr(Operands);
7022  }
7023 
7024  const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
7025  if (GCD == Expr)
7026  return One;
7027  return Expr;
7028  }
7029 
7030  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
7031  if (GCD == Expr)
7032  return One;
7033 
7034  assert(Expr->isAffine() && "Expr should be affine");
7035 
7036  const SCEV *Start = divide(SE, Expr->getStart(), GCD);
7037  const SCEV *Step = divide(SE, Expr->getStepRecurrence(SE), GCD);
7038 
7039  return SE.getAddRecExpr(Start, Step, Expr->getLoop(),
7040  Expr->getNoWrapFlags());
7041  }
7042 
7043  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
7044  if (GCD == Expr)
7045  return One;
7046  return Expr;
7047  }
7048 
7049  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
7050  if (GCD == Expr)
7051  return One;
7052  return Expr;
7053  }
7054 
7055  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
7056  if (GCD == Expr)
7057  return One;
7058  return Expr;
7059  }
7060 
7061  const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
7062  return Expr;
7063  }
7064 
7065 private:
7066  ScalarEvolution &SE;
7067  const SCEV *GCD, *Zero, *One;
7068 };
7069 }
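// The two visitors above are designed to be used as a pair: SCEVGCD::findGCD
// pattern matches the largest expression dividing both Start and Step,
// collecting whatever does not divide into *Remainder, and
// SCEVDivision::divide(SE, SE.getMinusSCEV(Start, Remainder), GCD) then peels
// that common factor off. This is exactly the sequence performed by
// SCEVAddRecExpr::delinearize below for each subscript.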
7070 
7071 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and
7072 /// sizes of an array access. Returns the remainder of the delinearization that
7073 /// is the offset start of the array. The SCEV->delinearize algorithm computes
7074 /// the multiples of SCEV coefficients: that is, a pattern matching of
7075 /// subexpressions in the stride and base of a SCEV corresponding to the
7076 /// computation of a GCD (greatest common divisor) of base and stride. When
7077 /// SCEV->delinearize fails, it returns the SCEV unchanged.
7078 ///
7079 /// For example: when analyzing the memory access A[i][j][k] in this loop nest
7080 ///
7081 /// void foo(long n, long m, long o, double A[n][m][o]) {
7082 ///
7083 /// for (long i = 0; i < n; i++)
7084 /// for (long j = 0; j < m; j++)
7085 /// for (long k = 0; k < o; k++)
7086 /// A[i][j][k] = 1.0;
7087 /// }
7088 ///
7089 /// the delinearization input is the following AddRec SCEV:
7090 ///
7091 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
7092 ///
7093 /// From this SCEV, we are able to say that the base offset of the access is %A
7094 /// because it appears as an offset that does not divide any of the strides in
7095 /// the loops:
7096 ///
7097 /// CHECK: Base offset: %A
7098 ///
7099 /// and then SCEV->delinearize determines the size of some of the dimensions of
7100 /// the array as these are the multiples by which the strides are happening:
7101 ///
7102 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
7103 ///
7104 /// Note that the outermost dimension remains of UnknownSize because there are
7105 /// no strides that would help identify the size of the last dimension: when
7106 /// the array has been statically allocated, one could compute the size of that
7107 /// dimension by dividing the overall size of the array by the size of the known
7108 /// dimensions: %m * %o * 8.
7109 ///
7110 /// Finally, delinearize provides the access functions for the array reference
7111 /// that corresponds to A[i][j][k] of the above C testcase:
7112 ///
7113 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
7114 ///
7115 /// The testcases are checking the output of a function pass:
7116 /// DelinearizationPass that walks through all loads and stores of a function
7117 /// asking for the SCEV of the memory access with respect to all enclosing
7118 /// loops, calling SCEV->delinearize on that and printing the results.
7119 
7120 const SCEV *
7121 SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
7122  SmallVectorImpl<const SCEV *> &Subscripts,
7123  SmallVectorImpl<const SCEV *> &Sizes) const {
7124  // Early exit in case this SCEV is not an affine multivariate function.
7125  if (!this->isAffine())
7126  return this;
7127 
7128  const SCEV *Start = this->getStart();
7129  const SCEV *Step = this->getStepRecurrence(SE);
7130 
7131  // Build the SCEV representation of the canonical induction variable in the
7132  // loop of this SCEV.
7133  const SCEV *Zero = SE.getConstant(this->getType(), 0);
7134  const SCEV *One = SE.getConstant(this->getType(), 1);
7135  const SCEV *IV =
7136  SE.getAddRecExpr(Zero, One, this->getLoop(), this->getNoWrapFlags());
7137 
7138  DEBUG(dbgs() << "(delinearize: " << *this << "\n");
7139 
7140  // Currently we fail to delinearize when the stride of this SCEV is 1. We
7141  // could decide to not fail in this case: we could just return 1 for the size
7142  // of the subscript, and this same SCEV for the access function.
7143  if (Step == One) {
7144  DEBUG(dbgs() << "failed to delinearize " << *this << "\n)\n");
7145  return this;
7146  }
7147 
7148  // Find the GCD and Remainder of the Start and Step coefficients of this SCEV.
7149  const SCEV *Remainder = NULL;
7150  const SCEV *GCD = SCEVGCD::findGCD(SE, Start, Step, &Remainder);
7151 
7152  DEBUG(dbgs() << "GCD: " << *GCD << "\n");
7153  DEBUG(dbgs() << "Remainder: " << *Remainder << "\n");
7154 
7155  // Same remark as above: we currently fail the delinearization, although we
7156  // can very well handle this special case.
7157  if (GCD == One) {
7158  DEBUG(dbgs() << "failed to delinearize " << *this << "\n)\n");
7159  return this;
7160  }
7161 
7162  // As findGCD computed Remainder, GCD divides "Start - Remainder." The
7163  // Quotient is then this SCEV without Remainder, scaled down by the GCD. The
7164  // Quotient is what will be used in the next subscript delinearization.
7165  const SCEV *Quotient =
7166  SCEVDivision::divide(SE, SE.getMinusSCEV(Start, Remainder), GCD);
7167  DEBUG(dbgs() << "Quotient: " << *Quotient << "\n");
7168 
7169  const SCEV *Rem;
7170  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Quotient))
7171  // Recursively call delinearize on the Quotient until there are no more
7172  // multiples that can be recognized.
7173  Rem = AR->delinearize(SE, Subscripts, Sizes);
7174  else
7175  Rem = Quotient;
7176 
7177  // Scale up the canonical induction variable IV by whatever remains from the
7178  // Step after division by the GCD: the GCD is the size of the sub-arrays.
7179  if (Step != GCD) {
7180  Step = SCEVDivision::divide(SE, Step, GCD);
7181  IV = SE.getMulExpr(IV, Step);
7182  }
7183  // The access function in the current subscript is computed as the canonical
7184  // induction variable IV (potentially scaled up by the step) and offset by
7185  // Rem, the offset of delinearization in the sub-array.
7186  const SCEV *Index = SE.getAddExpr(IV, Rem);
7187 
7188  // Record the access function and the size of the current subscript.
7189  Subscripts.push_back(Index);
7190  Sizes.push_back(GCD);
7191 
7192 #ifndef NDEBUG
7193  int Size = Sizes.size();
7194  DEBUG(dbgs() << "succeeded to delinearize " << *this << "\n");
7195  DEBUG(dbgs() << "ArrayDecl[UnknownSize]");
7196  for (int i = 0; i < Size - 1; i++)
7197  DEBUG(dbgs() << "[" << *Sizes[i] << "]");
7198  DEBUG(dbgs() << " with elements of " << *Sizes[Size - 1] << " bytes.\n");
7199 
7200  DEBUG(dbgs() << "ArrayRef");
7201  for (int i = 0; i < Size; i++)
7202  DEBUG(dbgs() << "[" << *Subscripts[i] << "]");
7203  DEBUG(dbgs() << "\n)\n");
7204 #endif
7205 
7206  return Remainder;
7207 }
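// A minimal caller sketch (AccessFn stands for any add recurrence SCEV
// previously obtained for a memory access; it is not a name used elsewhere
// in this file):
//
//   SmallVector<const SCEV *, 4> Subscripts, Sizes;
//   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(AccessFn)) {
//     const SCEV *BaseOffset = AR->delinearize(SE, Subscripts, Sizes);
//     // On success, Subscripts[i] indexes a dimension of size Sizes[i] and
//     // BaseOffset is the remaining start offset of the array.
//   }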
7208 
7209 //===----------------------------------------------------------------------===//
7210 // SCEVCallbackVH Class Implementation
7211 //===----------------------------------------------------------------------===//
7212 
7213 void ScalarEvolution::SCEVCallbackVH::deleted() {
7214  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
7215  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
7216  SE->ConstantEvolutionLoopExitValue.erase(PN);
7217  SE->ValueExprMap.erase(getValPtr());
7218  // this now dangles!
7219 }
7220 
7221 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
7222  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
7223 
7224  // Forget all the expressions associated with users of the old value,
7225  // so that future queries will recompute the expressions using the new
7226  // value.
7227  Value *Old = getValPtr();
7228  SmallVector<User *, 16> Worklist;
7229  SmallPtrSet<User *, 8> Visited;
7230  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
7231  UI != UE; ++UI)
7232  Worklist.push_back(*UI);
7233  while (!Worklist.empty()) {
7234  User *U = Worklist.pop_back_val();
7235  // Deleting the Old value will cause this to dangle. Postpone
7236  // that until everything else is done.
7237  if (U == Old)
7238  continue;
7239  if (!Visited.insert(U))
7240  continue;
7241  if (PHINode *PN = dyn_cast<PHINode>(U))
7242  SE->ConstantEvolutionLoopExitValue.erase(PN);
7243  SE->ValueExprMap.erase(U);
7244  for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
7245  UI != UE; ++UI)
7246  Worklist.push_back(*UI);
7247  }
7248  // Delete the Old value.
7249  if (PHINode *PN = dyn_cast<PHINode>(Old))
7250  SE->ConstantEvolutionLoopExitValue.erase(PN);
7251  SE->ValueExprMap.erase(Old);
7252  // this now dangles!
7253 }
7254 
7255 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
7256  : CallbackVH(V), SE(se) {}
7257 
7258 //===----------------------------------------------------------------------===//
7259 // ScalarEvolution Class Implementation
7260 //===----------------------------------------------------------------------===//
7261 
7262 ScalarEvolution::ScalarEvolution()
7263  : FunctionPass(ID), ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), FirstUnknown(0) {
7264  initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
7265 }
7266 
7267 bool ScalarEvolution::runOnFunction(Function &F) {
7268  this->F = &F;
7269  LI = &getAnalysis<LoopInfo>();
7270  TD = getAnalysisIfAvailable<DataLayout>();
7271  TLI = &getAnalysis<TargetLibraryInfo>();
7272  DT = &getAnalysis<DominatorTree>();
7273  return false;
7274 }
7275 
7276 void ScalarEvolution::releaseMemory() {
7277  // Iterate through all the SCEVUnknown instances and call their
7278  // destructors, so that they release their references to their values.
7279  for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
7280  U->~SCEVUnknown();
7281  FirstUnknown = 0;
7282 
7283  ValueExprMap.clear();
7284 
7285  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
7286  // that a loop had multiple computable exits.
7287  for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
7288  BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end();
7289  I != E; ++I) {
7290  I->second.clear();
7291  }
7292 
7293  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
7294 
7295  BackedgeTakenCounts.clear();
7296  ConstantEvolutionLoopExitValue.clear();
7297  ValuesAtScopes.clear();
7298  LoopDispositions.clear();
7299  BlockDispositions.clear();
7300  UnsignedRanges.clear();
7301  SignedRanges.clear();
7302  UniqueSCEVs.clear();
7303  SCEVAllocator.Reset();
7304 }
7305 
7306 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
7307  AU.setPreservesAll();
7308  AU.addRequiredTransitive<LoopInfo>();
7309  AU.addRequiredTransitive<DominatorTree>();
7310  AU.addRequired<TargetLibraryInfo>();
7311 }
7312 
7313 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
7314  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
7315 }
7316 
7317 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
7318  const Loop *L) {
7319  // Print all inner loops first
7320  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
7321  PrintLoopInfo(OS, SE, *I);
7322 
7323  OS << "Loop ";
7324  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
7325  OS << ": ";
7326 
7327  SmallVector<BasicBlock *, 8> ExitBlocks;
7328  L->getExitBlocks(ExitBlocks);
7329  if (ExitBlocks.size() != 1)
7330  OS << "<multiple exits> ";
7331 
7332  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
7333  OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
7334  } else {
7335  OS << "Unpredictable backedge-taken count. ";
7336  }
7337 
7338  OS << "\n"
7339  "Loop ";
7340  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
7341  OS << ": ";
7342 
7343  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
7344  OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
7345  } else {
7346  OS << "Unpredictable max backedge-taken count. ";
7347  }
7348 
7349  OS << "\n";
7350 }
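// Illustrative output of the routine above for a simple counted loop over %n
// elements (the exact SCEV text depends on what the analysis can prove):
//
//   Loop %for.body: backedge-taken count is (-1 + %n)
//   Loop %for.body: max backedge-taken count is -1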
7351 
7352 void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
7353  // ScalarEvolution's implementation of the print method is to print
7354  // out SCEV values of all instructions that are interesting. Doing
7355  // this potentially causes it to create new SCEV objects though,
7356  // which technically conflicts with the const qualifier. This isn't
7357  // observable from outside the class though, so casting away the
7358  // const isn't dangerous.
7359  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
7360 
7361  OS << "Classifying expressions for: ";
7362  WriteAsOperand(OS, F, /*PrintType=*/false);
7363  OS << "\n";
7364  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
7365  if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
7366  OS << *I << '\n';
7367  OS << " --> ";
7368  const SCEV *SV = SE.getSCEV(&*I);
7369  SV->print(OS);
7370 
7371  const Loop *L = LI->getLoopFor((*I).getParent());
7372 
7373  const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
7374  if (AtUse != SV) {
7375  OS << " --> ";
7376  AtUse->print(OS);
7377  }
7378 
7379  if (L) {
7380  OS << "\t\t" "Exits: ";
7381  const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
7382  if (!SE.isLoopInvariant(ExitValue, L)) {
7383  OS << "<<Unknown>>";
7384  } else {
7385  OS << *ExitValue;
7386  }
7387  }
7388 
7389  OS << "\n";
7390  }
7391 
7392  OS << "Determining loop execution counts for: ";
7393  WriteAsOperand(OS, F, /*PrintType=*/false);
7394  OS << "\n";
7395  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
7396  PrintLoopInfo(OS, &SE, *I);
7397 }
7398 
7399 ScalarEvolution::LoopDisposition
7400 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
7401  SmallVector<std::pair<const Loop *, LoopDisposition>, 2> &Values = LoopDispositions[S];
7402  for (unsigned u = 0; u < Values.size(); u++) {
7403  if (Values[u].first == L)
7404  return Values[u].second;
7405  }
7406  Values.push_back(std::make_pair(L, LoopVariant));
7407  LoopDisposition D = computeLoopDisposition(S, L);
7408  SmallVector<std::pair<const Loop *, LoopDisposition>, 2> &Values2 = LoopDispositions[S];
7409  for (unsigned u = Values2.size(); u > 0; u--) {
7410  if (Values2[u - 1].first == L) {
7411  Values2[u - 1].second = D;
7412  break;
7413  }
7414  }
7415  return D;
7416 }
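// [Editor's note] getLoopDisposition memoizes in two phases: it first caches
// a conservative (L, LoopVariant) placeholder so that a query that reaches
// itself through a cycle terminates with a safe answer, and after computing
// the real disposition it re-fetches LoopDispositions[S], because the
// recursive compute step may have grown the DenseMap and invalidated the
// first SmallVector reference. The pattern in miniature (hypothetical names,
// not part of this file):
//
//   CacheTy &Slot = Cache[S];
//   Slot.push_back(std::make_pair(L, Conservative));  // break recursion
//   Result R = computeSlow(S, L);                     // may touch Cache
//   CacheTy &Fresh = Cache[S];                        // old ref may dangle
//   // ...overwrite the placeholder found in Fresh with R and return it.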
7417 
7418 ScalarEvolution::LoopDisposition
7419 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
7420  switch (S->getSCEVType()) {
7421  case scConstant:
7422  return LoopInvariant;
7423  case scTruncate:
7424  case scZeroExtend:
7425  case scSignExtend:
7426  return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
7427  case scAddRecExpr: {
7428  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
7429 
7430  // If L is the addrec's loop, it's computable.
7431  if (AR->getLoop() == L)
7432  return LoopComputable;
7433 
7434  // Add recurrences are never invariant in the function-body (null loop).
7435  if (!L)
7436  return LoopVariant;
7437 
7438  // This recurrence is variant w.r.t. L if L contains AR's loop.
7439  if (L->contains(AR->getLoop()))
7440  return LoopVariant;
7441 
7442  // This recurrence is invariant w.r.t. L if AR's loop contains L.
7443  if (AR->getLoop()->contains(L))
7444  return LoopInvariant;
7445 
7446  // This recurrence is variant w.r.t. L if any of its operands
7447  // are variant.
7448  for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
7449  I != E; ++I)
7450  if (!isLoopInvariant(*I, L))
7451  return LoopVariant;
7452 
7453  // Otherwise it's loop-invariant.
7454  return LoopInvariant;
7455  }
7456  case scAddExpr:
7457  case scMulExpr:
7458  case scUMaxExpr:
7459  case scSMaxExpr: {
7460  const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
7461  bool HasVarying = false;
7462  for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
7463  I != E; ++I) {
7464  LoopDisposition D = getLoopDisposition(*I, L);
7465  if (D == LoopVariant)
7466  return LoopVariant;
7467  if (D == LoopComputable)
7468  HasVarying = true;
7469  }
7470  return HasVarying ? LoopComputable : LoopInvariant;
7471  }
7472  case scUDivExpr: {
7473  const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
7474  LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
7475  if (LD == LoopVariant)
7476  return LoopVariant;
7477  LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
7478  if (RD == LoopVariant)
7479  return LoopVariant;
7480  return (LD == LoopInvariant && RD == LoopInvariant) ?
7481  LoopInvariant : LoopComputable;
7482  }
7483  case scUnknown:
7484  // All non-instruction values are loop invariant. All instructions are loop
7485  // invariant if they are not contained in the specified loop.
7486  // Instructions are never considered invariant in the function body
7487  // (null loop) because they are defined within the "loop".
7488  if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
7489  return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
7490  return LoopInvariant;
7491  case scCouldNotCompute:
7492  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
7493  default: llvm_unreachable("Unknown SCEV kind!");
7494  }
7495 }
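// [Editor's worked example] Take loops Outer containing Inner and the
// recurrence {0,+,1}<Inner>. With respect to Inner the switch above returns
// LoopComputable (it is Inner's own induction variable); with respect to
// Outer it returns LoopVariant, because Outer contains Inner; and with
// respect to any loop nested inside Inner it returns LoopInvariant, because
// Inner contains that loop. The operand scan at the end only decides the
// remaining case of loops disjoint from Inner.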
7496 
7497 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
7498  return getLoopDisposition(S, L) == LoopInvariant;
7499 }
7500 
7501 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
7502  return getLoopDisposition(S, L) == LoopComputable;
7503 }
7504 
7505 ScalarEvolution::BlockDisposition
7506 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
7507  SmallVector<std::pair<const BasicBlock *, BlockDisposition>, 2> &Values = BlockDispositions[S];
7508  for (unsigned u = 0; u < Values.size(); u++) {
7509  if (Values[u].first == BB)
7510  return Values[u].second;
7511  }
7512  Values.push_back(std::make_pair(BB, DoesNotDominateBlock));
7513  BlockDisposition D = computeBlockDisposition(S, BB);
7514  SmallVector<std::pair<const BasicBlock *, BlockDisposition>, 2> &Values2 = BlockDispositions[S];
7515  for (unsigned u = Values2.size(); u > 0; u--) {
7516  if (Values2[u - 1].first == BB) {
7517  Values2[u - 1].second = D;
7518  break;
7519  }
7520  }
7521  return D;
7522 }
7523 
7524 ScalarEvolution::BlockDisposition
7525 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
7526  switch (S->getSCEVType()) {
7527  case scConstant:
7528  return ProperlyDominatesBlock;
7529  case scTruncate:
7530  case scZeroExtend:
7531  case scSignExtend:
7532  return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
7533  case scAddRecExpr: {
7534  // This uses a "dominates" query instead of "properly dominates" query
7535  // to test for proper dominance too, because the instruction which
7536  // produces the addrec's value is a PHI, and a PHI effectively properly
7537  // dominates its entire containing block.
7538  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
7539  if (!DT->dominates(AR->getLoop()->getHeader(), BB))
7540  return DoesNotDominateBlock;
7541  }
7542  // FALL THROUGH into SCEVNAryExpr handling.
7543  case scAddExpr:
7544  case scMulExpr:
7545  case scUMaxExpr:
7546  case scSMaxExpr: {
7547  const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
7548  bool Proper = true;
7549  for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
7550  I != E; ++I) {
7551  BlockDisposition D = getBlockDisposition(*I, BB);
7552  if (D == DoesNotDominateBlock)
7553  return DoesNotDominateBlock;
7554  if (D == DominatesBlock)
7555  Proper = false;
7556  }
7557  return Proper ? ProperlyDominatesBlock : DominatesBlock;
7558  }
7559  case scUDivExpr: {
7560  const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
7561  const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
7562  BlockDisposition LD = getBlockDisposition(LHS, BB);
7563  if (LD == DoesNotDominateBlock)
7564  return DoesNotDominateBlock;
7565  BlockDisposition RD = getBlockDisposition(RHS, BB);
7566  if (RD == DoesNotDominateBlock)
7567  return DoesNotDominateBlock;
7568  return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
7569  ProperlyDominatesBlock : DominatesBlock;
7570  }
7571  case scUnknown:
7572  if (Instruction *I =
7573  dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
7574  if (I->getParent() == BB)
7575  return DominatesBlock;
7576  if (DT->properlyDominates(I->getParent(), BB))
7577  return ProperlyDominatesBlock;
7578  return DoesNotDominateBlock;
7579  }
7580  return ProperlyDominatesBlock;
7581  case scCouldNotCompute:
7582  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
7583  default:
7584  llvm_unreachable("Unknown SCEV kind!");
7585  }
7586 }
7587 
7588 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
7589  return getBlockDisposition(S, BB) >= DominatesBlock;
7590 }
7591 
7592 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
7593  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
7594 }
7595 
7596 namespace {
7597 // Search for a SCEV expression node within an expression tree.
7598 // Implements SCEVTraversal::Visitor.
7599 struct SCEVSearch {
7600  const SCEV *Node;
7601  bool IsFound;
7602 
7603  SCEVSearch(const SCEV *N): Node(N), IsFound(false) {}
7604 
7605  bool follow(const SCEV *S) {
7606  IsFound |= (S == Node);
7607  return !IsFound;
7608  }
7609  bool isDone() const { return IsFound; }
7610 };
7611 }
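// [Editor's note] SCEVSearch implements the visitor contract that
// SCEVTraversal's visitAll() expects: follow(S) is invoked on each node and
// its return value controls whether the walk descends into S's operands,
// while isDone() lets the traversal bail out early. Another visitor under
// the same contract, as a hypothetical sketch (not part of this file):
//
//   struct SCEVNodeCounter {
//     unsigned Count;
//     SCEVNodeCounter() : Count(0) {}
//     bool follow(const SCEV *) { ++Count; return true; }  // keep walking
//     bool isDone() const { return false; }                // never cut off
//   };
//   // Usage: SCEVNodeCounter C; visitAll(Expr, C);
//   // C.Count then holds the number of nodes reached.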
7612 
7613 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
7614  SCEVSearch Search(Op);
7615  visitAll(S, Search);
7616  return Search.IsFound;
7617 }
7618 
7619 void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
7620  ValuesAtScopes.erase(S);
7621  LoopDispositions.erase(S);
7622  BlockDispositions.erase(S);
7623  UnsignedRanges.erase(S);
7624  SignedRanges.erase(S);
7625 
7626  for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
7627  BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end(); I != E; ) {
7628  BackedgeTakenInfo &BEInfo = I->second;
7629  if (BEInfo.hasOperand(S, this)) {
7630  BEInfo.clear();
7631  BackedgeTakenCounts.erase(I++);
7632  }
7633  else
7634  ++I;
7635  }
7636 }
7637 
7638 typedef DenseMap<const Loop *, std::string> VerifyMap;
7639 
7640 /// replaceSubString - Replaces all occurrences of From in Str with To.
7641 static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
7642  size_t Pos = 0;
7643  while ((Pos = Str.find(From, Pos)) != std::string::npos) {
7644  Str.replace(Pos, From.size(), To.data(), To.size());
7645  Pos += To.size();
7646  }
7647 }
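// [Editor's example] Illustrative call: with Str = "(4 * %n)<nsw>",
// replaceSubString(Str, "<nsw>", "") leaves "(4 * %n)" -- exactly the
// wrap-flag scrubbing performed in getLoopBackedgeTakenCounts below.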
7648 
7649 /// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
7650 static void
7651 getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
7652  for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) {
7653  getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.
7654 
7655  std::string &S = Map[L];
7656  if (S.empty()) {
7657  raw_string_ostream OS(S);
7658  SE.getBackedgeTakenCount(L)->print(OS);
7659 
7660  // false and 0 are semantically equivalent. This can happen in dead loops.
7661  replaceSubString(OS.str(), "false", "0");
7662  // Remove wrap flags, their use in SCEV is highly fragile.
7663  // FIXME: Remove this when SCEV gets smarter about them.
7664  replaceSubString(OS.str(), "<nw>", "");
7665  replaceSubString(OS.str(), "<nsw>", "");
7666  replaceSubString(OS.str(), "<nuw>", "");
7667  }
7668  }
7669 }
7670 
7671 void ScalarEvolution::verifyAnalysis() const {
7672  if (!VerifySCEV)
7673  return;
7674 
7675  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
7676 
7677  // Gather stringified backedge taken counts for all loops using SCEV's caches.
7678  // FIXME: It would be much better to store actual values instead of strings,
7679  // but SCEV pointers will change if we drop the caches.
7680  VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
7681  for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
7682  getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
7683 
7684  // Gather stringified backedge taken counts for all loops without using
7685  // SCEV's caches.
7686  SE.releaseMemory();
7687  for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
7688  getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);
7689 
7690  // Now compare whether they're the same with and without caches. This allows
7691  // verifying that no pass changed the cache.
7692  assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
7693  "New loops suddenly appeared!");
7694 
7695  for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
7696  OldE = BackedgeDumpsOld.end(),
7697  NewI = BackedgeDumpsNew.begin();
7698  OldI != OldE; ++OldI, ++NewI) {
7699  assert(OldI->first == NewI->first && "Loop order changed!");
7700 
7701  // Compare the stringified SCEVs. We don't care if undef backedge-taken count
7702  // changes.
7703  // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This
7704  // means that a pass is buggy or SCEV has to learn a new pattern but is
7705  // usually not harmful.
7706  if (OldI->second != NewI->second &&
7707  OldI->second.find("undef") == std::string::npos &&
7708  NewI->second.find("undef") == std::string::npos &&
7709  OldI->second != "***COULDNOTCOMPUTE***" &&
7710  NewI->second != "***COULDNOTCOMPUTE***") {
7711  dbgs() << "SCEVValidator: SCEV for loop '"
7712  << OldI->first->getHeader()->getName()
7713  << "' changed from '" << OldI->second
7714  << "' to '" << NewI->second << "'!\n";
7715  std::abort();
7716  }
7717  }
7718 
7719  // TODO: Verify more things.
7720 }
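// [Editor's note] This whole check is gated on the cl::opt VerifySCEV
// ("verify-scev") declared earlier in this file, so it only runs when
// explicitly requested, e.g. with an invocation along these lines
// (illustrative):
//
//   opt -analyze -scalar-evolution -verify-scev input.ll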