LLVM API Documentation

InstCombineAddSub.cpp
1 //===- InstCombineAddSub.cpp ----------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the visit functions for add, fadd, sub, and fsub.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "InstCombine.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/Analysis/InstructionSimplify.h"
17 #include "llvm/IR/DataLayout.h"
19 #include "llvm/Support/PatternMatch.h"
20 using namespace llvm;
21 using namespace PatternMatch;
22 
23 namespace {
24 
25  /// Class representing the coefficient of a floating-point addend.
26  /// This class needs to be highly efficient; this is especially true of
27  /// the constructor. As of this writing, the cost of the default
28  /// constructor is merely a 4-byte zero store (assuming the compiler is
29  /// able to perform write-merging).
30  ///
31  class FAddendCoef {
32  public:
33  // The constructor has to initialize an APFloat, which is unnecessary for
34  // most addends, whose coefficient is either 1 or -1. So the constructor
35  // is expensive. In order to avoid the cost of the constructor, we should
36  // reuse some instances whenever possible. The pre-created instances
37  // FAddCombine::Add[0-5] embody this idea.
38  //
39  FAddendCoef() : IsFp(false), BufHasFpVal(false), IntVal(0) {}
40  ~FAddendCoef();
41 
42  void set(short C) {
43  assert(!insaneIntVal(C) && "Insane coefficient");
44  IsFp = false; IntVal = C;
45  }
46 
47  void set(const APFloat& C);
48 
49  void negate();
50 
51  bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
52  Value *getValue(Type *) const;
53 
54  // If possible, don't define operator+/operator- etc because these
55  // operators inevitably call FAddendCoef's constructor which is not cheap.
56  void operator=(const FAddendCoef &A);
57  void operator+=(const FAddendCoef &A);
58  void operator-=(const FAddendCoef &A);
59  void operator*=(const FAddendCoef &S);
60 
61  bool isOne() const { return isInt() && IntVal == 1; }
62  bool isTwo() const { return isInt() && IntVal == 2; }
63  bool isMinusOne() const { return isInt() && IntVal == -1; }
64  bool isMinusTwo() const { return isInt() && IntVal == -2; }
65 
66  private:
67  bool insaneIntVal(int V) { return V > 4 || V < -4; }
68  APFloat *getFpValPtr(void)
69  { return reinterpret_cast<APFloat*>(&FpValBuf.buffer[0]); }
70  const APFloat *getFpValPtr(void) const
71  { return reinterpret_cast<const APFloat*>(&FpValBuf.buffer[0]); }
72 
73  const APFloat &getFpVal(void) const {
74  assert(IsFp && BufHasFpVal && "Incorrect state");
75  return *getFpValPtr();
76  }
77 
78  APFloat &getFpVal(void) {
79  assert(IsFp && BufHasFpVal && "Incorrect state");
80  return *getFpValPtr();
81  }
82 
83  bool isInt() const { return !IsFp; }
84 
85  // If the coefficient is represented by an integer, promote it to a
86  // floating-point value.
87  void convertToFpType(const fltSemantics &Sem);
88 
89  // Construct an APFloat from a signed integer.
90  // TODO: We should get rid of this function when APFloat can be constructed
91  // from a *SIGNED* integer.
92  APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);
93  private:
94 
95  bool IsFp;
96 
97  // True iff FpValBuf contains an instance of APFloat.
98  bool BufHasFpVal;
99 
100  // The integer coefficient of an individual addend is either 1 or -1,
101  // and we try to simplify at most 4 addends coming from at most two
102  // neighboring instructions. So the range of <IntVal> falls in [-4, 4].
103  // APInt is overkill for this purpose.
104  short IntVal;
105 
106  AlignedCharArrayUnion<APFloat> FpValBuf;
107  };
108 
109  /// FAddend is used to represent a floating-point addend. An addend is
110  /// represented as <C, V>, where V is a symbolic value, and C is a
111  /// constant coefficient. A constant addend is represented as <C, 0>.
112  ///
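 /// For example (illustrative): the product "2.0 * x" is modeled as the single
 /// addend <2.0, x>, while a bare constant such as 3.5 is modeled as <3.5, 0>,
 /// i.e. with a null symbolic value.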
113  class FAddend {
114  public:
115  FAddend() { Val = 0; }
116 
117  Value *getSymVal (void) const { return Val; }
118  const FAddendCoef &getCoef(void) const { return Coeff; }
119 
120  bool isConstant() const { return Val == 0; }
121  bool isZero() const { return Coeff.isZero(); }
122 
123  void set(short Coefficient, Value *V) { Coeff.set(Coefficient), Val = V; }
124  void set(const APFloat& Coefficient, Value *V)
125  { Coeff.set(Coefficient); Val = V; }
126  void set(const ConstantFP* Coefficient, Value *V)
127  { Coeff.set(Coefficient->getValueAPF()); Val = V; }
128 
129  void negate() { Coeff.negate(); }
130 
131  /// Drill down the U-D chain one step to find the definition of V, and
132  /// try to break the definition into one or two addends.
133  static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);
134 
135  /// Similar to FAddend::drillValueDownOneStep() except that the value being
136  /// split is the addend itself.
137  unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;
138 
139  void operator+=(const FAddend &T) {
140  assert((Val == T.Val) && "Symbolic-values disagree");
141  Coeff += T.Coeff;
142  }
143 
144  private:
145  void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }
146 
147  // This addend has the value of "Coeff * Val".
148  Value *Val;
149  FAddendCoef Coeff;
150  };
151 
152  /// FAddCombine is the class for optimizing an unsafe fadd/fsub along
153  /// with at most two of its neighboring instructions.
154  ///
155  class FAddCombine {
156  public:
157  FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(0) {}
158  Value *simplify(Instruction *FAdd);
159 
160  private:
161  typedef SmallVector<const FAddend*, 4> AddendVect;
162 
163  Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);
164 
165  Value *performFactorization(Instruction *I);
166 
167  /// Convert given addend to a Value
168  Value *createAddendVal(const FAddend &A, bool& NeedNeg);
169 
170  /// Return the number of instructions needed to emit the N-ary addition.
171  unsigned calcInstrNumber(const AddendVect& Vect);
172  Value *createFSub(Value *Opnd0, Value *Opnd1);
173  Value *createFAdd(Value *Opnd0, Value *Opnd1);
174  Value *createFMul(Value *Opnd0, Value *Opnd1);
175  Value *createFDiv(Value *Opnd0, Value *Opnd1);
176  Value *createFNeg(Value *V);
177  Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
178  void createInstPostProc(Instruction *NewInst);
179 
180  InstCombiner::BuilderTy *Builder;
181  Instruction *Instr;
182 
183  private:
184  // Debugging-related members are clustered here.
185  #ifndef NDEBUG
186  unsigned CreateInstrNum;
187  void initCreateInstNum() { CreateInstrNum = 0; }
188  void incCreateInstNum() { CreateInstrNum++; }
189  #else
190  void initCreateInstNum() {}
191  void incCreateInstNum() {}
192  #endif
193  };
194 }
195 
196 //===----------------------------------------------------------------------===//
197 //
198 // Implementation of
199 //    {FAddendCoef, FAddend, FAddCombine}.
200 //
201 //===----------------------------------------------------------------------===//
202 FAddendCoef::~FAddendCoef() {
203  if (BufHasFpVal)
204  getFpValPtr()->~APFloat();
205 }
206 
207 void FAddendCoef::set(const APFloat& C) {
208  APFloat *P = getFpValPtr();
209 
210  if (isInt()) {
211  // As the buffer is a meaningless byte stream, we cannot call
212  // APFloat::operator=().
213  new(P) APFloat(C);
214  } else
215  *P = C;
216 
217  IsFp = BufHasFpVal = true;
218 }
219 
220 void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
221  if (!isInt())
222  return;
223 
224  APFloat *P = getFpValPtr();
225  if (IntVal > 0)
226  new(P) APFloat(Sem, IntVal);
227  else {
228  new(P) APFloat(Sem, 0 - IntVal);
229  P->changeSign();
230  }
231  IsFp = BufHasFpVal = true;
232 }
233 
234 APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
235  if (Val >= 0)
236  return APFloat(Sem, Val);
237 
238  APFloat T(Sem, 0 - Val);
239  T.changeSign();
240 
241  return T;
242 }
243 
244 void FAddendCoef::operator=(const FAddendCoef &That) {
245  if (That.isInt())
246  set(That.IntVal);
247  else
248  set(That.getFpVal());
249 }
250 
251 void FAddendCoef::operator+=(const FAddendCoef &That) {
253  if (isInt() == That.isInt()) {
254  if (isInt())
255  IntVal += That.IntVal;
256  else
257  getFpVal().add(That.getFpVal(), RndMode);
258  return;
259  }
260 
261  if (isInt()) {
262  const APFloat &T = That.getFpVal();
263  convertToFpType(T.getSemantics());
264  getFpVal().add(T, RndMode);
265  return;
266  }
267 
268  APFloat &T = getFpVal();
269  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
270 }
271 
272 void FAddendCoef::operator-=(const FAddendCoef &That) {
273  enum APFloat::roundingMode RndMode = APFloat::rmNearestTiesToEven;
274  if (isInt() == That.isInt()) {
275  if (isInt())
276  IntVal -= That.IntVal;
277  else
278  getFpVal().subtract(That.getFpVal(), RndMode);
279  return;
280  }
281 
282  if (isInt()) {
283  const APFloat &T = That.getFpVal();
284  convertToFpType(T.getSemantics());
285  getFpVal().subtract(T, RndMode);
286  return;
287  }
288 
289  APFloat &T = getFpVal();
290  T.subtract(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
291 }
292 
293 void FAddendCoef::operator*=(const FAddendCoef &That) {
294  if (That.isOne())
295  return;
296 
297  if (That.isMinusOne()) {
298  negate();
299  return;
300  }
301 
302  if (isInt() && That.isInt()) {
303  int Res = IntVal * (int)That.IntVal;
304  assert(!insaneIntVal(Res) && "Insane int value");
305  IntVal = Res;
306  return;
307  }
308 
309  const fltSemantics &Semantic =
310  isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();
311 
312  if (isInt())
313  convertToFpType(Semantic);
314  APFloat &F0 = getFpVal();
315 
316  if (That.isInt())
317  F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
318  APFloat::rmNearestTiesToEven);
319  else
320  F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
321 
322  return;
323 }
324 
325 void FAddendCoef::negate() {
326  if (isInt())
327  IntVal = 0 - IntVal;
328  else
329  getFpVal().changeSign();
330 }
331 
332 Value *FAddendCoef::getValue(Type *Ty) const {
333  return isInt() ?
334  ConstantFP::get(Ty, float(IntVal)) :
335  ConstantFP::get(Ty->getContext(), getFpVal());
336 }
337 
338 // The definition of <Val>          Addends
339 // =========================================
340 //  A +  B                          <1, A>, <1, B>
341 //  A -  B                          <1, A>, <-1, B>
342 //  0 -  B                          <-1, B>
343 //  C *  A                          <C, A>
344 //  A +  C                          <1, A>, <C, NULL>
345 //  0 +/- 0                         <0, NULL> (corner case)
346 //
347 // Legend: A and B are not constant, C is constant
348 //
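// For example (illustrative): for "%t = fsub fast float 0.000000e+00, %x" this
// function returns 1 and sets Addend0 to <-1, %x>; for "%t = fadd fast float %a, %b"
// it returns 2 with Addend0 = <1, %a> and Addend1 = <1, %b>.
//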
349 unsigned FAddend::drillValueDownOneStep
350  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
351  Instruction *I = 0;
352  if (Val == 0 || !(I = dyn_cast<Instruction>(Val)))
353  return 0;
354 
355  unsigned Opcode = I->getOpcode();
356 
357  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
358  ConstantFP *C0, *C1;
359  Value *Opnd0 = I->getOperand(0);
360  Value *Opnd1 = I->getOperand(1);
361  if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
362  Opnd0 = 0;
363 
364  if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
365  Opnd1 = 0;
366 
367  if (Opnd0) {
368  if (!C0)
369  Addend0.set(1, Opnd0);
370  else
371  Addend0.set(C0, 0);
372  }
373 
374  if (Opnd1) {
375  FAddend &Addend = Opnd0 ? Addend1 : Addend0;
376  if (!C1)
377  Addend.set(1, Opnd1);
378  else
379  Addend.set(C1, 0);
380  if (Opcode == Instruction::FSub)
381  Addend.negate();
382  }
383 
384  if (Opnd0 || Opnd1)
385  return Opnd0 && Opnd1 ? 2 : 1;
386 
387  // Both operands are zero. Weird!
388  Addend0.set(APFloat(C0->getValueAPF().getSemantics()), 0);
389  return 1;
390  }
391 
392  if (I->getOpcode() == Instruction::FMul) {
393  Value *V0 = I->getOperand(0);
394  Value *V1 = I->getOperand(1);
395  if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
396  Addend0.set(C, V1);
397  return 1;
398  }
399 
400  if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
401  Addend0.set(C, V0);
402  return 1;
403  }
404  }
405 
406  return 0;
407 }
408 
409 // Try to break *this* addend into two addends. For example, suppose this
410 // addend is <2.3, V> and V = X + Y; calling this function yields the two
411 // addends <2.3, X> and <2.3, Y>.
412 //
413 unsigned FAddend::drillAddendDownOneStep
414  (FAddend &Addend0, FAddend &Addend1) const {
415  if (isConstant())
416  return 0;
417 
418  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
419  if (!BreakNum || Coeff.isOne())
420  return BreakNum;
421 
422  Addend0.Scale(Coeff);
423 
424  if (BreakNum == 2)
425  Addend1.Scale(Coeff);
426 
427  return BreakNum;
428 }
429 
430 // Try to perform the following optimization on the input instruction I.
431 // Return the simplified expression if it was successful; otherwise, return 0.
432 //
433 //   Instruction "I"                  Simplified into
434 //   ---------------------------------------------------
435 //   (x * y) +/- (x * z)              x * (y +/- z)
436 //   (y / x) +/- (z / x)              (y +/- z) / x
437 //
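// For example (illustrative), "(a * x) + (a * y)" is rewritten as "a * (x + y)",
// replacing two multiplies and one add with one add and one multiply.
//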
438 Value *FAddCombine::performFactorization(Instruction *I) {
439  assert((I->getOpcode() == Instruction::FAdd ||
440  I->getOpcode() == Instruction::FSub) && "Expect add/sub");
441 
442  Instruction *I0 = dyn_cast<Instruction>(I->getOperand(0));
443  Instruction *I1 = dyn_cast<Instruction>(I->getOperand(1));
444 
445  if (!I0 || !I1 || I0->getOpcode() != I1->getOpcode())
446  return 0;
447 
448  bool isMpy = false;
449  if (I0->getOpcode() == Instruction::FMul)
450  isMpy = true;
451  else if (I0->getOpcode() != Instruction::FDiv)
452  return 0;
453 
454  Value *Opnd0_0 = I0->getOperand(0);
455  Value *Opnd0_1 = I0->getOperand(1);
456  Value *Opnd1_0 = I1->getOperand(0);
457  Value *Opnd1_1 = I1->getOperand(1);
458 
459  //            Input Instr I       Factor   AddSub0  AddSub1
460  //            ----------------------------------------------
461  //            (x*y) +/- (x*z)        x        y        z
462  //            (y/x) +/- (z/x)        x        y        z
463  //
464  Value *Factor = 0;
465  Value *AddSub0 = 0, *AddSub1 = 0;
466 
467  if (isMpy) {
468  if (Opnd0_0 == Opnd1_0 || Opnd0_0 == Opnd1_1)
469  Factor = Opnd0_0;
470  else if (Opnd0_1 == Opnd1_0 || Opnd0_1 == Opnd1_1)
471  Factor = Opnd0_1;
472 
473  if (Factor) {
474  AddSub0 = (Factor == Opnd0_0) ? Opnd0_1 : Opnd0_0;
475  AddSub1 = (Factor == Opnd1_0) ? Opnd1_1 : Opnd1_0;
476  }
477  } else if (Opnd0_1 == Opnd1_1) {
478  Factor = Opnd0_1;
479  AddSub0 = Opnd0_0;
480  AddSub1 = Opnd1_0;
481  }
482 
483  if (!Factor)
484  return 0;
485 
486  // Create expression "NewAddSub = AddSub0 +/- AddSub1"
487  Value *NewAddSub = (I->getOpcode() == Instruction::FAdd) ?
488  createFAdd(AddSub0, AddSub1) :
489  createFSub(AddSub0, AddSub1);
490  if (ConstantFP *CFP = dyn_cast<ConstantFP>(NewAddSub)) {
491  const APFloat &F = CFP->getValueAPF();
492  if (!F.isNormal())
493  return 0;
494  }
495 
496  if (isMpy)
497  return createFMul(Factor, NewAddSub);
498 
499  return createFDiv(NewAddSub, Factor);
500 }
501 
502 Value *FAddCombine::simplify(Instruction *I) {
503  assert(I->hasUnsafeAlgebra() && "Should be in unsafe mode");
504 
505  // Currently we are not able to handle vector type.
506  if (I->getType()->isVectorTy())
507  return 0;
508 
509  assert((I->getOpcode() == Instruction::FAdd ||
510  I->getOpcode() == Instruction::FSub) && "Expect add/sub");
511 
512  // Save the instruction before calling other member-functions.
513  Instr = I;
514 
515  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;
516 
517  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);
518 
519  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
520  unsigned Opnd0_ExpNum = 0;
521  unsigned Opnd1_ExpNum = 0;
522 
523  if (!Opnd0.isConstant())
524  Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);
525 
526  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
527  if (OpndNum == 2 && !Opnd1.isConstant())
528  Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);
529 
530  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
531  if (Opnd0_ExpNum && Opnd1_ExpNum) {
532  AddendVect AllOpnds;
533  AllOpnds.push_back(&Opnd0_0);
534  AllOpnds.push_back(&Opnd1_0);
535  if (Opnd0_ExpNum == 2)
536  AllOpnds.push_back(&Opnd0_1);
537  if (Opnd1_ExpNum == 2)
538  AllOpnds.push_back(&Opnd1_1);
539 
540  // Compute instruction quota. We should save at least one instruction.
541  unsigned InstQuota = 0;
542 
543  Value *V0 = I->getOperand(0);
544  Value *V1 = I->getOperand(1);
545  InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
546  (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;
547 
548  if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
549  return R;
550  }
551 
552  if (OpndNum != 2) {
553  // The input instruction is "I = 0.0 +/- V". If "V" could have been split
554  // into two addends, say "V = X - Y", the instruction would have already
555  // been optimized into "I = Y - X" in the previous steps.
556  //
557  const FAddendCoef &CE = Opnd0.getCoef();
558  return CE.isOne() ? Opnd0.getSymVal() : 0;
559  }
560 
561  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
562  if (Opnd1_ExpNum) {
563  AddendVect AllOpnds;
564  AllOpnds.push_back(&Opnd0);
565  AllOpnds.push_back(&Opnd1_0);
566  if (Opnd1_ExpNum == 2)
567  AllOpnds.push_back(&Opnd1_1);
568 
569  if (Value *R = simplifyFAdd(AllOpnds, 1))
570  return R;
571  }
572 
573  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
574  if (Opnd0_ExpNum) {
575  AddendVect AllOpnds;
576  AllOpnds.push_back(&Opnd1);
577  AllOpnds.push_back(&Opnd0_0);
578  if (Opnd0_ExpNum == 2)
579  AllOpnds.push_back(&Opnd0_1);
580 
581  if (Value *R = simplifyFAdd(AllOpnds, 1))
582  return R;
583  }
584 
585  // Step 6: Try factorization as a last resort.
586  return performFactorization(I);
587 }
588 
589 Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
590 
591  unsigned AddendNum = Addends.size();
592  assert(AddendNum <= 4 && "Too many addends");
593 
594  // For saving intermediate results;
595  unsigned NextTmpIdx = 0;
596  FAddend TmpResult[3];
597 
598  // Points to the constant addend of the resulting simplified expression.
599  // If the resulting expression has a constant addend, it is desirable for
600  // that constant to reside at the top of the resulting expression tree.
601  // Placing the constant close to the super-expression(s) will potentially
602  // reveal further optimization opportunities in the super-expression(s).
603  //
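 // For instance (illustrative), emitting "(x + y) + C" rather than "(x + C) + y"
 // lets an enclosing fadd with another constant fold the two constants together.
 //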
604  const FAddend *ConstAdd = 0;
605 
606  // Simplified addends are placed in <SimpVect>.
607  AddendVect SimpVect;
608 
609  // The outer loop works on one symbolic-value at a time. Suppose the input
610  // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
611  // The symbolic-values will be processed in this order: x, y, z.
612  //
613  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {
614 
615  const FAddend *ThisAddend = Addends[SymIdx];
616  if (!ThisAddend) {
617  // This addend was processed before.
618  continue;
619  }
620 
621  Value *Val = ThisAddend->getSymVal();
622  unsigned StartIdx = SimpVect.size();
623  SimpVect.push_back(ThisAddend);
624 
625  // The inner loop collects addends that share the same symbolic value; these
626  // addends will later be folded into a single addend. Following the above
627  // example, if the symbolic value "y" is being processed, the inner loop
628  // will collect the two addends "<b1, y>" and "<b2, y>", which will later
629  // be folded into "<b1+b2, y>".
630  //
631  for (unsigned SameSymIdx = SymIdx + 1;
632  SameSymIdx < AddendNum; SameSymIdx++) {
633  const FAddend *T = Addends[SameSymIdx];
634  if (T && T->getSymVal() == Val) {
635  // Set to null so that the next iteration of the outer loop will not
636  // process this addend again.
637  Addends[SameSymIdx] = 0;
638  SimpVect.push_back(T);
639  }
640  }
641 
642  // If multiple addends share same symbolic value, fold them together.
643  if (StartIdx + 1 != SimpVect.size()) {
644  FAddend &R = TmpResult[NextTmpIdx ++];
645  R = *SimpVect[StartIdx];
646  for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
647  R += *SimpVect[Idx];
648 
649  // Pop all addends being folded and push the resulting folded addend.
650  SimpVect.resize(StartIdx);
651  if (Val != 0) {
652  if (!R.isZero()) {
653  SimpVect.push_back(&R);
654  }
655  } else {
656  // Don't push constant addend at this time. It will be the last element
657  // of <SimpVect>.
658  ConstAdd = &R;
659  }
660  }
661  }
662 
663  assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
664  "out-of-bound access");
665 
666  if (ConstAdd)
667  SimpVect.push_back(ConstAdd);
668 
669  Value *Result;
670  if (!SimpVect.empty())
671  Result = createNaryFAdd(SimpVect, InstrQuota);
672  else {
673  // The addition is folded to 0.0.
674  Result = ConstantFP::get(Instr->getType(), 0.0);
675  }
676 
677  return Result;
678 }
679 
680 Value *FAddCombine::createNaryFAdd
681  (const AddendVect &Opnds, unsigned InstrQuota) {
682  assert(!Opnds.empty() && "Expect at least one addend");
683 
684  // Step 1: Check if the # of instructions needed exceeds the quota.
685  //
686  unsigned InstrNeeded = calcInstrNumber(Opnds);
687  if (InstrNeeded > InstrQuota)
688  return 0;
689 
690  initCreateInstNum();
691 
692  // Step 2: Emit the N-ary addition.
693  // Note that at most three instructions are involved in Fadd-InstCombine: the
694  // addition in question, and at most two neighboring instructions.
695  // The resulting optimized addition should have at least one less instruction
696  // than the original addition expression tree. This implies that the resulting
697  // N-ary addition has at most two instructions, and we don't need to worry
698  // about tree-height when constructing the N-ary addition.
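 // For example (illustrative), the addends <1, x>, <-1, y>, <2.5, z> would be
 // emitted as "(x - y) + (z * 2.5)", i.e. one fsub, one fmul and one fadd.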
699 
700  Value *LastVal = 0;
701  bool LastValNeedNeg = false;
702 
703  // Iterate the addends, creating fadd/fsub using two adjacent addends.
704  for (AddendVect::const_iterator I = Opnds.begin(), E = Opnds.end();
705  I != E; I++) {
706  bool NeedNeg;
707  Value *V = createAddendVal(**I, NeedNeg);
708  if (!LastVal) {
709  LastVal = V;
710  LastValNeedNeg = NeedNeg;
711  continue;
712  }
713 
714  if (LastValNeedNeg == NeedNeg) {
715  LastVal = createFAdd(LastVal, V);
716  continue;
717  }
718 
719  if (LastValNeedNeg)
720  LastVal = createFSub(V, LastVal);
721  else
722  LastVal = createFSub(LastVal, V);
723 
724  LastValNeedNeg = false;
725  }
726 
727  if (LastValNeedNeg) {
728  LastVal = createFNeg(LastVal);
729  }
730 
731  #ifndef NDEBUG
732  assert(CreateInstrNum == InstrNeeded &&
733  "Inconsistent in instruction numbers");
734  #endif
735 
736  return LastVal;
737 }
738 
739 Value *FAddCombine::createFSub
740  (Value *Opnd0, Value *Opnd1) {
741  Value *V = Builder->CreateFSub(Opnd0, Opnd1);
742  if (Instruction *I = dyn_cast<Instruction>(V))
743  createInstPostProc(I);
744  return V;
745 }
746 
747 Value *FAddCombine::createFNeg(Value *V) {
748  Value *Zero = cast<Value>(ConstantFP::get(V->getType(), 0.0));
749  return createFSub(Zero, V);
750 }
751 
752 Value *FAddCombine::createFAdd
753  (Value *Opnd0, Value *Opnd1) {
754  Value *V = Builder->CreateFAdd(Opnd0, Opnd1);
755  if (Instruction *I = dyn_cast<Instruction>(V))
756  createInstPostProc(I);
757  return V;
758 }
759 
760 Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
761  Value *V = Builder->CreateFMul(Opnd0, Opnd1);
762  if (Instruction *I = dyn_cast<Instruction>(V))
763  createInstPostProc(I);
764  return V;
765 }
766 
767 Value *FAddCombine::createFDiv(Value *Opnd0, Value *Opnd1) {
768  Value *V = Builder->CreateFDiv(Opnd0, Opnd1);
769  if (Instruction *I = dyn_cast<Instruction>(V))
770  createInstPostProc(I);
771  return V;
772 }
773 
774 void FAddCombine::createInstPostProc(Instruction *NewInstr) {
775  NewInstr->setDebugLoc(Instr->getDebugLoc());
776 
777  // Keep track of the number of instructions created.
778  incCreateInstNum();
779 
780  // Propagate fast-math flags
781  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
782 }
783 
784 // Return the number of instructions needed to emit the N-ary addition.
785 // NOTE: Keep this function in sync with createAddendVal().
786 unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
787  unsigned OpndNum = Opnds.size();
788  unsigned InstrNeeded = OpndNum - 1;
789 
790  // The number of addends in the form of "(-1)*x".
791  unsigned NegOpndNum = 0;
792 
793  // Adjust the number of instructions needed to emit the N-ary add.
794  for (AddendVect::const_iterator I = Opnds.begin(), E = Opnds.end();
795  I != E; I++) {
796  const FAddend *Opnd = *I;
797  if (Opnd->isConstant())
798  continue;
799 
800  const FAddendCoef &CE = Opnd->getCoef();
801  if (CE.isMinusOne() || CE.isMinusTwo())
802  NegOpndNum++;
803 
804  // Let the addend be "c * x". If "c == +/-1", the value of the addend
805  // is immediately available; otherwise, it needs exactly one instruction
806  // to evaluate the value.
807  if (!CE.isMinusOne() && !CE.isOne())
808  InstrNeeded++;
809  }
810  if (NegOpndNum == OpndNum)
811  InstrNeeded++;
812  return InstrNeeded;
813 }
814 
815 // Input Addend       Value              NeedNeg (output)
816 // ================================================================
817 //   Constant C       C                  false
818 //   <+/-1, V>        V                  coefficient is -1
819 //   <2/-2, V>        "fadd V, V"        coefficient is -2
820 //   <C, V>           "fmul V, C"        false
821 //
822 // NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
823 Value *FAddCombine::createAddendVal
824  (const FAddend &Opnd, bool &NeedNeg) {
825  const FAddendCoef &Coeff = Opnd.getCoef();
826 
827  if (Opnd.isConstant()) {
828  NeedNeg = false;
829  return Coeff.getValue(Instr->getType());
830  }
831 
832  Value *OpndVal = Opnd.getSymVal();
833 
834  if (Coeff.isMinusOne() || Coeff.isOne()) {
835  NeedNeg = Coeff.isMinusOne();
836  return OpndVal;
837  }
838 
839  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
840  NeedNeg = Coeff.isMinusTwo();
841  return createFAdd(OpndVal, OpndVal);
842  }
843 
844  NeedNeg = false;
845  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
846 }
847 
848 /// AddOne - Add one to a ConstantInt.
849 static Constant *AddOne(Constant *C) {
850  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
851 }
852 
853 /// SubOne - Subtract one from a ConstantInt.
854 static Constant *SubOne(ConstantInt *C) {
855  return ConstantInt::get(C->getContext(), C->getValue()-1);
856 }
857 
858 
859 // dyn_castFoldableMul - If this value is a multiply that can be folded into
860 // other computations (because it has a constant operand), return the
861 // non-constant operand of the multiply, and set CST to point to the multiplier.
862 // Otherwise, return null.
863 //
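// For example (illustrative): for V = "shl i32 %x, 3" (with a single use), CST is
// set to 8 (i.e. 1 << 3) and %x is returned; for "mul i32 %x, 5", CST is set to 5.
//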
864 static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
865  if (!V->hasOneUse() || !V->getType()->isIntegerTy())
866  return 0;
867 
868  Instruction *I = dyn_cast<Instruction>(V);
869  if (I == 0) return 0;
870 
871  if (I->getOpcode() == Instruction::Mul)
872  if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
873  return I->getOperand(0);
874  if (I->getOpcode() == Instruction::Shl)
875  if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
876  // The multiplier is really 1 << CST.
877  uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
878  uint32_t CSTVal = CST->getLimitedValue(BitWidth);
879  CST = ConstantInt::get(V->getType()->getContext(),
880  APInt::getOneBitSet(BitWidth, CSTVal));
881  return I->getOperand(0);
882  }
883  return 0;
884 }
885 
886 
887 /// WillNotOverflowSignedAdd - Return true if we can prove that:
888 /// (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
889 /// This basically requires proving that the add in the original type would not
890 /// overflow to change the sign bit or have a carry out.
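/// For example (illustrative, i8): 100 + 100 wraps to -56, so (sext i8 -56 to i16)
/// yields -56, whereas (add (sext i8 100), (sext i8 100)) yields 200 in i16; the
/// equivalence only holds when the narrow add cannot overflow.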
891 bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
892  // There are different heuristics we can use for this. Here are some simple
893  // ones.
894 
895  // Add has the property that adding any two 2's complement numbers can only
896  // have one carry bit which can change a sign. As such, if LHS and RHS each
897  // have at least two sign bits, we know that the addition of the two values
898  // will sign extend fine.
899  if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
900  return true;
901 
902 
903  // If one of the operands only has one non-zero bit, and if the other operand
904  // has a known-zero bit in a more significant place than it (not including the
905 // sign bit), the ripple may go up to and fill the zero, but won't change the
906  // sign. For example, (X & ~4) + 1.
907 
908  // TODO: Implement.
909 
910  return false;
911 }
912 
913 Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
914  bool Changed = SimplifyAssociativeOrCommutative(I);
915  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
916 
917  if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
918  I.hasNoUnsignedWrap(), TD))
919  return ReplaceInstUsesWith(I, V);
920 
921  // (A*B)+(A*C) -> A*(B+C) etc
922  if (Value *V = SimplifyUsingDistributiveLaws(I))
923  return ReplaceInstUsesWith(I, V);
924 
925  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
926  // X + (signbit) --> X ^ signbit
927  const APInt &Val = CI->getValue();
928  if (Val.isSignBit())
929  return BinaryOperator::CreateXor(LHS, RHS);
930 
931  // See if SimplifyDemandedBits can simplify this. This handles stuff like
932  // (X & 254)+1 -> (X&254)|1
933  if (SimplifyDemandedInstructionBits(I))
934  return &I;
935 
936  // zext(bool) + C -> bool ? C + 1 : C
937  if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
938  if (ZI->getSrcTy()->isIntegerTy(1))
939  return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
940 
941  Value *XorLHS = 0; ConstantInt *XorRHS = 0;
942  if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
943  uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
944  const APInt &RHSVal = CI->getValue();
945  unsigned ExtendAmt = 0;
946  // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
947  // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
948  if (XorRHS->getValue() == -RHSVal) {
949  if (RHSVal.isPowerOf2())
950  ExtendAmt = TySizeBits - RHSVal.logBase2() - 1;
951  else if (XorRHS->getValue().isPowerOf2())
952  ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
953  }
954 
955  if (ExtendAmt) {
956  APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
957  if (!MaskedValueIsZero(XorLHS, Mask))
958  ExtendAmt = 0;
959  }
960 
961  if (ExtendAmt) {
962  Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt);
963  Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext");
964  return BinaryOperator::CreateAShr(NewShl, ShAmt);
965  }
966 
967  // If this is a xor that was canonicalized from a sub, turn it back into
968  // a sub and fuse this add with it.
969  if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
970  IntegerType *IT = cast<IntegerType>(I.getType());
971  APInt LHSKnownOne(IT->getBitWidth(), 0);
972  APInt LHSKnownZero(IT->getBitWidth(), 0);
973  ComputeMaskedBits(XorLHS, LHSKnownZero, LHSKnownOne);
974  if ((XorRHS->getValue() | LHSKnownZero).isAllOnesValue())
975  return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
976  XorLHS);
977  }
978  // (X + signbit) + C could have gotten canonicalized to (X ^ signbit) + C,
979  // so transform it into (X + (signbit ^ C)).
980  if (XorRHS->getValue().isSignBit())
981  return BinaryOperator::CreateAdd(XorLHS,
982  ConstantExpr::getXor(XorRHS, CI));
983  }
984  }
985 
986  if (isa<Constant>(RHS) && isa<PHINode>(LHS))
987  if (Instruction *NV = FoldOpIntoPhi(I))
988  return NV;
989 
990  if (I.getType()->isIntegerTy(1))
991  return BinaryOperator::CreateXor(LHS, RHS);
992 
993  // X + X --> X << 1
994  if (LHS == RHS) {
995  BinaryOperator *New =
996  BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1));
997  New->setHasNoSignedWrap(I.hasNoSignedWrap());
998  New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
999  return New;
1000  }
1001 
1002  // -A + B --> B - A
1003  // -A + -B --> -(A + B)
1004  if (Value *LHSV = dyn_castNegVal(LHS)) {
1005  if (!isa<Constant>(RHS))
1006  if (Value *RHSV = dyn_castNegVal(RHS)) {
1007  Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
1008  return BinaryOperator::CreateNeg(NewAdd);
1009  }
1010 
1011  return BinaryOperator::CreateSub(RHS, LHSV);
1012  }
1013 
1014  // A + -B --> A - B
1015  if (!isa<Constant>(RHS))
1016  if (Value *V = dyn_castNegVal(RHS))
1017  return BinaryOperator::CreateSub(LHS, V);
1018 
1019 
1020  ConstantInt *C2;
1021  if (Value *X = dyn_castFoldableMul(LHS, C2)) {
1022  if (X == RHS) // X*C + X --> X * (C+1)
1023  return BinaryOperator::CreateMul(RHS, AddOne(C2));
1024 
1025  // X*C1 + X*C2 --> X * (C1+C2)
1026  ConstantInt *C1;
1027  if (X == dyn_castFoldableMul(RHS, C1))
1028  return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2));
1029  }
1030 
1031  // X + X*C --> X * (C+1)
1032  if (dyn_castFoldableMul(RHS, C2) == LHS)
1033  return BinaryOperator::CreateMul(LHS, AddOne(C2));
1034 
1035  // A+B --> A|B iff A and B have no bits set in common.
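 // For example (illustrative, i8): (X & 0xF0) + (Y & 0x0F) --> (X & 0xF0) | (Y & 0x0F),
 // since the known-zero bits of the two operands together cover every bit position.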
1036  if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
1037  APInt LHSKnownOne(IT->getBitWidth(), 0);
1038  APInt LHSKnownZero(IT->getBitWidth(), 0);
1039  ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
1040  if (LHSKnownZero != 0) {
1041  APInt RHSKnownOne(IT->getBitWidth(), 0);
1042  APInt RHSKnownZero(IT->getBitWidth(), 0);
1043  ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
1044 
1045  // No bits in common -> bitwise or.
1046  if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
1047  return BinaryOperator::CreateOr(LHS, RHS);
1048  }
1049  }
1050 
1051  // W*X + Y*Z --> W * (X+Z) iff W == Y
1052  {
1053  Value *W, *X, *Y, *Z;
1054  if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
1055  match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
1056  if (W != Y) {
1057  if (W == Z) {
1058  std::swap(Y, Z);
1059  } else if (Y == X) {
1060  std::swap(W, X);
1061  } else if (X == Z) {
1062  std::swap(Y, Z);
1063  std::swap(W, X);
1064  }
1065  }
1066 
1067  if (W == Y) {
1068  Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName());
1069  return BinaryOperator::CreateMul(W, NewAdd);
1070  }
1071  }
1072  }
1073 
1074  if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
1075  Value *X = 0;
1076  if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
1077  return BinaryOperator::CreateSub(SubOne(CRHS), X);
1078 
1079  // (X & FF00) + xx00 -> (X+xx00) & FF00
1080  if (LHS->hasOneUse() &&
1081  match(LHS, m_And(m_Value(X), m_ConstantInt(C2))) &&
1082  CRHS->getValue() == (CRHS->getValue() & C2->getValue())) {
1083  // See if all bits from the first bit set in the Add RHS up are included
1084  // in the mask. First, get the rightmost bit.
1085  const APInt &AddRHSV = CRHS->getValue();
1086 
1087  // Form a mask of all bits from the lowest bit added through the top.
1088  APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));
1089 
1090  // See if the and mask includes all of these bits.
1091  APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());
1092 
1093  if (AddRHSHighBits == AddRHSHighBitsAnd) {
1094  // Okay, the xform is safe. Insert the new add pronto.
1095  Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
1096  return BinaryOperator::CreateAnd(NewAdd, C2);
1097  }
1098  }
1099 
1100  // Try to fold constant add into select arguments.
1101  if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
1102  if (Instruction *R = FoldOpIntoSelect(I, SI))
1103  return R;
1104  }
1105 
1106  // add (select X 0 (sub n A)) A --> select X A n
1107  {
1108  SelectInst *SI = dyn_cast<SelectInst>(LHS);
1109  Value *A = RHS;
1110  if (!SI) {
1111  SI = dyn_cast<SelectInst>(RHS);
1112  A = LHS;
1113  }
1114  if (SI && SI->hasOneUse()) {
1115  Value *TV = SI->getTrueValue();
1116  Value *FV = SI->getFalseValue();
1117  Value *N;
1118 
1119  // Can we fold the add into the argument of the select?
1120  // We check both true and false select arguments for a matching subtract.
1121  if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
1122  // Fold the add into the true select value.
1123  return SelectInst::Create(SI->getCondition(), N, A);
1124 
1125  if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
1126  // Fold the add into the false select value.
1127  return SelectInst::Create(SI->getCondition(), A, N);
1128  }
1129  }
1130 
1131  // Check for (add (sext x), y), see if we can merge this into an
1132  // integer add followed by a sext.
1133  if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
1134  // (add (sext x), cst) --> (sext (add x, cst'))
1135  if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
1136  Constant *CI =
1137  ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
1138  if (LHSConv->hasOneUse() &&
1139  ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
1140  WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
1141  // Insert the new, smaller add.
1142  Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
1143  CI, "addconv");
1144  return new SExtInst(NewAdd, I.getType());
1145  }
1146  }
1147 
1148  // (add (sext x), (sext y)) --> (sext (add int x, y))
1149  if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
1150  // Only do this if x/y have the same type, if at least one of them has a
1151  // single use (so we don't increase the number of sexts), and if the
1152  // integer add will not overflow.
1153  if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
1154  (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
1155  WillNotOverflowSignedAdd(LHSConv->getOperand(0),
1156  RHSConv->getOperand(0))) {
1157  // Insert the new integer add.
1158  Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
1159  RHSConv->getOperand(0), "addconv");
1160  return new SExtInst(NewAdd, I.getType());
1161  }
1162  }
1163  }
1164 
1165  // Check for (x & y) + (x ^ y)
1166  {
1167  Value *A = 0, *B = 0;
1168  if (match(RHS, m_Xor(m_Value(A), m_Value(B))) &&
1169  (match(LHS, m_And(m_Specific(A), m_Specific(B))) ||
1170  match(LHS, m_And(m_Specific(B), m_Specific(A)))))
1171  return BinaryOperator::CreateOr(A, B);
1172 
1173  if (match(LHS, m_Xor(m_Value(A), m_Value(B))) &&
1174  (match(RHS, m_And(m_Specific(A), m_Specific(B))) ||
1175  match(RHS, m_And(m_Specific(B), m_Specific(A)))))
1176  return BinaryOperator::CreateOr(A, B);
1177  }
1178 
1179  return Changed ? &I : 0;
1180 }
1181 
1182 Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
1183  bool Changed = SimplifyAssociativeOrCommutative(I);
1184  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1185 
1186  if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), TD))
1187  return ReplaceInstUsesWith(I, V);
1188 
1189  if (isa<Constant>(RHS)) {
1190  if (isa<PHINode>(LHS))
1191  if (Instruction *NV = FoldOpIntoPhi(I))
1192  return NV;
1193 
1194  if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
1195  if (Instruction *NV = FoldOpIntoSelect(I, SI))
1196  return NV;
1197  }
1198 
1199  // -A + B --> B - A
1200  // -A + -B --> -(A + B)
1201  if (Value *LHSV = dyn_castFNegVal(LHS))
1202  return BinaryOperator::CreateFSub(RHS, LHSV);
1203 
1204  // A + -B --> A - B
1205  if (!isa<Constant>(RHS))
1206  if (Value *V = dyn_castFNegVal(RHS))
1207  return BinaryOperator::CreateFSub(LHS, V);
1208 
1209  // Check for (fadd double (sitofp x), y), see if we can merge this into an
1210  // integer add followed by a promotion.
1211  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
1212  // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
1213  // ... if the constant fits in the integer value. This is useful for things
1214  // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
1215  // requires a constant pool load, and generally allows the add to be better
1216  // instcombined.
1217  if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
1218  Constant *CI =
1219  ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType());
1220  if (LHSConv->hasOneUse() &&
1221  ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
1222  WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
1223  // Insert the new integer add.
1224  Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
1225  CI, "addconv");
1226  return new SIToFPInst(NewAdd, I.getType());
1227  }
1228  }
1229 
1230  // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
1231  if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
1232  // Only do this if x/y have the same type, if at least one of them has a
1233  // single use (so we don't increase the number of int->fp conversions),
1234  // and if the integer add will not overflow.
1235  if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
1236  (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
1237  WillNotOverflowSignedAdd(LHSConv->getOperand(0),
1238  RHSConv->getOperand(0))) {
1239  // Insert the new integer add.
1240  Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
1241  RHSConv->getOperand(0),"addconv");
1242  return new SIToFPInst(NewAdd, I.getType());
1243  }
1244  }
1245  }
1246 
1247  // select C, 0, B + select C, A, 0 -> select C, A, B
1248  {
1249  Value *A1, *B1, *C1, *A2, *B2, *C2;
1250  if (match(LHS, m_Select(m_Value(C1), m_Value(A1), m_Value(B1))) &&
1251  match(RHS, m_Select(m_Value(C2), m_Value(A2), m_Value(B2)))) {
1252  if (C1 == C2) {
1253  Constant *Z1=0, *Z2=0;
1254  Value *A, *B, *C=C1;
1255  if (match(A1, m_AnyZero()) && match(B2, m_AnyZero())) {
1256  Z1 = dyn_cast<Constant>(A1); A = A2;
1257  Z2 = dyn_cast<Constant>(B2); B = B1;
1258  } else if (match(B1, m_AnyZero()) && match(A2, m_AnyZero())) {
1259  Z1 = dyn_cast<Constant>(B1); B = B2;
1260  Z2 = dyn_cast<Constant>(A2); A = A1;
1261  }
1262 
1263  if (Z1 && Z2 &&
1264  (I.hasNoSignedZeros() ||
1265  (Z1->isNegativeZeroValue() && Z2->isNegativeZeroValue()))) {
1266  return SelectInst::Create(C, A, B);
1267  }
1268  }
1269  }
1270  }
1271 
1272  if (I.hasUnsafeAlgebra()) {
1273  if (Value *V = FAddCombine(Builder).simplify(&I))
1274  return ReplaceInstUsesWith(I, V);
1275  }
1276 
1277  return Changed ? &I : 0;
1278 }
1279 
1280 
1281 /// Optimize the difference of two pointers into the same array into a size. Consider:
1282 /// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
1283 /// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
1284 ///
1285 Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
1286  Type *Ty) {
1287  assert(TD && "Must have target data info for this");
1288 
1289  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
1290  // this.
1291  bool Swapped = false;
1292  GEPOperator *GEP1 = 0, *GEP2 = 0;
1293 
1294  // For now we require one side to be the base pointer "A" or a constant
1295  // GEP derived from it.
1296  if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
1297  // (gep X, ...) - X
1298  if (LHSGEP->getOperand(0) == RHS) {
1299  GEP1 = LHSGEP;
1300  Swapped = false;
1301  } else if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
1302  // (gep X, ...) - (gep X, ...)
1303  if (LHSGEP->getOperand(0)->stripPointerCasts() ==
1304  RHSGEP->getOperand(0)->stripPointerCasts()) {
1305  GEP2 = RHSGEP;
1306  GEP1 = LHSGEP;
1307  Swapped = false;
1308  }
1309  }
1310  }
1311 
1312  if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
1313  // X - (gep X, ...)
1314  if (RHSGEP->getOperand(0) == LHS) {
1315  GEP1 = RHSGEP;
1316  Swapped = true;
1317  } else if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
1318  // (gep X, ...) - (gep X, ...)
1319  if (RHSGEP->getOperand(0)->stripPointerCasts() ==
1320  LHSGEP->getOperand(0)->stripPointerCasts()) {
1321  GEP2 = LHSGEP;
1322  GEP1 = RHSGEP;
1323  Swapped = true;
1324  }
1325  }
1326  }
1327 
1328  // Avoid duplicating the arithmetic if GEP2 has non-constant indices and
1329  // multiple users.
1330  if (GEP1 == 0 ||
1331  (GEP2 != 0 && !GEP2->hasAllConstantIndices() && !GEP2->hasOneUse()))
1332  return 0;
1333 
1334  // Emit the offset of the GEP as an intptr_t.
1335  Value *Result = EmitGEPOffset(GEP1);
1336 
1337  // If we had a constant expression GEP on the other side offsetting the
1338  // pointer, subtract it from the offset we have.
1339  if (GEP2) {
1340  Value *Offset = EmitGEPOffset(GEP2);
1341  Result = Builder->CreateSub(Result, Offset);
1342  }
1343 
1344  // If we have p - gep(p, ...) then we have to negate the result.
1345  if (Swapped)
1346  Result = Builder->CreateNeg(Result, "diff.neg");
1347 
1348  return Builder->CreateIntCast(Result, Ty, true);
1349 }
1350 
1351 
1352 Instruction *InstCombiner::visitSub(BinaryOperator &I) {
1353  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1354 
1355  if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
1356  I.hasNoUnsignedWrap(), TD))
1357  return ReplaceInstUsesWith(I, V);
1358 
1359  // (A*B)-(A*C) -> A*(B-C) etc
1360  if (Value *V = SimplifyUsingDistributiveLaws(I))
1361  return ReplaceInstUsesWith(I, V);
1362 
1363  // If this is a 'B = x-(-A)', change to B = x+A. This preserves NSW/NUW.
1364  if (Value *V = dyn_castNegVal(Op1)) {
1365  BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
1366  Res->setHasNoSignedWrap(I.hasNoSignedWrap());
1367  Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
1368  return Res;
1369  }
1370 
1371  if (I.getType()->isIntegerTy(1))
1372  return BinaryOperator::CreateXor(Op0, Op1);
1373 
1374  // Replace (-1 - A) with (~A).
1375  if (match(Op0, m_AllOnes()))
1376  return BinaryOperator::CreateNot(Op1);
1377 
1378  if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
1379  // C - ~X == X + (1+C)
1380  Value *X = 0;
1381  if (match(Op1, m_Not(m_Value(X))))
1382  return BinaryOperator::CreateAdd(X, AddOne(C));
1383 
1384  // -(X >>u 31) -> (X >>s 31)
1385  // -(X >>s 31) -> (X >>u 31)
1386  if (C->isZero()) {
1387  Value *X; ConstantInt *CI;
1388  if (match(Op1, m_LShr(m_Value(X), m_ConstantInt(CI))) &&
1389  // Verify we are shifting out everything but the sign bit.
1390  CI->getValue() == I.getType()->getPrimitiveSizeInBits()-1)
1391  return BinaryOperator::CreateAShr(X, CI);
1392 
1393  if (match(Op1, m_AShr(m_Value(X), m_ConstantInt(CI))) &&
1394  // Verify we are shifting out everything but the sign bit.
1395  CI->getValue() == I.getType()->getPrimitiveSizeInBits()-1)
1396  return BinaryOperator::CreateLShr(X, CI);
1397  }
1398 
1399  // Try to fold constant sub into select arguments.
1400  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
1401  if (Instruction *R = FoldOpIntoSelect(I, SI))
1402  return R;
1403 
1404  // C-(X+C2) --> (C-C2)-X
1405  ConstantInt *C2;
1406  if (match(Op1, m_Add(m_Value(X), m_ConstantInt(C2))))
1407  return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
1408 
1409  if (SimplifyDemandedInstructionBits(I))
1410  return &I;
1411 
1412  // Fold (sub 0, (zext bool to B)) --> (sext bool to B)
1413  if (C->isZero() && match(Op1, m_ZExt(m_Value(X))))
1414  if (X->getType()->isIntegerTy(1))
1415  return CastInst::CreateSExtOrBitCast(X, Op1->getType());
1416 
1417  // Fold (sub 0, (sext bool to B)) --> (zext bool to B)
1418  if (C->isZero() && match(Op1, m_SExt(m_Value(X))))
1419  if (X->getType()->isIntegerTy(1))
1420  return CastInst::CreateZExtOrBitCast(X, Op1->getType());
1421  }
1422 
1423 
1424  { Value *Y;
1425  // X-(X+Y) == -Y X-(Y+X) == -Y
1426  if (match(Op1, m_Add(m_Specific(Op0), m_Value(Y))) ||
1427  match(Op1, m_Add(m_Value(Y), m_Specific(Op0))))
1428  return BinaryOperator::CreateNeg(Y);
1429 
1430  // (X-Y)-X == -Y
1431  if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
1432  return BinaryOperator::CreateNeg(Y);
1433  }
1434 
1435  if (Op1->hasOneUse()) {
1436  Value *X = 0, *Y = 0, *Z = 0;
1437  Constant *C = 0;
1438  ConstantInt *CI = 0;
1439 
1440  // (X - (Y - Z)) --> (X + (Z - Y)).
1441  if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
1442  return BinaryOperator::CreateAdd(Op0,
1443  Builder->CreateSub(Z, Y, Op1->getName()));
1444 
1445  // (X - (X & Y)) --> (X & ~Y)
1446  //
1447  if (match(Op1, m_And(m_Value(Y), m_Specific(Op0))) ||
1448  match(Op1, m_And(m_Specific(Op0), m_Value(Y))))
1449  return BinaryOperator::CreateAnd(Op0,
1450  Builder->CreateNot(Y, Y->getName() + ".not"));
1451 
1452  // 0 - (X sdiv C) -> (X sdiv -C)
1453  if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) &&
1454  match(Op0, m_Zero()))
1455  return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));
1456 
1457  // 0 - (X << Y) -> (-X << Y) when X is freely negatable.
1458  if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
1459  if (Value *XNeg = dyn_castNegVal(X))
1460  return BinaryOperator::CreateShl(XNeg, Y);
1461 
1462  // X - X*C --> X * (1-C)
1463  if (match(Op1, m_Mul(m_Specific(Op0), m_ConstantInt(CI)))) {
1464  Constant *CP1 = ConstantExpr::getSub(ConstantInt::get(I.getType(), 1), CI);
1465  return BinaryOperator::CreateMul(Op0, CP1);
1466  }
1467 
1468  // X - X<<C --> X * (1-(1<<C))
1469  if (match(Op1, m_Shl(m_Specific(Op0), m_ConstantInt(CI)))) {
1470  Constant *One = ConstantInt::get(I.getType(), 1);
1471  C = ConstantExpr::getSub(One, ConstantExpr::getShl(One, CI));
1472  return BinaryOperator::CreateMul(Op0, C);
1473  }
1474 
1475  // X - A*-B -> X + A*B
1476  // X - -A*B -> X + A*B
1477  Value *A, *B;
1478  if (match(Op1, m_Mul(m_Value(A), m_Neg(m_Value(B)))) ||
1479  match(Op1, m_Mul(m_Neg(m_Value(A)), m_Value(B))))
1480  return BinaryOperator::CreateAdd(Op0, Builder->CreateMul(A, B));
1481 
1482  // X - A*CI -> X + A*-CI
1483  // X - CI*A -> X + A*-CI
1484  if (match(Op1, m_Mul(m_Value(A), m_ConstantInt(CI))) ||
1485  match(Op1, m_Mul(m_ConstantInt(CI), m_Value(A)))) {
1486  Value *NewMul = Builder->CreateMul(A, ConstantExpr::getNeg(CI));
1487  return BinaryOperator::CreateAdd(Op0, NewMul);
1488  }
1489  }
1490 
1491  ConstantInt *C1;
1492  if (Value *X = dyn_castFoldableMul(Op0, C1)) {
1493  if (X == Op1) // X*C - X --> X * (C-1)
1494  return BinaryOperator::CreateMul(Op1, SubOne(C1));
1495 
1496  ConstantInt *C2; // X*C1 - X*C2 -> X * (C1-C2)
1497  if (X == dyn_castFoldableMul(Op1, C2))
1498  return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
1499  }
1500 
1501  // Optimize the difference of two pointers into the same array into a size. Consider:
1502  // &A[10] - &A[0]: we should compile this to "10".
1503  if (TD) {
1504  Value *LHSOp, *RHSOp;
1505  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
1506  match(Op1, m_PtrToInt(m_Value(RHSOp))))
1507  if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
1508  return ReplaceInstUsesWith(I, Res);
1509 
1510  // trunc(p)-trunc(q) -> trunc(p-q)
1511  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
1512  match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
1513  if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
1514  return ReplaceInstUsesWith(I, Res);
1515  }
1516 
1517  return 0;
1518 }
1519 
1520 Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
1521  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1522 
1523  if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), TD))
1524  return ReplaceInstUsesWith(I, V);
1525 
1526  if (isa<Constant>(Op0))
1527  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
1528  if (Instruction *NV = FoldOpIntoSelect(I, SI))
1529  return NV;
1530 
1531  // If this is a 'B = x-(-A)', change to B = x+A, potentially looking
1532  // through FP extensions/truncations along the way.
1533  if (Value *V = dyn_castFNegVal(Op1)) {
1534  Instruction *NewI = BinaryOperator::CreateFAdd(Op0, V);
1535  NewI->copyFastMathFlags(&I);
1536  return NewI;
1537  }
1538  if (FPTruncInst *FPTI = dyn_cast<FPTruncInst>(Op1)) {
1539  if (Value *V = dyn_castFNegVal(FPTI->getOperand(0))) {
1540  Value *NewTrunc = Builder->CreateFPTrunc(V, I.getType());
1541  Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewTrunc);
1542  NewI->copyFastMathFlags(&I);
1543  return NewI;
1544  }
1545  } else if (FPExtInst *FPEI = dyn_cast<FPExtInst>(Op1)) {
1546  if (Value *V = dyn_castFNegVal(FPEI->getOperand(0))) {
1547  Value *NewExt = Builder->CreateFPExt(V, I.getType());
1548  Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewExt);
1549  NewI->copyFastMathFlags(&I);
1550  return NewI;
1551  }
1552  }
1553 
1554  if (I.hasUnsafeAlgebra()) {
1555  if (Value *V = FAddCombine(Builder).simplify(&I))
1556  return ReplaceInstUsesWith(I, V);
1557  }
1558 
1559  return 0;
1560 }