SROA.cpp
1 //===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This transformation implements the well known scalar replacement of
11 /// aggregates transformation. It tries to identify promotable elements of an
12 /// aggregate alloca, and promote them to registers. It will also try to
13 /// convert uses of an element (or set of elements) of an alloca into a vector
14 /// or bitfield-style integer scalar if appropriate.
15 ///
16 /// It works to do this with minimal slicing of the alloca so that regions
17 /// which are merely transferred in and out of external memory remain unchanged
18 /// and are not decomposed to scalar code.
19 ///
20 /// Because this also performs alloca promotion, it can be thought of as also
21 /// serving the purpose of SSA formation. The algorithm iterates on the
22 /// function until all opportunities for promotion have been realized.
23 ///
24 //===----------------------------------------------------------------------===//
25 
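For orientation, here is a hand-written sketch (illustrative only; the function and value names are invented) of the rewrite this file implements, in this LLVM version's IR syntax:

  ; Before: an aggregate alloca whose uses are fully analyzable.
  define i32 @example() {
  entry:
    %pair = alloca { i32, i32 }
    %a = getelementptr inbounds { i32, i32 }* %pair, i32 0, i32 0
    store i32 1, i32* %a
    %v = load i32* %a
    ret i32 %v
  }
  ; After SROA slices and promotes the alloca, the store/load pair becomes
  ; a plain SSA value and the body reduces to:
  ;   ret i32 1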
26 #define DEBUG_TYPE "sroa"
27 #include "llvm/Transforms/Scalar.h"
28 #include "llvm/ADT/STLExtras.h"
29 #include "llvm/ADT/SetVector.h"
30 #include "llvm/ADT/SmallVector.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/Analysis/Dominators.h"
33 #include "llvm/Analysis/Loads.h"
34 #include "llvm/Analysis/PtrUseVisitor.h"
35 #include "llvm/Analysis/ValueTracking.h"
36 #include "llvm/DIBuilder.h"
37 #include "llvm/DebugInfo.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DerivedTypes.h"
41 #include "llvm/IR/Function.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/IntrinsicInst.h"
45 #include "llvm/IR/LLVMContext.h"
46 #include "llvm/IR/Operator.h"
47 #include "llvm/InstVisitor.h"
48 #include "llvm/Pass.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Compiler.h"
51 #include "llvm/Support/Debug.h"
52 #include "llvm/Support/ErrorHandling.h"
53 #include "llvm/Support/MathExtras.h"
54 #include "llvm/Support/raw_ostream.h"
55 #include "llvm/Transforms/Utils/Local.h"
56 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
57 #include "llvm/Transforms/Utils/SSAUpdater.h"
58 using namespace llvm;
59 
60 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
61 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
62 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
63 STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
64 STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
65 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
66 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
67 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
68 STATISTIC(NumDeleted, "Number of instructions deleted");
69 STATISTIC(NumVectorized, "Number of vectorized aggregates");
70 
71 /// Hidden option to force the pass to not use DomTree and mem2reg, instead
72 /// forming SSA values through the SSAUpdater infrastructure.
73 static cl::opt<bool>
74 ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);
75 
76 namespace {
77 /// \brief A custom IRBuilder inserter which prefixes all names if they are
78 /// preserved.
79 template <bool preserveNames = true>
80 class IRBuilderPrefixedInserter :
81  public IRBuilderDefaultInserter<preserveNames> {
82  std::string Prefix;
83 
84 public:
85  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }
86 
87 protected:
88  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
89  BasicBlock::iterator InsertPt) const {
90  IRBuilderDefaultInserter<preserveNames>::InsertHelper(
91  I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt);
92  }
93 };
94 
95 // Specialization for not preserving the name is trivial.
96 template <>
97 class IRBuilderPrefixedInserter<false> :
98  public IRBuilderDefaultInserter<false> {
99 public:
100  void SetNamePrefix(const Twine &P) {}
101 };
102 
103 /// \brief Provide a typedef for IRBuilder that drops names in release builds.
104 #ifndef NDEBUG
105 typedef llvm::IRBuilder<true, ConstantFolder,
106  IRBuilderPrefixedInserter<true> > IRBuilderTy;
107 #else
108 typedef llvm::IRBuilder<false, ConstantFolder,
109  IRBuilderPrefixedInserter<false> > IRBuilderTy;
110 #endif
111 }
112 
113 namespace {
114 /// \brief A used slice of an alloca.
115 ///
116 /// This structure represents a slice of an alloca used by some instruction. It
117 /// stores both the begin and end offsets of this use, a pointer to the use
118 /// itself, and a flag indicating whether we can classify the use as splittable
119 /// or not when forming partitions of the alloca.
120 class Slice {
121  /// \brief The beginning offset of the range.
122  uint64_t BeginOffset;
123 
124  /// \brief The ending offset, not included in the range.
125  uint64_t EndOffset;
126 
127  /// \brief Storage for both the use of this slice and whether it can be
128  /// split.
129  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;
130 
131 public:
132  Slice() : BeginOffset(), EndOffset() {}
133  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
134  : BeginOffset(BeginOffset), EndOffset(EndOffset),
135  UseAndIsSplittable(U, IsSplittable) {}
136 
137  uint64_t beginOffset() const { return BeginOffset; }
138  uint64_t endOffset() const { return EndOffset; }
139 
140  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
141  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }
142 
143  Use *getUse() const { return UseAndIsSplittable.getPointer(); }
144 
145  bool isDead() const { return getUse() == 0; }
146  void kill() { UseAndIsSplittable.setPointer(0); }
147 
148  /// \brief Support for ordering ranges.
149  ///
150  /// This provides an ordering over ranges such that start offsets are
151  /// always increasing, and within equal start offsets, the end offsets are
152  /// decreasing. Thus the spanning range comes first in a cluster with the
153  /// same start position.
154  bool operator<(const Slice &RHS) const {
155  if (beginOffset() < RHS.beginOffset()) return true;
156  if (beginOffset() > RHS.beginOffset()) return false;
157  if (isSplittable() != RHS.isSplittable()) return !isSplittable();
158  if (endOffset() > RHS.endOffset()) return true;
159  return false;
160  }
161 
162  /// \brief Support comparison with a single offset to allow binary searches.
163  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
164  uint64_t RHSOffset) {
165  return LHS.beginOffset() < RHSOffset;
166  }
167  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
168  const Slice &RHS) {
169  return LHSOffset < RHS.beginOffset();
170  }
171 
172  bool operator==(const Slice &RHS) const {
173  return isSplittable() == RHS.isSplittable() &&
174  beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
175  }
176  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
177 };
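A worked example of this ordering: given splittable slices [0,16), [0,8), and [4,12), the sorted order is [0,16), [0,8), [4,12), so the slice spanning the whole cluster comes first. Note that splittability outranks the end offset: at the same start offset, an unsplittable [0,8) sorts before even a splittable [0,16).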
178 } // end anonymous namespace
179 
180 namespace llvm {
181 template <typename T> struct isPodLike;
182 template <> struct isPodLike<Slice> {
183  static const bool value = true;
184 };
185 }
186 
187 namespace {
188 /// \brief Representation of the alloca slices.
189 ///
190 /// This class represents the slices of an alloca which are formed by its
191 /// various uses. If a pointer escapes, we can't fully build a representation
192 /// for the slices used and we reflect that in this structure. The uses are
193 /// stored, sorted by increasing beginning offset and with unsplittable slices
194 /// starting at a particular offset before splittable slices.
195 class AllocaSlices {
196 public:
197  /// \brief Construct the slices of a particular alloca.
198  AllocaSlices(const DataLayout &DL, AllocaInst &AI);
199 
200  /// \brief Test whether a pointer to the allocation escapes our analysis.
201  ///
202  /// If this is true, the slices are never fully built and should be
203  /// ignored.
204  bool isEscaped() const { return PointerEscapingInstr; }
205 
206  /// \brief Support for iterating over the slices.
207  /// @{
208  typedef SmallVectorImpl<Slice>::iterator iterator;
209  iterator begin() { return Slices.begin(); }
210  iterator end() { return Slices.end(); }
211 
212  typedef SmallVectorImpl<Slice>::const_iterator const_iterator;
213  const_iterator begin() const { return Slices.begin(); }
214  const_iterator end() const { return Slices.end(); }
215  /// @}
216 
217  /// \brief Allow iterating the dead users for this alloca.
218  ///
219  /// These are instructions which will never actually use the alloca as they
220  /// are outside the allocated range. They are safe to replace with undef and
221  /// delete.
222  /// @{
223  typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
224  dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
225  dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
226  /// @}
227 
228  /// \brief Allow iterating the dead expressions referring to this alloca.
229  ///
230  /// These are operands which cannot actually be used to refer to the
231  /// alloca as they are outside its range and the user doesn't correct for
232  /// that. These mostly consist of PHI node inputs and the like which we just
233  /// need to replace with undef.
234  /// @{
235  typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
236  dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
237  dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
238  /// @}
239 
240 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
241  void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
242  void printSlice(raw_ostream &OS, const_iterator I,
243  StringRef Indent = " ") const;
244  void printUse(raw_ostream &OS, const_iterator I,
245  StringRef Indent = " ") const;
246  void print(raw_ostream &OS) const;
247  void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
248  void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
249 #endif
250 
251 private:
252  template <typename DerivedT, typename RetT = void> class BuilderBase;
253  class SliceBuilder;
254  friend class AllocaSlices::SliceBuilder;
255 
256 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
257  /// \brief Handle to alloca instruction to simplify method interfaces.
258  AllocaInst &AI;
259 #endif
260 
261  /// \brief The instruction responsible for this alloca not having a known set
262  /// of slices.
263  ///
264  /// When an instruction (potentially) escapes the pointer to the alloca, we
265  /// store a pointer to that here and abort trying to form slices of the
266  /// alloca. This will be null if the alloca slices are analyzed successfully.
267  Instruction *PointerEscapingInstr;
268 
269  /// \brief The slices of the alloca.
270  ///
271  /// We store a vector of the slices formed by uses of the alloca here. This
272  /// vector is sorted by increasing begin offset, and then the unsplittable
273  /// slices before the splittable ones. See the Slice inner class for more
274  /// details.
275  SmallVector<Slice, 8> Slices;
276 
277  /// \brief Instructions which will become dead if we rewrite the alloca.
278  ///
279  /// Note that these are not separated by slice. This is because we expect an
280  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
281  /// all these instructions can simply be removed and replaced with undef as
282  /// they come from outside of the allocated space.
283  SmallVector<Instruction *, 8> DeadUsers;
284 
285  /// \brief Operands which will become dead if we rewrite the alloca.
286  ///
287  /// These are operands that in their particular use can be replaced with
288  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
289  /// to PHI nodes and the like. They aren't entirely dead (there might be
290  /// a GEP back into the bounds using it elsewhere), nor is the PHI, but we
291  /// want to swap this particular input for undef to simplify the use lists of
292  /// the alloca.
293  SmallVector<Use *, 8> DeadOperands;
294 };
295 }
296 
297 static Value *foldSelectInst(SelectInst &SI) {
298  // If the condition being selected on is a constant or the same value is
299  // being selected between, fold the select. Yes this does (rarely) happen
300  // early on.
301  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
302  return SI.getOperand(1+CI->isZero());
303  if (SI.getOperand(1) == SI.getOperand(2))
304  return SI.getOperand(1);
305 
306  return 0;
307 }
308 
309 /// \brief Builder for the alloca slices.
310 ///
311 /// This class builds a set of alloca slices by recursively visiting the uses
312 /// of an alloca and making a slice for each load and store at each offset.
313 class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
314  friend class PtrUseVisitor<SliceBuilder>;
315  friend class InstVisitor<SliceBuilder>;
316  typedef PtrUseVisitor<SliceBuilder> Base;
317 
318  const uint64_t AllocSize;
319  AllocaSlices &S;
320 
321  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
322  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;
323 
324  /// \brief Set to de-duplicate dead instructions found in the use walk.
325  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
326 
327 public:
328  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &S)
329      : PtrUseVisitor<SliceBuilder>(DL),
330  AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), S(S) {}
331 
332 private:
333  void markAsDead(Instruction &I) {
334  if (VisitedDeadInsts.insert(&I))
335  S.DeadUsers.push_back(&I);
336  }
337 
338  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
339  bool IsSplittable = false) {
340  // Completely skip uses which have a zero size or start either before or
341  // past the end of the allocation.
342  if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) {
343  DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
344  << " which has zero size or starts outside of the "
345  << AllocSize << " byte alloca:\n"
346  << " alloca: " << S.AI << "\n"
347  << " use: " << I << "\n");
348  return markAsDead(I);
349  }
350 
351  uint64_t BeginOffset = Offset.getZExtValue();
352  uint64_t EndOffset = BeginOffset + Size;
353 
354  // Clamp the end offset to the end of the allocation. Note that this is
355  // formulated to handle even the case where "BeginOffset + Size" overflows.
356  // This may appear superficially to be something we could ignore entirely,
357  // but that is not so! There may be widened loads or PHI-node uses where
358  // some instructions are dead but not others. We can't completely ignore
359  // them, and so have to record at least the information here.
360  assert(AllocSize >= BeginOffset); // Established above.
361  if (Size > AllocSize - BeginOffset) {
362  DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
363  << " to remain within the " << AllocSize << " byte alloca:\n"
364  << " alloca: " << S.AI << "\n"
365  << " use: " << I << "\n");
366  EndOffset = AllocSize;
367  }
368 
369  S.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
370  }
371 
372  void visitBitCastInst(BitCastInst &BC) {
373  if (BC.use_empty())
374  return markAsDead(BC);
375 
376  return Base::visitBitCastInst(BC);
377  }
378 
379  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
380  if (GEPI.use_empty())
381  return markAsDead(GEPI);
382 
383  return Base::visitGetElementPtrInst(GEPI);
384  }
385 
386  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
387  uint64_t Size, bool IsVolatile) {
388  // We allow splitting of loads and stores where the type is an integer type
389  // and covers the entire alloca. This prevents us from splitting
390  // over-eagerly.
391  // FIXME: In the great blue eventually, we should eagerly split all integer
392  // loads and stores, and then have a separate step that merges adjacent
393  // alloca partitions into a single partition suitable for integer widening.
394  // Or we should skip the merge step and rely on GVN and other passes to
395  // merge adjacent loads and stores that survive mem2reg.
396  bool IsSplittable =
397  Ty->isIntegerTy() && !IsVolatile && Offset == 0 && Size >= AllocSize;
398 
399  insertUse(I, Offset, Size, IsSplittable);
400  }
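A hand-written illustration of this rule, assuming an 8-byte alloca:

  %a = alloca i64                  ; AllocSize = 8
  %p = bitcast i64* %a to i32*
  %w = load i64* %a                ; integer, offset 0, size 8 >= 8: splittable
  %h = load i32* %p                ; covers only [0,4): unsplittable
  %v = load volatile i64* %a       ; volatile: unsplittable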
401 
402  void visitLoadInst(LoadInst &LI) {
403  assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
404  "All simple FCA loads should have been pre-split");
405 
406  if (!IsOffsetKnown)
407  return PI.setAborted(&LI);
408 
409  uint64_t Size = DL.getTypeStoreSize(LI.getType());
410  return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
411  }
412 
413  void visitStoreInst(StoreInst &SI) {
414  Value *ValOp = SI.getValueOperand();
415  if (ValOp == *U)
416  return PI.setEscapedAndAborted(&SI);
417  if (!IsOffsetKnown)
418  return PI.setAborted(&SI);
419 
420  uint64_t Size = DL.getTypeStoreSize(ValOp->getType());
421 
422  // If this memory access can be shown to *statically* extend outside the
423  // bounds of the allocation, its behavior is undefined, so simply
424  // ignore it. Note that this is more strict than the generic clamping
425  // behavior of insertUse. We also try to handle cases which might run the
426  // risk of overflow.
427  // FIXME: We should instead consider the pointer to have escaped if this
428  // function is being instrumented for addressing bugs or race conditions.
429  if (Offset.isNegative() || Size > AllocSize ||
430  Offset.ugt(AllocSize - Size)) {
431  DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
432  << " which extends past the end of the " << AllocSize
433  << " byte alloca:\n"
434  << " alloca: " << S.AI << "\n"
435  << " use: " << SI << "\n");
436  return markAsDead(SI);
437  }
438 
439  assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
440  "All simple FCA stores should have been pre-split");
441  handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
442  }
443 
444 
445  void visitMemSetInst(MemSetInst &II) {
446  assert(II.getRawDest() == *U && "Pointer use is not the destination?");
447  ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
448  if ((Length && Length->getValue() == 0) ||
449  (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
450  // Zero-length mem transfer intrinsics can be ignored entirely.
451  return markAsDead(II);
452 
453  if (!IsOffsetKnown)
454  return PI.setAborted(&II);
455 
456  insertUse(II, Offset,
457  Length ? Length->getLimitedValue()
458  : AllocSize - Offset.getLimitedValue(),
459  (bool)Length);
460  }
461 
462  void visitMemTransferInst(MemTransferInst &II) {
463  ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
464  if ((Length && Length->getValue() == 0) ||
465  (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
466  // Zero-length mem transfer intrinsics can be ignored entirely.
467  return markAsDead(II);
468 
469  if (!IsOffsetKnown)
470  return PI.setAborted(&II);
471 
472  uint64_t RawOffset = Offset.getLimitedValue();
473  uint64_t Size = Length ? Length->getLimitedValue()
474  : AllocSize - RawOffset;
475 
476  // Check for the special case where the same exact value is used for both
477  // source and dest.
478  if (*U == II.getRawDest() && *U == II.getRawSource()) {
479  // For non-volatile transfers this is a no-op.
480  if (!II.isVolatile())
481  return markAsDead(II);
482 
483  return insertUse(II, Offset, Size, /*IsSplittable=*/false);
484  }
485 
486  // If we have seen both source and destination for a mem transfer, then
487  // they both point to the same alloca.
488  bool Inserted;
489  SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
490  llvm::tie(MTPI, Inserted) =
491  MemTransferSliceMap.insert(std::make_pair(&II, S.Slices.size()));
492  unsigned PrevIdx = MTPI->second;
493  if (!Inserted) {
494  Slice &PrevP = S.Slices[PrevIdx];
495 
496  // Check if the begin offsets match and this is a non-volatile transfer.
497  // In that case, we can completely elide the transfer.
498  if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
499  PrevP.kill();
500  return markAsDead(II);
501  }
502 
503  // Otherwise we have an offset transfer within the same alloca. We can't
504  // split those.
505  PrevP.makeUnsplittable();
506  }
507 
508  // Insert the use now that we've fixed up the splittable nature.
509  insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);
510 
511  // Check that we ended up with a valid index in the map.
512  assert(S.Slices[PrevIdx].getUse()->getUser() == &II &&
513  "Map index doesn't point back to a slice with this user.");
514  }
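A sketch of the two same-alloca cases handled above (illustrative only):

  %a = alloca [16 x i8]
  %p = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
  %q = getelementptr inbounds [16 x i8]* %a, i32 0, i32 8
  ; Identical non-volatile source and dest: a no-op, killed as dead.
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %p, i32 16, i32 1, i1 false)
  ; Source and dest in the same alloca at different offsets: both slices are
  ; recorded, but marked unsplittable.
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %q, i8* %p, i32 8, i32 1, i1 false)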
515 
516  // Disable SRoA for any intrinsics except for lifetime invariants.
517  // FIXME: What about debug intrinsics? This matches old behavior, but
518  // doesn't make sense.
519  void visitIntrinsicInst(IntrinsicInst &II) {
520  if (!IsOffsetKnown)
521  return PI.setAborted(&II);
522 
523  if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
524      II.getIntrinsicID() == Intrinsic::lifetime_end) {
525  ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
526  uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
527  Length->getLimitedValue());
528  insertUse(II, Offset, Size, true);
529  return;
530  }
531 
532  Base::visitIntrinsicInst(II);
533  }
534 
535  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
536  // We consider any PHI or select that results in a direct load or store of
537  // the same offset to be a viable use for slicing purposes. These uses
538  // are considered unsplittable and the size is the maximum loaded or stored
539  // size.
540  SmallPtrSet<Instruction *, 4> Visited;
541  SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
542  Visited.insert(Root);
543  Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
544  // If there are no loads or stores, the access is dead. We mark that as
545  // a size zero access.
546  Size = 0;
547  do {
548  Instruction *I, *UsedI;
549  llvm::tie(UsedI, I) = Uses.pop_back_val();
550 
551  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
552  Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
553  continue;
554  }
555  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
556  Value *Op = SI->getOperand(0);
557  if (Op == UsedI)
558  return SI;
559  Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
560  continue;
561  }
562 
563  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
564  if (!GEP->hasAllZeroIndices())
565  return GEP;
566  } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
567  !isa<SelectInst>(I)) {
568  return I;
569  }
570 
571  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
572  ++UI)
573  if (Visited.insert(cast<Instruction>(*UI)))
574  Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
575  } while (!Uses.empty());
576 
577  return 0;
578  }
579 
580  void visitPHINode(PHINode &PN) {
581  if (PN.use_empty())
582  return markAsDead(PN);
583  if (!IsOffsetKnown)
584  return PI.setAborted(&PN);
585 
586  // See if we already have computed info on this node.
587  uint64_t &PHISize = PHIOrSelectSizes[&PN];
588  if (!PHISize) {
589  // This is a new PHI node, check for an unsafe use of the PHI node.
590  if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHISize))
591  return PI.setAborted(UnsafeI);
592  }
593 
594  // For PHI and select operands outside the alloca, we can't nuke the entire
595  // phi or select -- the other side might still be relevant, so we special
596  // case them here and use a separate structure to track the operands
597  // themselves which should be replaced with undef.
598  // FIXME: This should instead be escaped in the event we're instrumenting
599  // for address sanitization.
600  if ((Offset.isNegative() && (-Offset).uge(PHISize)) ||
601  (!Offset.isNegative() && Offset.uge(AllocSize))) {
602  S.DeadOperands.push_back(U);
603  return;
604  }
605 
606  insertUse(PN, Offset, PHISize);
607  }
608 
609  void visitSelectInst(SelectInst &SI) {
610  if (SI.use_empty())
611  return markAsDead(SI);
612  if (Value *Result = foldSelectInst(SI)) {
613  if (Result == *U)
614  // If the result of the constant fold will be the pointer, recurse
615  // through the select as if we had RAUW'ed it.
616  enqueueUsers(SI);
617  else
618  // Otherwise the operand to the select is dead, and we can replace it
619  // with undef.
620  S.DeadOperands.push_back(U);
621 
622  return;
623  }
624  if (!IsOffsetKnown)
625  return PI.setAborted(&SI);
626 
627  // See if we already have computed info on this node.
628  uint64_t &SelectSize = PHIOrSelectSizes[&SI];
629  if (!SelectSize) {
630  // This is a new Select, check for an unsafe use of it.
631  if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectSize))
632  return PI.setAborted(UnsafeI);
633  }
634 
635  // For PHI and select operands outside the alloca, we can't nuke the entire
636  // phi or select -- the other side might still be relevant, so we special
637  // case them here and use a separate structure to track the operands
638  // themselves which should be replaced with undef.
639  // FIXME: This should instead be escaped in the event we're instrumenting
640  // for address sanitization.
641  if ((Offset.isNegative() && Offset.uge(SelectSize)) ||
642  (!Offset.isNegative() && Offset.uge(AllocSize))) {
643  S.DeadOperands.push_back(U);
644  return;
645  }
646 
647  insertUse(SI, Offset, SelectSize);
648  }
649 
650  /// \brief Disable SROA entirely if there are unhandled users of the alloca.
651  void visitInstruction(Instruction &I) {
652  PI.setAborted(&I);
653  }
654 };
655 
656 AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
657  :
658 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
659  AI(AI),
660 #endif
661  PointerEscapingInstr(0) {
662  SliceBuilder PB(DL, AI, *this);
663  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
664  if (PtrI.isEscaped() || PtrI.isAborted()) {
665  // FIXME: We should sink the escape vs. abort info into the caller nicely,
666  // possibly by just storing the PtrInfo in the AllocaSlices.
667  PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
668  : PtrI.getAbortingInst();
669  assert(PointerEscapingInstr && "Did not track a bad instruction");
670  return;
671  }
672 
673  Slices.erase(std::remove_if(Slices.begin(), Slices.end(),
674  std::mem_fun_ref(&Slice::isDead)),
675  Slices.end());
676 
677  // Sort the uses. This arranges for the offsets to be in ascending order,
678  // and the sizes to be in descending order.
679  std::sort(Slices.begin(), Slices.end());
680 }
681 
682 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
683 
684 void AllocaSlices::print(raw_ostream &OS, const_iterator I,
685  StringRef Indent) const {
686  printSlice(OS, I, Indent);
687  printUse(OS, I, Indent);
688 }
689 
690 void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
691  StringRef Indent) const {
692  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
693  << " slice #" << (I - begin())
694  << (I->isSplittable() ? " (splittable)" : "") << "\n";
695 }
696 
697 void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
698  StringRef Indent) const {
699  OS << Indent << " used by: " << *I->getUse()->getUser() << "\n";
700 }
701 
702 void AllocaSlices::print(raw_ostream &OS) const {
703  if (PointerEscapingInstr) {
704  OS << "Can't analyze slices for alloca: " << AI << "\n"
705  << " A pointer to this alloca escaped by:\n"
706  << " " << *PointerEscapingInstr << "\n";
707  return;
708  }
709 
710  OS << "Slices of alloca: " << AI << "\n";
711  for (const_iterator I = begin(), E = end(); I != E; ++I)
712  print(OS, I);
713 }
714 
715 void AllocaSlices::dump(const_iterator I) const { print(dbgs(), I); }
716 void AllocaSlices::dump() const { print(dbgs()); }
717 
718 #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
719 
720 namespace {
721 /// \brief Implementation of LoadAndStorePromoter for promoting allocas.
722 ///
723 /// This subclass of LoadAndStorePromoter adds overrides to handle promoting
724 /// the loads and stores of an alloca instruction, as well as updating its
725 /// debug information. This is used when a domtree is unavailable and thus
726 /// mem2reg in its full form can't be used to handle promotion of allocas to
727 /// scalar values.
728 class AllocaPromoter : public LoadAndStorePromoter {
729  AllocaInst &AI;
730  DIBuilder &DIB;
731 
732  SmallVector<DbgDeclareInst *, 4> DDIs;
733  SmallVector<DbgValueInst *, 4> DVIs;
734 
735 public:
736  AllocaPromoter(const SmallVectorImpl<Instruction *> &Insts, SSAUpdater &S,
737  AllocaInst &AI, DIBuilder &DIB)
738  : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}
739 
740  void run(const SmallVectorImpl<Instruction*> &Insts) {
741  // Retain the debug information attached to the alloca for use when
742  // rewriting loads and stores.
743  if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
744  for (Value::use_iterator UI = DebugNode->use_begin(),
745  UE = DebugNode->use_end();
746  UI != UE; ++UI)
747  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
748  DDIs.push_back(DDI);
749  else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
750  DVIs.push_back(DVI);
751  }
752 
753  LoadAndStorePromoter::run(Insts);
754 
755  // While we have the debug information, clear it off of the alloca. The
756  // caller takes care of deleting the alloca.
757  while (!DDIs.empty())
758  DDIs.pop_back_val()->eraseFromParent();
759  while (!DVIs.empty())
760  DVIs.pop_back_val()->eraseFromParent();
761  }
762 
763  virtual bool isInstInList(Instruction *I,
764  const SmallVectorImpl<Instruction*> &Insts) const {
765  Value *Ptr;
766  if (LoadInst *LI = dyn_cast<LoadInst>(I))
767  Ptr = LI->getOperand(0);
768  else
769  Ptr = cast<StoreInst>(I)->getPointerOperand();
770 
771  // Only used to detect cycles, which will be rare and quickly found as
772  // we're walking up a chain of defs rather than down through uses.
773  SmallPtrSet<Value *, 4> Visited;
774 
775  do {
776  if (Ptr == &AI)
777  return true;
778 
779  if (BitCastInst *BCI = dyn_cast<BitCastInst>(Ptr))
780  Ptr = BCI->getOperand(0);
781  else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
782  Ptr = GEPI->getPointerOperand();
783  else
784  return false;
785 
786  } while (Visited.insert(Ptr));
787 
788  return false;
789  }
790 
791  virtual void updateDebugInfo(Instruction *Inst) const {
792  for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
793  E = DDIs.end(); I != E; ++I) {
794  DbgDeclareInst *DDI = *I;
795  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
796  ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
797  else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
798  ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
799  }
800  for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
801  E = DVIs.end(); I != E; ++I) {
802  DbgValueInst *DVI = *I;
803  Value *Arg = 0;
804  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
805  // If an argument is zero extended then use argument directly. The ZExt
806  // may be zapped by an optimization pass in future.
807  if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
808  Arg = dyn_cast<Argument>(ZExt->getOperand(0));
809  else if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
810  Arg = dyn_cast<Argument>(SExt->getOperand(0));
811  if (!Arg)
812  Arg = SI->getValueOperand();
813  } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
814  Arg = LI->getPointerOperand();
815  } else {
816  continue;
817  }
818  Instruction *DbgVal =
819  DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
820  Inst);
821  DbgVal->setDebugLoc(DVI->getDebugLoc());
822  }
823  }
824 };
825 } // end anon namespace
826 
827 
828 namespace {
829 /// \brief An optimization pass providing Scalar Replacement of Aggregates.
830 ///
831 /// This pass takes allocations which can be completely analyzed (that is, they
832 /// don't escape) and tries to turn them into scalar SSA values. There are
833 /// a few steps to this process.
834 ///
835 /// 1) It takes allocations of aggregates and analyzes the ways in which they
836 /// are used to try to split them into smaller allocations, ideally of
837 /// a single scalar data type. It will split up memcpy and memset accesses
838 /// as necessary and try to isolate individual scalar accesses.
839 /// 2) It will transform accesses into forms which are suitable for SSA value
840 /// promotion. This can be replacing a memset with a scalar store of an
841 /// integer value, or it can involve speculating operations on a PHI or
842 /// select to be a PHI or select of the results.
843 /// 3) Finally, this will try to detect a pattern of accesses which map cleanly
844 /// onto insert and extract operations on a vector value, and convert them to
845 /// this form. By doing so, it will enable promotion of vector aggregates to
846 /// SSA vector values.
847 class SROA : public FunctionPass {
848  const bool RequiresDomTree;
849 
850  LLVMContext *C;
851  const DataLayout *DL;
852  DominatorTree *DT;
853 
854  /// \brief Worklist of alloca instructions to simplify.
855  ///
856  /// Each alloca in the function is added to this. Each new alloca formed gets
857  /// added to it as well to recursively simplify unless that alloca can be
858  /// directly promoted. Finally, each time we rewrite a use of an alloca other
859  /// the one being actively rewritten, we add it back onto the list if not
860  /// already present to ensure it is re-visited.
861  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > Worklist;
862 
863  /// \brief A collection of instructions to delete.
864  /// We try to batch deletions to simplify code and make things a bit more
865  /// efficient.
866  SetVector<Instruction *, SmallVector<Instruction *, 8> > DeadInsts;
867 
868  /// \brief Post-promotion worklist.
869  ///
870  /// Sometimes we discover an alloca which has a high probability of becoming
871  /// viable for SROA after a round of promotion takes place. In those cases,
872  /// the alloca is enqueued here for re-processing.
873  ///
874  /// Note that we have to be very careful to clear allocas out of this list in
875  /// the event they are deleted.
876  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > PostPromotionWorklist;
877 
878  /// \brief A collection of alloca instructions we can directly promote.
879  std::vector<AllocaInst *> PromotableAllocas;
880 
881  /// \brief A worklist of PHIs to speculate prior to promoting allocas.
882  ///
883  /// All of these PHIs have been checked for the safety of speculation and by
884  /// being speculated will allow promoting allocas currently in the promotable
885  /// queue.
886  SetVector<PHINode *, SmallVector<PHINode *, 2> > SpeculatablePHIs;
887 
888  /// \brief A worklist of select instructions to speculate prior to promoting
889  /// allocas.
890  ///
891  /// All of these select instructions have been checked for the safety of
892  /// speculation and by being speculated will allow promoting allocas
893  /// currently in the promotable queue.
894  SetVector<SelectInst *, SmallVector<SelectInst *, 2> > SpeculatableSelects;
895 
896 public:
897  SROA(bool RequiresDomTree = true)
898  : FunctionPass(ID), RequiresDomTree(RequiresDomTree),
899  C(0), DL(0), DT(0) {
900  initializeSROAPass(*PassRegistry::getPassRegistry());
901  }
902  bool runOnFunction(Function &F);
903  void getAnalysisUsage(AnalysisUsage &AU) const;
904 
905  const char *getPassName() const { return "SROA"; }
906  static char ID;
907 
908 private:
909  friend class PHIOrSelectSpeculator;
910  friend class AllocaSliceRewriter;
911 
912  bool rewritePartition(AllocaInst &AI, AllocaSlices &S,
913                        AllocaSlices::iterator B, AllocaSlices::iterator E,
914  int64_t BeginOffset, int64_t EndOffset,
915                        ArrayRef<AllocaSlices::iterator> SplitUses);
916  bool splitAlloca(AllocaInst &AI, AllocaSlices &S);
917  bool runOnAlloca(AllocaInst &AI);
918  void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
919  bool promoteAllocas(Function &F);
920 };
921 }
922 
923 char SROA::ID = 0;
924 
925 FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
926  return new SROA(RequiresDomTree);
927 }
928 
929 INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
930  false, false)
931 INITIALIZE_PASS_DEPENDENCY(DominatorTree)
932 INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
933  false, false)
934 
935 /// Walk the range of a partitioning looking for a common type to cover this
936 /// sequence of slices.
937 static Type *findCommonType(AllocaSlices::const_iterator B,
938  AllocaSlices::const_iterator E,
939  uint64_t EndOffset) {
940  Type *Ty = 0;
941  bool IgnoreNonIntegralTypes = false;
942  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
943  Use *U = I->getUse();
944  if (isa<IntrinsicInst>(*U->getUser()))
945  continue;
946  if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
947  continue;
948 
949  Type *UserTy = 0;
950  if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
951  UserTy = LI->getType();
952  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
953  UserTy = SI->getValueOperand()->getType();
954  } else {
955  IgnoreNonIntegralTypes = true; // Give up on anything but an iN type.
956  continue;
957  }
958 
959  if (IntegerType *ITy = dyn_cast<IntegerType>(UserTy)) {
960  // If the type is larger than the partition, skip it. We only encounter
961  // this for split integer operations where we want to use the type of the
962  // entity causing the split. Also skip if the type is not a byte width
963  // multiple.
964  if (ITy->getBitWidth() % 8 != 0 ||
965  ITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
966  continue;
967 
968  // If we have found an integer type use covering the alloca, use that
969  // regardless of the other types, as integers are often used for
970  // a "bucket of bits" type.
971  //
972  // NB: This *must* be the only return from inside the loop so that the
973  // order of slices doesn't impact the computed type.
974  return ITy;
975  } else if (IgnoreNonIntegralTypes) {
976  continue;
977  }
978 
979  if (Ty && Ty != UserTy)
980  IgnoreNonIntegralTypes = true; // Give up on anything but an iN type.
981 
982  Ty = UserTy;
983  }
984  return Ty;
985 }
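A worked example of the integer priority (hypothetical partition): if an 8-byte partition is fully covered by both a load of i64 and a load of double, i64 is returned no matter which use is visited first, while uses that do not span the whole partition, or are intrinsics, are simply skipped.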
986 
987 /// PHI instructions that use an alloca and are subsequently loaded can be
988 /// rewritten to load both input pointers in the pred blocks and then PHI the
989 /// results, allowing the load of the alloca to be promoted.
990 /// From this:
991 /// %P2 = phi [i32* %Alloca, i32* %Other]
992 /// %V = load i32* %P2
993 /// to:
994 /// %V1 = load i32* %Alloca -> will be mem2reg'd
995 /// ...
996 /// %V2 = load i32* %Other
997 /// ...
998 /// %V = phi [i32 %V1, i32 %V2]
999 ///
1000 /// We can do this to a select if its only uses are loads and if the operands
1001 /// to the select can be loaded unconditionally.
1002 ///
1003 /// FIXME: This should be hoisted into a generic utility, likely in
1004 /// Transforms/Util/Local.h
1005 static bool isSafePHIToSpeculate(PHINode &PN,
1006  const DataLayout *DL = 0) {
1007  // For now, we can only do this promotion if the load is in the same block
1008  // as the PHI, and if there are no stores between the phi and load.
1009  // TODO: Allow recursive phi users.
1010  // TODO: Allow stores.
1011  BasicBlock *BB = PN.getParent();
1012  unsigned MaxAlign = 0;
1013  bool HaveLoad = false;
1014  for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); UI != UE;
1015  ++UI) {
1016  LoadInst *LI = dyn_cast<LoadInst>(*UI);
1017  if (LI == 0 || !LI->isSimple())
1018  return false;
1019 
1020  // For now we only allow loads in the same block as the PHI. This is
1021  // a common case that happens when instcombine merges two loads through
1022  // a PHI.
1023  if (LI->getParent() != BB)
1024  return false;
1025 
1026  // Ensure that there are no instructions between the PHI and the load that
1027  // could store.
1028  for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
1029  if (BBI->mayWriteToMemory())
1030  return false;
1031 
1032  MaxAlign = std::max(MaxAlign, LI->getAlignment());
1033  HaveLoad = true;
1034  }
1035 
1036  if (!HaveLoad)
1037  return false;
1038 
1039  // We can only transform this if it is safe to push the loads into the
1040  // predecessor blocks. The only thing to watch out for is that we can't put
1041  // a possibly trapping load in the predecessor if it is a critical edge.
1042  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1043  TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
1044  Value *InVal = PN.getIncomingValue(Idx);
1045 
1046  // If the value is produced by the terminator of the predecessor (an
1047  // invoke) or it has side-effects, there is no valid place to put a load
1048  // in the predecessor.
1049  if (TI == InVal || TI->mayHaveSideEffects())
1050  return false;
1051 
1052  // If the predecessor has a single successor, then the edge isn't
1053  // critical.
1054  if (TI->getNumSuccessors() == 1)
1055  continue;
1056 
1057  // If this pointer is always safe to load, or if we can prove that there
1058  // is already a load in the block, then we can move the load to the pred
1059  // block.
1060  if (InVal->isDereferenceablePointer() ||
1061  isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL))
1062  continue;
1063 
1064  return false;
1065  }
1066 
1067  return true;
1068 }
1069 
1070 static void speculatePHINodeLoads(PHINode &PN) {
1071  DEBUG(dbgs() << " original: " << PN << "\n");
1072 
1073  Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
1074  IRBuilderTy PHIBuilder(&PN);
1075  PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
1076  PN.getName() + ".sroa.speculated");
1077 
1078  // Get the TBAA tag and alignment to use from one of the loads. It doesn't
1079  // matter which one we get and if any differ.
1080  LoadInst *SomeLoad = cast<LoadInst>(*PN.use_begin());
1081  MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
1082  unsigned Align = SomeLoad->getAlignment();
1083 
1084  // Rewrite all loads of the PN to use the new PHI.
1085  while (!PN.use_empty()) {
1086  LoadInst *LI = cast<LoadInst>(*PN.use_begin());
1087  LI->replaceAllUsesWith(NewPN);
1088  LI->eraseFromParent();
1089  }
1090 
1091  // Inject loads into all of the pred blocks.
1092  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1093  BasicBlock *Pred = PN.getIncomingBlock(Idx);
1094  TerminatorInst *TI = Pred->getTerminator();
1095  Value *InVal = PN.getIncomingValue(Idx);
1096  IRBuilderTy PredBuilder(TI);
1097 
1098  LoadInst *Load = PredBuilder.CreateLoad(
1099  InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
1100  ++NumLoadsSpeculated;
1101  Load->setAlignment(Align);
1102  if (TBAATag)
1103  Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
1104  NewPN->addIncoming(Load, Pred);
1105  }
1106 
1107  DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
1108  PN.eraseFromParent();
1109 }
1110 
1111 /// Select instructions that use an alloca and are subsequently loaded can be
1112 /// rewritten to load both input pointers and then select between the result,
1113 /// allowing the load of the alloca to be promoted.
1114 /// From this:
1115 /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
1116 /// %V = load i32* %P2
1117 /// to:
1118 /// %V1 = load i32* %Alloca -> will be mem2reg'd
1119 /// %V2 = load i32* %Other
1120 /// %V = select i1 %cond, i32 %V1, i32 %V2
1121 ///
1122 /// We can do this to a select if its only uses are loads and if the operand
1123 /// to the select can be loaded unconditionally.
1124 static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *DL = 0) {
1125  Value *TValue = SI.getTrueValue();
1126  Value *FValue = SI.getFalseValue();
1127  bool TDerefable = TValue->isDereferenceablePointer();
1128  bool FDerefable = FValue->isDereferenceablePointer();
1129 
1130  for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); UI != UE;
1131  ++UI) {
1132  LoadInst *LI = dyn_cast<LoadInst>(*UI);
1133  if (LI == 0 || !LI->isSimple())
1134  return false;
1135 
1136  // Both operands to the select need to be dereferencable, either
1137  // absolutely (e.g. allocas) or at this point because we can see other
1138  // accesses to it.
1139  if (!TDerefable &&
1140  !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment(), DL))
1141  return false;
1142  if (!FDerefable &&
1143  !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment(), DL))
1144  return false;
1145  }
1146 
1147  return true;
1148 }
1149 
1150 static void speculateSelectInstLoads(SelectInst &SI) {
1151  DEBUG(dbgs() << " original: " << SI << "\n");
1152 
1153  IRBuilderTy IRB(&SI);
1154  Value *TV = SI.getTrueValue();
1155  Value *FV = SI.getFalseValue();
1156  // Replace the loads of the select with a select of two loads.
1157  while (!SI.use_empty()) {
1158  LoadInst *LI = cast<LoadInst>(*SI.use_begin());
1159  assert(LI->isSimple() && "We only speculate simple loads");
1160 
1161  IRB.SetInsertPoint(LI);
1162  LoadInst *TL =
1163  IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
1164  LoadInst *FL =
1165  IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
1166  NumLoadsSpeculated += 2;
1167 
1168  // Transfer alignment and TBAA info if present.
1169  TL->setAlignment(LI->getAlignment());
1170  FL->setAlignment(LI->getAlignment());
1171  if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
1172  TL->setMetadata(LLVMContext::MD_tbaa, Tag);
1173  FL->setMetadata(LLVMContext::MD_tbaa, Tag);
1174  }
1175 
1176  Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
1177  LI->getName() + ".sroa.speculated");
1178 
1179  DEBUG(dbgs() << " speculated to: " << *V << "\n");
1180  LI->replaceAllUsesWith(V);
1181  LI->eraseFromParent();
1182  }
1183  SI.eraseFromParent();
1184 }
1185 
1186 /// \brief Build a GEP out of a base pointer and indices.
1187 ///
1188 /// This will return the BasePtr if that is valid, or build a new GEP
1189 /// instruction using the IRBuilder if GEP-ing is needed.
1190 static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
1191  SmallVectorImpl<Value *> &Indices) {
1192  if (Indices.empty())
1193  return BasePtr;
1194 
1195  // A single zero index is a no-op, so check for this and avoid building a GEP
1196  // in that case.
1197  if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
1198  return BasePtr;
1199 
1200  return IRB.CreateInBoundsGEP(BasePtr, Indices, "idx");
1201 }
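For example, a single zero index would yield a GEP that is a no-op, so the base pointer is returned unchanged rather than emitting:

  %idx = getelementptr inbounds i32* %base, i64 0   ; identical to %base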
1202 
1203 /// \brief Get a natural GEP off of the BasePtr walking through Ty toward
1204 /// TargetTy without changing the offset of the pointer.
1205 ///
1206 /// This routine assumes we've already established a properly offset GEP with
1207 /// Indices, and arrived at the Ty type. The goal is to continue to GEP with
1208 /// zero-indices down through type layers until we find one the same as
1209 /// TargetTy. If we can't find one with the same type, we at least try to use
1210 /// one with the same size. If none of that works, we just produce the GEP as
1211 /// indicated by Indices to have the correct offset.
1212 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
1213  Value *BasePtr, Type *Ty, Type *TargetTy,
1214  SmallVectorImpl<Value *> &Indices) {
1215  if (Ty == TargetTy)
1216  return buildGEP(IRB, BasePtr, Indices);
1217 
1218  // See if we can descend into a struct and locate a field with the correct
1219  // type.
1220  unsigned NumLayers = 0;
1221  Type *ElementTy = Ty;
1222  do {
1223  if (ElementTy->isPointerTy())
1224  break;
1225  if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
1226  ElementTy = SeqTy->getElementType();
1227  // Note that we use the default address space as this index is over an
1228  // array or a vector, not a pointer.
1229  Indices.push_back(IRB.getInt(APInt(DL.getPointerSizeInBits(0), 0)));
1230  } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
1231  if (STy->element_begin() == STy->element_end())
1232  break; // Nothing left to descend into.
1233  ElementTy = *STy->element_begin();
1234  Indices.push_back(IRB.getInt32(0));
1235  } else {
1236  break;
1237  }
1238  ++NumLayers;
1239  } while (ElementTy != TargetTy);
1240  if (ElementTy != TargetTy)
1241  Indices.erase(Indices.end() - NumLayers, Indices.end());
1242 
1243  return buildGEP(IRB, BasePtr, Indices);
1244 }
1245 
1246 /// \brief Recursively compute indices for a natural GEP.
1247 ///
1248 /// This is the recursive step for getNaturalGEPWithOffset that walks down the
1249 /// element types adding appropriate indices for the GEP.
1250 static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
1251  Value *Ptr, Type *Ty, APInt &Offset,
1252  Type *TargetTy,
1253  SmallVectorImpl<Value *> &Indices) {
1254  if (Offset == 0)
1255  return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices);
1256 
1257  // We can't recurse through pointer types.
1258  if (Ty->isPointerTy())
1259  return 0;
1260 
1261  // We try to analyze GEPs over vectors here, but note that these GEPs are
1262  // extremely poorly defined currently. The long-term goal is to remove GEPing
1263  // over a vector from the IR completely.
1264  if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
1265  unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
1266  if (ElementSizeInBits % 8)
1267  return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
1268  APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
1269  APInt NumSkippedElements = Offset.sdiv(ElementSize);
1270  if (NumSkippedElements.ugt(VecTy->getNumElements()))
1271  return 0;
1272  Offset -= NumSkippedElements * ElementSize;
1273  Indices.push_back(IRB.getInt(NumSkippedElements));
1274  return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
1275  Offset, TargetTy, Indices);
1276  }
1277 
1278  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
1279  Type *ElementTy = ArrTy->getElementType();
1280  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
1281  APInt NumSkippedElements = Offset.sdiv(ElementSize);
1282  if (NumSkippedElements.ugt(ArrTy->getNumElements()))
1283  return 0;
1284 
1285  Offset -= NumSkippedElements * ElementSize;
1286  Indices.push_back(IRB.getInt(NumSkippedElements));
1287  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1288  Indices);
1289  }
1290 
1291  StructType *STy = dyn_cast<StructType>(Ty);
1292  if (!STy)
1293  return 0;
1294 
1295  const StructLayout *SL = DL.getStructLayout(STy);
1296  uint64_t StructOffset = Offset.getZExtValue();
1297  if (StructOffset >= SL->getSizeInBytes())
1298  return 0;
1299  unsigned Index = SL->getElementContainingOffset(StructOffset);
1300  Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
1301  Type *ElementTy = STy->getElementType(Index);
1302  if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
1303  return 0; // The offset points into alignment padding.
1304 
1305  Indices.push_back(IRB.getInt32(Index));
1306  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1307  Indices);
1308 }
1309 
1310 /// \brief Get a natural GEP from a base pointer to a particular offset and
1311 /// resulting in a particular type.
1312 ///
1313 /// The goal is to produce a "natural" looking GEP that works with the existing
1314 /// composite types to arrive at the appropriate offset and element type for
1315 /// a pointer. TargetTy is the element type the returned GEP should point-to if
1316 /// possible. We recurse by decreasing Offset, adding the appropriate index to
1317 /// Indices, and setting Ty to the result subtype.
1318 ///
1319 /// If no natural GEP can be constructed, this function returns null.
1320 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
1321  Value *Ptr, APInt Offset, Type *TargetTy,
1322  SmallVectorImpl<Value *> &Indices) {
1323  PointerType *Ty = cast<PointerType>(Ptr->getType());
1324 
1325  // Don't consider any GEPs through an i8* as natural unless the TargetTy is
1326  // an i8.
1327  if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
1328  return 0;
1329 
1330  Type *ElementTy = Ty->getElementType();
1331  if (!ElementTy->isSized())
1332  return 0; // We can't GEP through an unsized element.
1333  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
1334  if (ElementSize == 0)
1335  return 0; // Zero-length arrays can't help us build a natural GEP.
1336  APInt NumSkippedElements = Offset.sdiv(ElementSize);
1337 
1338  Offset -= NumSkippedElements * ElementSize;
1339  Indices.push_back(IRB.getInt(NumSkippedElements));
1340  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1341  Indices);
1342 }
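A worked example, assuming a 64-bit DataLayout and no struct padding (hypothetical %base): for a base pointer of type [4 x { i32, i32 }]* at byte offset 12 with an i32 target type, the routine skips one 8-byte struct element (leaving offset 4) and then selects field 1 of the struct (leaving offset 0), producing:

  %p = getelementptr inbounds [4 x { i32, i32 }]* %base, i64 0, i64 1, i32 1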
1343 
1344 /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
1345 /// resulting pointer has PointerTy.
1346 ///
1347 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1348 /// and produces the pointer type desired. Where it cannot, it will try to use
1349 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1350 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1351 /// bitcast to the type.
1352 ///
1353 /// The strategy for finding the more natural GEPs is to peel off layers of the
1354 /// pointer, walking back through bit casts and GEPs, searching for a base
1355 /// pointer from which we can compute a natural GEP with the desired
1356 /// properties. The algorithm tries to fold as many constant indices into
1357 /// a single GEP as possible, thus making each GEP more independent of the
1358 /// surrounding code.
1359 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL,
1360  Value *Ptr, APInt Offset, Type *PointerTy) {
1361  // Even though we don't look through PHI nodes, we could be called on an
1362  // instruction in an unreachable block, which may be on a cycle.
1363  SmallPtrSet<Value *, 4> Visited;
1364  Visited.insert(Ptr);
1365  SmallVector<Value *, 4> Indices;
1366 
1367  // We may end up computing an offset pointer that has the wrong type. If we
1368  // never are able to compute one directly that has the correct type, we'll
1369  // fall back to it, so keep it around here.
1370  Value *OffsetPtr = 0;
1371 
1372  // Remember any i8 pointer we come across to re-use if we need to do a raw
1373  // byte offset.
1374  Value *Int8Ptr = 0;
1375  APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1376 
1377  Type *TargetTy = PointerTy->getPointerElementType();
1378 
1379  do {
1380  // First fold any existing GEPs into the offset.
1381  while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1382  APInt GEPOffset(Offset.getBitWidth(), 0);
1383  if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1384  break;
1385  Offset += GEPOffset;
1386  Ptr = GEP->getPointerOperand();
1387  if (!Visited.insert(Ptr))
1388  break;
1389  }
1390 
1391  // See if we can perform a natural GEP here.
1392  Indices.clear();
1393  if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1394  Indices)) {
1395  if (P->getType() == PointerTy) {
1396  // Zap any offset pointer that we ended up computing in previous rounds.
1397  if (OffsetPtr && OffsetPtr->use_empty())
1398  if (Instruction *I = dyn_cast<Instruction>(OffsetPtr))
1399  I->eraseFromParent();
1400  return P;
1401  }
1402  if (!OffsetPtr) {
1403  OffsetPtr = P;
1404  }
1405  }
1406 
1407  // Stash this pointer if we've found an i8*.
1408  if (Ptr->getType()->isIntegerTy(8)) {
1409  Int8Ptr = Ptr;
1410  Int8PtrOffset = Offset;
1411  }
1412 
1413  // Peel off a layer of the pointer and update the offset appropriately.
1414  if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1415  Ptr = cast<Operator>(Ptr)->getOperand(0);
1416  } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1417  if (GA->mayBeOverridden())
1418  break;
1419  Ptr = GA->getAliasee();
1420  } else {
1421  break;
1422  }
1423  assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1424  } while (Visited.insert(Ptr));
1425 
1426  if (!OffsetPtr) {
1427  if (!Int8Ptr) {
1428  Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
1429  "raw_cast");
1430  Int8PtrOffset = Offset;
1431  }
1432 
1433  OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
1434  IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
1435  "raw_idx");
1436  }
1437  Ptr = OffsetPtr;
1438 
1439  // On the off chance we were targeting i8*, guard the bitcast here.
1440  if (Ptr->getType() != PointerTy)
1441  Ptr = IRB.CreateBitCast(Ptr, PointerTy, "cast");
1442 
1443  return Ptr;
1444 }
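When no natural GEP exists, say byte offset 5 into a [2 x i64] alloca while targeting float*, the fallback path sketched above produces raw byte arithmetic (the value names here echo the "raw_cast", "raw_idx", and "cast" name hints in the code):

  %raw_cast = bitcast [2 x i64]* %a to i8*
  %raw_idx = getelementptr inbounds i8* %raw_cast, i64 5
  %cast = bitcast i8* %raw_idx to float*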
1445 
1446 /// \brief Test whether we can convert a value from the old to the new type.
1447 ///
1448 /// This predicate should be used to guard calls to convertValue in order to
1449 /// ensure that we only try to convert viable values. The strategy is that we
1450 /// will peel off single element struct and array wrappings to get to an
1451 /// underlying value, and convert that value.
1452 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
1453  if (OldTy == NewTy)
1454  return true;
1455  if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
1456  if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
1457  if (NewITy->getBitWidth() >= OldITy->getBitWidth())
1458  return true;
1459  if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
1460  return false;
1461  if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
1462  return false;
1463 
1464  // We can convert pointers to integers and vice-versa. Same for vectors
1465  // of pointers and integers.
1466  OldTy = OldTy->getScalarType();
1467  NewTy = NewTy->getScalarType();
1468  if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
1469  if (NewTy->isPointerTy() && OldTy->isPointerTy())
1470  return true;
1471  if (NewTy->isIntegerTy() || OldTy->isIntegerTy())
1472  return true;
1473  return false;
1474  }
1475 
1476  return true;
1477 }
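// For example, on a target with 64-bit pointers:
//   i32       -> i64   convertible (integer widening)
//   float     -> i32   convertible (same size, bitcast)
//   i64       -> i8*   convertible (same size, ptr/int pair)
//   <2 x i32> -> i64   convertible (same size single value types)
//   i64       -> i32   not convertible (would lose bits)
//   [1 x i64] -> i64   not convertible here (arrays are not single value
//                      types; such wrappers are peeled off elsewhere)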
1478 
1479 /// \brief Generic routine to convert an SSA value to a value of a different
1480 /// type.
1481 ///
1482 /// This will try various different casting techniques, such as bitcasts,
1483 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
1484 /// two types for viability with this routine.
1485 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1486  Type *NewTy) {
1487  Type *OldTy = V->getType();
1488  assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");
1489 
1490  if (OldTy == NewTy)
1491  return V;
1492 
1493  if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
1494  if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
1495  if (NewITy->getBitWidth() > OldITy->getBitWidth())
1496  return IRB.CreateZExt(V, NewITy);
1497 
1498  // See if we need inttoptr for this type pair. A cast involving both scalars
1499  // and vectors requires an additional bitcast.
1500  if (OldTy->getScalarType()->isIntegerTy() &&
1501  NewTy->getScalarType()->isPointerTy()) {
1502  // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1503  if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1504  return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1505  NewTy);
1506 
1507  // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1508  if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1509  return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1510  NewTy);
1511 
1512  return IRB.CreateIntToPtr(V, NewTy);
1513  }
1514 
1515  // See if we need ptrtoint for this type pair. A cast involving both scalars
1516  // and vectors requires an additional bitcast.
1517  if (OldTy->getScalarType()->isPointerTy() &&
1518  NewTy->getScalarType()->isIntegerTy()) {
1519  // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1520  if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1521  return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1522  NewTy);
1523 
1524  // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1525  if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1526  return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1527  NewTy);
1528 
1529  return IRB.CreatePtrToInt(V, NewTy);
1530  }
1531 
1532  return IRB.CreateBitCast(V, NewTy);
1533 }
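// Illustrative lowerings produced by this routine, assuming 64-bit pointers:
//   i32       -> i64  : zext
//   float     -> i32  : bitcast
//   <2 x i32> -> i8*  : bitcast to i64, then inttoptr
//   <2 x i8*> -> i128 : ptrtoint to <2 x i64>, then bitcast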
1534 
1535 /// \brief Test whether the given slice use can be promoted to a vector.
1536 ///
1537 /// This function is called to test each entry in a partition which is slated
1538 /// for a single slice.
1539 static bool isVectorPromotionViableForSlice(
1540  const DataLayout &DL, AllocaSlices &S, uint64_t SliceBeginOffset,
1541  uint64_t SliceEndOffset, VectorType *Ty, uint64_t ElementSize,
1542  AllocaSlices::const_iterator I) {
1543  // First validate the slice offsets.
1544  uint64_t BeginOffset =
1545  std::max(I->beginOffset(), SliceBeginOffset) - SliceBeginOffset;
1546  uint64_t BeginIndex = BeginOffset / ElementSize;
1547  if (BeginIndex * ElementSize != BeginOffset ||
1548  BeginIndex >= Ty->getNumElements())
1549  return false;
1550  uint64_t EndOffset =
1551  std::min(I->endOffset(), SliceEndOffset) - SliceBeginOffset;
1552  uint64_t EndIndex = EndOffset / ElementSize;
1553  if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
1554  return false;
1555 
1556  assert(EndIndex > BeginIndex && "Empty vector!");
1557  uint64_t NumElements = EndIndex - BeginIndex;
1558  Type *SliceTy =
1559  (NumElements == 1) ? Ty->getElementType()
1560  : VectorType::get(Ty->getElementType(), NumElements);
1561 
1562  Type *SplitIntTy =
1563  Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
1564 
1565  Use *U = I->getUse();
1566 
1567  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
1568  if (MI->isVolatile())
1569  return false;
1570  if (!I->isSplittable())
1571  return false; // Skip any unsplittable intrinsics.
1572  } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
1573  // Disable vector promotion when there are loads or stores of an FCA.
1574  return false;
1575  } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1576  if (LI->isVolatile())
1577  return false;
1578  Type *LTy = LI->getType();
1579  if (SliceBeginOffset > I->beginOffset() ||
1580  SliceEndOffset < I->endOffset()) {
1581  assert(LTy->isIntegerTy());
1582  LTy = SplitIntTy;
1583  }
1584  if (!canConvertValue(DL, SliceTy, LTy))
1585  return false;
1586  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1587  if (SI->isVolatile())
1588  return false;
1589  Type *STy = SI->getValueOperand()->getType();
1590  if (SliceBeginOffset > I->beginOffset() ||
1591  SliceEndOffset < I->endOffset()) {
1592  assert(STy->isIntegerTy());
1593  STy = SplitIntTy;
1594  }
1595  if (!canConvertValue(DL, STy, SliceTy))
1596  return false;
1597  } else {
1598  return false;
1599  }
1600 
1601  return true;
1602 }
1603 
1604 /// \brief Test whether the given alloca partitioning and range of slices can be
1605 /// promoted to a vector.
1606 ///
1607 /// This is a quick test to check whether we can rewrite a particular alloca
1608 /// partition (and its newly formed alloca) into a vector alloca with only
1609 /// whole-vector loads and stores such that it could be promoted to a vector
1610 /// SSA value. We only can ensure this for a limited set of operations, and we
1611 /// don't want to do the rewrites unless we are confident that the result will
1612 /// be promotable, so we have an early test here.
1613 static bool
1614 isVectorPromotionViable(const DataLayout &DL, Type *AllocaTy, AllocaSlices &S,
1615  uint64_t SliceBeginOffset, uint64_t SliceEndOffset,
1616  AllocaSlices::const_iterator I,
1617  AllocaSlices::const_iterator E,
1618  ArrayRef<AllocaSlices::iterator> SplitUses) {
1619  VectorType *Ty = dyn_cast<VectorType>(AllocaTy);
1620  if (!Ty)
1621  return false;
1622 
1623  uint64_t ElementSize = DL.getTypeSizeInBits(Ty->getScalarType());
1624 
1625  // While the definition of LLVM vectors is bitpacked, we don't support
1626  // element sizes that aren't byte-sized.
1627  if (ElementSize % 8)
1628  return false;
1629  assert((DL.getTypeSizeInBits(Ty) % 8) == 0 &&
1630  "vector size not a multiple of element size?");
1631  ElementSize /= 8;
1632 
1633  for (; I != E; ++I)
1634  if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset,
1635  SliceEndOffset, Ty, ElementSize, I))
1636  return false;
1637 
1638  for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
1639  SUE = SplitUses.end();
1640  SUI != SUE; ++SUI)
1641  if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset,
1642  SliceEndOffset, Ty, ElementSize, *SUI))
1643  return false;
1644 
1645  return true;
1646 }
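// As a concrete example, a <4 x float> alloca whose slices are whole-vector
// copies plus float loads at offsets 0, 4, 8, and 12 is viable: every slice
// begins and ends on a 4-byte element boundary. An i16 load at offset 2 would
// reject the alloca, because 2 is not a multiple of the element size.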
1647 
1648 /// \brief Test whether a slice of an alloca is valid for integer widening.
1649 ///
1650 /// This implements the necessary checking for the \c isIntegerWideningViable
1651 /// test below on a single slice of the alloca.
1652 static bool isIntegerWideningViableForSlice(const DataLayout &DL,
1653  Type *AllocaTy,
1654  uint64_t AllocBeginOffset,
1655  uint64_t Size, AllocaSlices &S,
1656  AllocaSlices::const_iterator I,
1657  bool &WholeAllocaOp) {
1658  uint64_t RelBegin = I->beginOffset() - AllocBeginOffset;
1659  uint64_t RelEnd = I->endOffset() - AllocBeginOffset;
1660 
1661  // We can't reasonably handle cases where the load or store extends past
1662  // the end of the alloca's type and into its padding.
1663  if (RelEnd > Size)
1664  return false;
1665 
1666  Use *U = I->getUse();
1667 
1668  if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1669  if (LI->isVolatile())
1670  return false;
1671  if (RelBegin == 0 && RelEnd == Size)
1672  WholeAllocaOp = true;
1673  if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
1674  if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
1675  return false;
1676  } else if (RelBegin != 0 || RelEnd != Size ||
1677  !canConvertValue(DL, AllocaTy, LI->getType())) {
1678  // Non-integer loads need to be convertible from the alloca type so that
1679  // they are promotable.
1680  return false;
1681  }
1682  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1683  Type *ValueTy = SI->getValueOperand()->getType();
1684  if (SI->isVolatile())
1685  return false;
1686  if (RelBegin == 0 && RelEnd == Size)
1687  WholeAllocaOp = true;
1688  if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
1689  if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
1690  return false;
1691  } else if (RelBegin != 0 || RelEnd != Size ||
1692  !canConvertValue(DL, ValueTy, AllocaTy)) {
1693  // Non-integer stores need to be convertible to the alloca type so that
1694  // they are promotable.
1695  return false;
1696  }
1697  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
1698  if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
1699  return false;
1700  if (!I->isSplittable())
1701  return false; // Skip any unsplittable intrinsics.
1702  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
1703  if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
1704  II->getIntrinsicID() != Intrinsic::lifetime_end)
1705  return false;
1706  } else {
1707  return false;
1708  }
1709 
1710  return true;
1711 }
1712 
1713 /// \brief Test whether the given alloca partition's integer operations can be
1714 /// widened to promotable ones.
1715 ///
1716 /// This is a quick test to check whether we can rewrite the integer loads and
1717 /// stores to a particular alloca into wider loads and stores and be able to
1718 /// promote the resulting alloca.
1719 static bool
1720 isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy,
1721  uint64_t AllocBeginOffset, AllocaSlices &S,
1722  AllocaSlices::const_iterator I,
1723  AllocaSlices::const_iterator E,
1724  ArrayRef<AllocaSlices::iterator> SplitUses) {
1725  uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
1726  // Don't create integer types larger than the maximum bitwidth.
1727  if (SizeInBits > IntegerType::MAX_INT_BITS)
1728  return false;
1729 
1730  // Don't try to handle allocas with bit-padding.
1731  if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
1732  return false;
1733 
1734  // We need to ensure that an integer type with the appropriate bitwidth can
1735  // be converted to the alloca type, whatever that is. We don't want to force
1736  // the alloca itself to have an integer type if there is a more suitable one.
1737  Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
1738  if (!canConvertValue(DL, AllocaTy, IntTy) ||
1739  !canConvertValue(DL, IntTy, AllocaTy))
1740  return false;
1741 
1742  uint64_t Size = DL.getTypeStoreSize(AllocaTy);
1743 
1744  // While examining uses, we ensure that the alloca has a covering load or
1745  // store. We don't want to widen the integer operations only to fail to
1746  // promote due to some other unsplittable entry (which we may make splittable
1747  // later). However, if there are only splittable uses, go ahead and assume
1748  // that we cover the alloca.
1749  bool WholeAllocaOp = (I != E) ? false : DL.isLegalInteger(SizeInBits);
1750 
1751  for (; I != E; ++I)
1752  if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
1753  S, I, WholeAllocaOp))
1754  return false;
1755 
1756  for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
1757  SUE = SplitUses.end();
1758  SUI != SUE; ++SUI)
1759  if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
1760  S, *SUI, WholeAllocaOp))
1761  return false;
1762 
1763  return WholeAllocaOp;
1764 }
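// As a concrete example, an i64 alloca with a covering i64 load plus an i32
// store at offset 4 is viable: the covering load sets WholeAllocaOp, and the
// narrow store is rewritten as a load/mask/or/store of the full i64. The same
// alloca with only partial i32 accesses is rejected, since no single use
// covers the whole alloca.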
1765 
1766 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1767  IntegerType *Ty, uint64_t Offset,
1768  const Twine &Name) {
1769  DEBUG(dbgs() << " start: " << *V << "\n");
1770  IntegerType *IntTy = cast<IntegerType>(V->getType());
1771  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
1772  "Element extends past full value");
1773  uint64_t ShAmt = 8*Offset;
1774  if (DL.isBigEndian())
1775  ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
1776  if (ShAmt) {
1777  V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
1778  DEBUG(dbgs() << " shifted: " << *V << "\n");
1779  }
1780  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
1781  "Cannot extract to a larger integer!");
1782  if (Ty != IntTy) {
1783  V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
1784  DEBUG(dbgs() << " trunced: " << *V << "\n");
1785  }
1786  return V;
1787 }
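// For instance, extracting an i16 at byte offset 2 from an i64 %v on a
// little-endian target emits:
//   %shift = lshr i64 %v, 16
//   %part  = trunc i64 %shift to i16
// On a big-endian target the shift amount becomes 8*(8 - 2 - 2) == 32.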
1788 
1789 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
1790  Value *V, uint64_t Offset, const Twine &Name) {
1791  IntegerType *IntTy = cast<IntegerType>(Old->getType());
1792  IntegerType *Ty = cast<IntegerType>(V->getType());
1793  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
1794  "Cannot insert a larger integer!");
1795  DEBUG(dbgs() << " start: " << *V << "\n");
1796  if (Ty != IntTy) {
1797  V = IRB.CreateZExt(V, IntTy, Name + ".ext");
1798  DEBUG(dbgs() << " extended: " << *V << "\n");
1799  }
1800  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
1801  "Element store outside of alloca store");
1802  uint64_t ShAmt = 8*Offset;
1803  if (DL.isBigEndian())
1804  ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
1805  if (ShAmt) {
1806  V = IRB.CreateShl(V, ShAmt, Name + ".shift");
1807  DEBUG(dbgs() << " shifted: " << *V << "\n");
1808  }
1809 
1810  if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
1811  APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
1812  Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
1813  DEBUG(dbgs() << " masked: " << *Old << "\n");
1814  V = IRB.CreateOr(Old, V, Name + ".insert");
1815  DEBUG(dbgs() << " inserted: " << *V << "\n");
1816  }
1817  return V;
1818 }
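// For instance, inserting an i16 %v at byte offset 2 into an i64 %old on a
// little-endian target emits:
//   %ext  = zext i16 %v to i64
//   %shl  = shl i64 %ext, 16
//   %mask = and i64 %old, 0xFFFFFFFF0000FFFF
//   %ins  = or i64 %mask, %shl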
1819 
1820 static Value *extractVector(IRBuilderTy &IRB, Value *V,
1821  unsigned BeginIndex, unsigned EndIndex,
1822  const Twine &Name) {
1823  VectorType *VecTy = cast<VectorType>(V->getType());
1824  unsigned NumElements = EndIndex - BeginIndex;
1825  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
1826 
1827  if (NumElements == VecTy->getNumElements())
1828  return V;
1829 
1830  if (NumElements == 1) {
1831  V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
1832  Name + ".extract");
1833  DEBUG(dbgs() << " extract: " << *V << "\n");
1834  return V;
1835  }
1836 
1837  SmallVector<Constant *, 8> Mask;
1838  Mask.reserve(NumElements);
1839  for (unsigned i = BeginIndex; i != EndIndex; ++i)
1840  Mask.push_back(IRB.getInt32(i));
1841  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
1842  ConstantVector::get(Mask),
1843  Name + ".extract");
1844  DEBUG(dbgs() << " shuffle: " << *V << "\n");
1845  return V;
1846 }
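// For instance, extracting lanes [1, 3) of a <4 x i32> %v emits:
//   %vec = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 1, i32 2>
// while a single lane uses extractelement directly.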
1847 
1848 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
1849  unsigned BeginIndex, const Twine &Name) {
1850  VectorType *VecTy = cast<VectorType>(Old->getType());
1851  assert(VecTy && "Can only insert a vector into a vector");
1852 
1853  VectorType *Ty = dyn_cast<VectorType>(V->getType());
1854  if (!Ty) {
1855  // Single element to insert.
1856  V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
1857  Name + ".insert");
1858  DEBUG(dbgs() << " insert: " << *V << "\n");
1859  return V;
1860  }
1861 
1862  assert(Ty->getNumElements() <= VecTy->getNumElements() &&
1863  "Too many elements!");
1864  if (Ty->getNumElements() == VecTy->getNumElements()) {
1865  assert(V->getType() == VecTy && "Vector type mismatch");
1866  return V;
1867  }
1868  unsigned EndIndex = BeginIndex + Ty->getNumElements();
1869 
1870  // When inserting a smaller vector into the larger to store, we first
1871  // use a shuffle vector to widen it with undef elements, and then
1872  // a second shuffle vector to select between the loaded vector and the
1873  // incoming vector.
1874  SmallVector<Constant *, 8> Mask;
1875  Mask.reserve(VecTy->getNumElements());
1876  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
1877  if (i >= BeginIndex && i < EndIndex)
1878  Mask.push_back(IRB.getInt32(i - BeginIndex));
1879  else
1880  Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
1881  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
1882  ConstantVector::get(Mask),
1883  Name + ".expand");
1884  DEBUG(dbgs() << " shuffle: " << *V << "\n");
1885 
1886  Mask.clear();
1887  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
1888  Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
1889 
1890  V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + ".blend");
1891 
1892  DEBUG(dbgs() << " blend: " << *V << "\n");
1893  return V;
1894 }
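// For instance, inserting a <2 x i32> %v at lane 1 of a <4 x i32> %old emits:
//   %exp   = shufflevector <2 x i32> %v, <2 x i32> undef,
//                          <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
//   %blend = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>,
//                   <4 x i32> %exp, <4 x i32> %old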
1895 
1896 namespace {
1897 /// \brief Visitor to rewrite instructions using a particular slice of an alloca
1898 /// to use a new alloca.
1899 ///
1900 /// Also implements the rewriting to vector-based accesses when the partition
1901 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
1902 /// lives here.
1903 class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
1904  // Befriend the base class so it can delegate to private visit methods.
1905  friend class llvm::InstVisitor<AllocaSliceRewriter, bool>;
1906  typedef llvm::InstVisitor<AllocaSliceRewriter, bool> Base;
1907 
1908  const DataLayout &DL;
1909  AllocaSlices &S;
1910  SROA &Pass;
1911  AllocaInst &OldAI, &NewAI;
1912  const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
1913  Type *NewAllocaTy;
1914 
1915  // If we are rewriting an alloca partition which can be written as pure
1916  // vector operations, we stash extra information here. When VecTy is
1917  // non-null, we have some strict guarantees about the rewritten alloca:
1918  // - The new alloca is exactly the size of the vector type here.
1919  // - The accesses all either map to the entire vector or to a single
1920  // element.
1921  // - The set of accessing instructions is only one of those handled above
1922  // in isVectorPromotionViable. Generally these are the same access kinds
1923  // which are promotable via mem2reg.
1924  VectorType *VecTy;
1925  Type *ElementTy;
1926  uint64_t ElementSize;
1927 
1928  // This is a convenience and flag variable that will be null unless the new
1929  // alloca's integer operations should be widened to this integer type due to
1930  // passing isIntegerWideningViable above. If it is non-null, the desired
1931  // integer type will be stored here for easy access during rewriting.
1932  IntegerType *IntTy;
1933 
1934  // The offset of the slice currently being rewritten.
1935  uint64_t BeginOffset, EndOffset;
1936  bool IsSplittable;
1937  bool IsSplit;
1938  Use *OldUse;
1939  Instruction *OldPtr;
1940 
1941  // Output members carrying state about the result of visiting and rewriting
1942  // the slice of the alloca.
1943  bool IsUsedByRewrittenSpeculatableInstructions;
1944 
1945  // Utility IR builder, whose name prefix is set up for each visited use, and
1946  // the insertion point is set to point to the user.
1947  IRBuilderTy IRB;
1948 
1949 public:
1950  AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &S, SROA &Pass,
1951  AllocaInst &OldAI, AllocaInst &NewAI,
1952  uint64_t NewBeginOffset, uint64_t NewEndOffset,
1953  bool IsVectorPromotable = false,
1954  bool IsIntegerPromotable = false)
1955  : DL(DL), S(S), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
1956  NewAllocaBeginOffset(NewBeginOffset), NewAllocaEndOffset(NewEndOffset),
1957  NewAllocaTy(NewAI.getAllocatedType()),
1958  VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : 0),
1959  ElementTy(VecTy ? VecTy->getElementType() : 0),
1960  ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
1961  IntTy(IsIntegerPromotable
1962  ? Type::getIntNTy(
1963  NewAI.getContext(),
1964  DL.getTypeSizeInBits(NewAI.getAllocatedType()))
1965  : 0),
1966  BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(),
1967  OldPtr(), IsUsedByRewrittenSpeculatableInstructions(false),
1968  IRB(NewAI.getContext(), ConstantFolder()) {
1969  if (VecTy) {
1970  assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
1971  "Only multiple-of-8 sized vector elements are viable");
1972  ++NumVectorized;
1973  }
1974  assert((!IsVectorPromotable && !IsIntegerPromotable) ||
1975  IsVectorPromotable != IsIntegerPromotable);
1976  }
1977 
1978  bool visit(AllocaSlices::const_iterator I) {
1979  bool CanSROA = true;
1980  BeginOffset = I->beginOffset();
1981  EndOffset = I->endOffset();
1982  IsSplittable = I->isSplittable();
1983  IsSplit =
1984  BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
1985 
1986  OldUse = I->getUse();
1987  OldPtr = cast<Instruction>(OldUse->get());
1988 
1989  Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
1990  IRB.SetInsertPoint(OldUserI);
1991  IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
1992  IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
1993 
1994  CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
1995  if (VecTy || IntTy)
1996  assert(CanSROA);
1997  return CanSROA;
1998  }
1999 
2000  /// \brief Query whether this slice is used by speculatable instructions after
2001  /// rewriting.
2002  ///
2003  /// These instructions (PHIs and Selects currently) require the alloca slice
2004  /// to run back through the rewriter. Thus, they are promotable, but not on
2005  /// this iteration. This is distinct from a slice which is unpromotable for
2006  /// some other reason, in which case we don't even want to perform the
2007  /// speculation. This can be queried at any time and reflects whether (at
2008  /// that point) a visit call has rewritten a speculatable instruction on the
2009  /// current slice.
2010  bool isUsedByRewrittenSpeculatableInstructions() const {
2011  return IsUsedByRewrittenSpeculatableInstructions;
2012  }
2013 
2014 private:
2015  // Make sure the other visit overloads are visible.
2016  using Base::visit;
2017 
2018  // Every instruction which can end up as a user must have a rewrite rule.
2019  bool visitInstruction(Instruction &I) {
2020  DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
2021  llvm_unreachable("No rewrite rule for this instruction!");
2022  }
2023 
2024  Value *getAdjustedAllocaPtr(IRBuilderTy &IRB, uint64_t Offset,
2025  Type *PointerTy) {
2026  assert(Offset >= NewAllocaBeginOffset);
2027  return getAdjustedPtr(IRB, DL, &NewAI, APInt(DL.getPointerSizeInBits(),
2028  Offset - NewAllocaBeginOffset),
2029  PointerTy);
2030  }
2031 
2032  /// \brief Compute suitable alignment to access an offset into the new alloca.
2033  unsigned getOffsetAlign(uint64_t Offset) {
2034  unsigned NewAIAlign = NewAI.getAlignment();
2035  if (!NewAIAlign)
2036  NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
2037  return MinAlign(NewAIAlign, Offset);
2038  }
2039 
2040  /// \brief Compute suitable alignment to access a type at an offset of the
2041  /// new alloca.
2042  ///
2043  /// \returns zero if the type's ABI alignment is a suitable alignment,
2044  /// otherwise returns the maximal suitable alignment.
2045  unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
2046  unsigned Align = getOffsetAlign(Offset);
2047  return Align == DL.getABITypeAlignment(Ty) ? 0 : Align;
2048  }
2049 
2050  unsigned getIndex(uint64_t Offset) {
2051  assert(VecTy && "Can only call getIndex when rewriting a vector");
2052  uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2053  assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
2054  uint32_t Index = RelOffset / ElementSize;
2055  assert(Index * ElementSize == RelOffset);
2056  return Index;
2057  }
2058 
2059  void deleteIfTriviallyDead(Value *V) {
2060  Instruction *I = cast<Instruction>(V);
2061  if (isInstructionTriviallyDead(I))
2062  Pass.DeadInsts.insert(I);
2063  }
2064 
2065  Value *rewriteVectorizedLoadInst(uint64_t NewBeginOffset,
2066  uint64_t NewEndOffset) {
2067  unsigned BeginIndex = getIndex(NewBeginOffset);
2068  unsigned EndIndex = getIndex(NewEndOffset);
2069  assert(EndIndex > BeginIndex && "Empty vector!");
2070 
2071  Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2072  "load");
2073  return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
2074  }
2075 
2076  Value *rewriteIntegerLoad(LoadInst &LI, uint64_t NewBeginOffset,
2077  uint64_t NewEndOffset) {
2078  assert(IntTy && "We cannot insert an integer to the alloca");
2079  assert(!LI.isVolatile());
2080  Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2081  "load");
2082  V = convertValue(DL, IRB, V, IntTy);
2083  assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2084  uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2085  if (Offset > 0 || NewEndOffset < NewAllocaEndOffset)
2086  V = extractInteger(DL, IRB, V, cast<IntegerType>(LI.getType()), Offset,
2087  "extract");
2088  return V;
2089  }
2090 
2091  bool visitLoadInst(LoadInst &LI) {
2092  DEBUG(dbgs() << " original: " << LI << "\n");
2093  Value *OldOp = LI.getOperand(0);
2094  assert(OldOp == OldPtr);
2095 
2096  // Compute the intersecting offset range.
2097  assert(BeginOffset < NewAllocaEndOffset);
2098  assert(EndOffset > NewAllocaBeginOffset);
2099  uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2100  uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2101 
2102  uint64_t Size = NewEndOffset - NewBeginOffset;
2103 
2104  Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), Size * 8)
2105  : LI.getType();
2106  bool IsPtrAdjusted = false;
2107  Value *V;
2108  if (VecTy) {
2109  V = rewriteVectorizedLoadInst(NewBeginOffset, NewEndOffset);
2110  } else if (IntTy && LI.getType()->isIntegerTy()) {
2111  V = rewriteIntegerLoad(LI, NewBeginOffset, NewEndOffset);
2112  } else if (NewBeginOffset == NewAllocaBeginOffset &&
2113  canConvertValue(DL, NewAllocaTy, LI.getType())) {
2114  V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2115  LI.isVolatile(), "load");
2116  } else {
2117  Type *LTy = TargetTy->getPointerTo();
2118  V = IRB.CreateAlignedLoad(
2119  getAdjustedAllocaPtr(IRB, NewBeginOffset, LTy),
2120  getOffsetTypeAlign(TargetTy, NewBeginOffset - NewAllocaBeginOffset),
2121  LI.isVolatile(), "load");
2122  IsPtrAdjusted = true;
2123  }
2124  V = convertValue(DL, IRB, V, TargetTy);
2125 
2126  if (IsSplit) {
2127  assert(!LI.isVolatile());
2128  assert(LI.getType()->isIntegerTy() &&
2129  "Only integer type loads and stores are split");
2130  assert(Size < DL.getTypeStoreSize(LI.getType()) &&
2131  "Split load isn't smaller than original load");
2132  assert(LI.getType()->getIntegerBitWidth() ==
2133  DL.getTypeStoreSizeInBits(LI.getType()) &&
2134  "Non-byte-multiple bit width");
2135  // Move the insertion point just past the load so that we can refer to it.
2136  IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
2137  // Create a placeholder value with the same type as LI to use as the
2138  // basis for the new value. This allows us to replace the uses of LI with
2139  // the computed value, and then replace the placeholder with LI, leaving
2140  // LI only used for this computation.
2141  Value *Placeholder
2142  = new LoadInst(UndefValue::get(LI.getType()->getPointerTo()));
2143  V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset,
2144  "insert");
2145  LI.replaceAllUsesWith(V);
2146  Placeholder->replaceAllUsesWith(&LI);
2147  delete Placeholder;
2148  } else {
2149  LI.replaceAllUsesWith(V);
2150  }
2151 
2152  Pass.DeadInsts.insert(&LI);
2153  deleteIfTriviallyDead(OldOp);
2154  DEBUG(dbgs() << " to: " << *V << "\n");
2155  return !LI.isVolatile() && !IsPtrAdjusted;
2156  }
2157 
2158  bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
2159  uint64_t NewBeginOffset,
2160  uint64_t NewEndOffset) {
2161  if (V->getType() != VecTy) {
2162  unsigned BeginIndex = getIndex(NewBeginOffset);
2163  unsigned EndIndex = getIndex(NewEndOffset);
2164  assert(EndIndex > BeginIndex && "Empty vector!");
2165  unsigned NumElements = EndIndex - BeginIndex;
2166  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2167  Type *SliceTy =
2168  (NumElements == 1) ? ElementTy
2169  : VectorType::get(ElementTy, NumElements);
2170  if (V->getType() != SliceTy)
2171  V = convertValue(DL, IRB, V, SliceTy);
2172 
2173  // Mix in the existing elements.
2174  Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2175  "load");
2176  V = insertVector(IRB, Old, V, BeginIndex, "vec");
2177  }
2178  StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
2179  Pass.DeadInsts.insert(&SI);
2180 
2181  (void)Store;
2182  DEBUG(dbgs() << " to: " << *Store << "\n");
2183  return true;
2184  }
2185 
2186  bool rewriteIntegerStore(Value *V, StoreInst &SI,
2187  uint64_t NewBeginOffset, uint64_t NewEndOffset) {
2188  assert(IntTy && "We cannot extract an integer from the alloca");
2189  assert(!SI.isVolatile());
2190  if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
2191  Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2192  "oldload");
2193  Old = convertValue(DL, IRB, Old, IntTy);
2194  assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2195  uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
2196  V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset,
2197  "insert");
2198  }
2199  V = convertValue(DL, IRB, V, NewAllocaTy);
2200  StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
2201  Pass.DeadInsts.insert(&SI);
2202  (void)Store;
2203  DEBUG(dbgs() << " to: " << *Store << "\n");
2204  return true;
2205  }
2206 
2207  bool visitStoreInst(StoreInst &SI) {
2208  DEBUG(dbgs() << " original: " << SI << "\n");
2209  Value *OldOp = SI.getOperand(1);
2210  assert(OldOp == OldPtr);
2211 
2212  Value *V = SI.getValueOperand();
2213 
2214  // Strip all inbounds GEPs and pointer casts to try to dig out any root
2215  // alloca that should be re-examined after promoting this alloca.
2216  if (V->getType()->isPointerTy())
2217  if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
2218  Pass.PostPromotionWorklist.insert(AI);
2219 
2220  // Compute the intersecting offset range.
2221  assert(BeginOffset < NewAllocaEndOffset);
2222  assert(EndOffset > NewAllocaBeginOffset);
2223  uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2224  uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2225 
2226  uint64_t Size = NewEndOffset - NewBeginOffset;
2227  if (Size < DL.getTypeStoreSize(V->getType())) {
2228  assert(!SI.isVolatile());
2229  assert(V->getType()->isIntegerTy() &&
2230  "Only integer type loads and stores are split");
2231  assert(V->getType()->getIntegerBitWidth() ==
2232  DL.getTypeStoreSizeInBits(V->getType()) &&
2233  "Non-byte-multiple bit width");
2234  IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
2235  V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset,
2236  "extract");
2237  }
2238 
2239  if (VecTy)
2240  return rewriteVectorizedStoreInst(V, SI, OldOp, NewBeginOffset,
2241  NewEndOffset);
2242  if (IntTy && V->getType()->isIntegerTy())
2243  return rewriteIntegerStore(V, SI, NewBeginOffset, NewEndOffset);
2244 
2245  StoreInst *NewSI;
2246  if (NewBeginOffset == NewAllocaBeginOffset &&
2247  NewEndOffset == NewAllocaEndOffset &&
2248  canConvertValue(DL, V->getType(), NewAllocaTy)) {
2249  V = convertValue(DL, IRB, V, NewAllocaTy);
2250  NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2251  SI.isVolatile());
2252  } else {
2253  Value *NewPtr = getAdjustedAllocaPtr(IRB, NewBeginOffset,
2254  V->getType()->getPointerTo());
2255  NewSI = IRB.CreateAlignedStore(
2256  V, NewPtr, getOffsetTypeAlign(
2257  V->getType(), NewBeginOffset - NewAllocaBeginOffset),
2258  SI.isVolatile());
2259  }
2260  (void)NewSI;
2261  Pass.DeadInsts.insert(&SI);
2262  deleteIfTriviallyDead(OldOp);
2263 
2264  DEBUG(dbgs() << " to: " << *NewSI << "\n");
2265  return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
2266  }
2267 
2268  /// \brief Compute an integer value from splatting an i8 across the given
2269  /// number of bytes.
2270  ///
2271  /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
2272  /// call this routine.
2273  /// FIXME: Heed the advice above.
2274  ///
2275  /// \param V The i8 value to splat.
2276  /// \param Size The number of bytes in the output (assuming i8 is one byte)
2277  Value *getIntegerSplat(Value *V, unsigned Size) {
2278  assert(Size > 0 && "Expected a positive number of bytes.");
2279  IntegerType *VTy = cast<IntegerType>(V->getType());
2280  assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
2281  if (Size == 1)
2282  return V;
2283 
2284  Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8);
2285  V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, "zext"),
2286  ConstantExpr::getUDiv(
2287  Constant::getAllOnesValue(SplatIntTy),
2288  ConstantExpr::getZExt(
2289  Constant::getAllOnesValue(V->getType()),
2290  SplatIntTy)),
2291  "isplat");
2292  return V;
2293  }
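// For instance, splatting the byte 0xAB across Size == 4 computes
//   zext(0xAB) * (0xFFFFFFFF udiv 0xFF) == 0xAB * 0x01010101 == 0xABABABAB
// where the udiv of the two all-ones constants folds to the 0x01010101
// byte ladder at compile time.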
2294 
2295  /// \brief Compute a vector splat for a given element value.
2296  Value *getVectorSplat(Value *V, unsigned NumElements) {
2297  V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
2298  DEBUG(dbgs() << " splat: " << *V << "\n");
2299  return V;
2300  }
2301 
2302  bool visitMemSetInst(MemSetInst &II) {
2303  DEBUG(dbgs() << " original: " << II << "\n");
2304  assert(II.getRawDest() == OldPtr);
2305 
2306  // If the memset has a variable size, it cannot be split, just adjust the
2307  // pointer to the new alloca.
2308  if (!isa<Constant>(II.getLength())) {
2309  assert(!IsSplit);
2310  assert(BeginOffset >= NewAllocaBeginOffset);
2311  II.setDest(
2312  getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
2313  Type *CstTy = II.getAlignmentCst()->getType();
2314  II.setAlignment(ConstantInt::get(CstTy, getOffsetAlign(BeginOffset)));
2315 
2316  deleteIfTriviallyDead(OldPtr);
2317  return false;
2318  }
2319 
2320  // Record this instruction for deletion.
2321  Pass.DeadInsts.insert(&II);
2322 
2323  Type *AllocaTy = NewAI.getAllocatedType();
2324  Type *ScalarTy = AllocaTy->getScalarType();
2325 
2326  // Compute the intersecting offset range.
2327  assert(BeginOffset < NewAllocaEndOffset);
2328  assert(EndOffset > NewAllocaBeginOffset);
2329  uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2330  uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2331  uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset;
2332 
2333  // If this doesn't map cleanly onto the alloca type, and that type isn't
2334  // a single value type, just emit a memset.
2335  if (!VecTy && !IntTy &&
2336  (BeginOffset > NewAllocaBeginOffset ||
2337  EndOffset < NewAllocaEndOffset ||
2338  !AllocaTy->isSingleValueType() ||
2339  !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) ||
2340  DL.getTypeSizeInBits(ScalarTy)%8 != 0)) {
2341  Type *SizeTy = II.getLength()->getType();
2342  Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2343  CallInst *New = IRB.CreateMemSet(
2344  getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getRawDest()->getType()),
2345  II.getValue(), Size, getOffsetAlign(SliceOffset), II.isVolatile());
2346  (void)New;
2347  DEBUG(dbgs() << " to: " << *New << "\n");
2348  return false;
2349  }
2350 
2351  // If we can represent this as a simple value, we have to build the actual
2352  // value to store, which requires expanding the byte present in memset to
2353  // a sensible representation for the alloca type. This is essentially
2354  // splatting the byte to a sufficiently wide integer, splatting it across
2355  // any desired vector width, and bitcasting to the final type.
2356  Value *V;
2357 
2358  if (VecTy) {
2359  // If this is a memset of a vectorized alloca, insert it.
2360  assert(ElementTy == ScalarTy);
2361 
2362  unsigned BeginIndex = getIndex(NewBeginOffset);
2363  unsigned EndIndex = getIndex(NewEndOffset);
2364  assert(EndIndex > BeginIndex && "Empty vector!");
2365  unsigned NumElements = EndIndex - BeginIndex;
2366  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2367 
2368  Value *Splat =
2369  getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
2370  Splat = convertValue(DL, IRB, Splat, ElementTy);
2371  if (NumElements > 1)
2372  Splat = getVectorSplat(Splat, NumElements);
2373 
2374  Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2375  "oldload");
2376  V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2377  } else if (IntTy) {
2378  // If this is a memset on an alloca where we can widen stores, insert the
2379  // set integer.
2380  assert(!II.isVolatile());
2381 
2382  uint64_t Size = NewEndOffset - NewBeginOffset;
2383  V = getIntegerSplat(II.getValue(), Size);
2384 
2385  if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2386  EndOffset != NewAllocaEndOffset)) {
2387  Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2388  "oldload");
2389  Old = convertValue(DL, IRB, Old, IntTy);
2390  uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2391  V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2392  } else {
2393  assert(V->getType() == IntTy &&
2394  "Wrong type for an alloca wide integer!");
2395  }
2396  V = convertValue(DL, IRB, V, AllocaTy);
2397  } else {
2398  // Established these invariants above.
2399  assert(NewBeginOffset == NewAllocaBeginOffset);
2400  assert(NewEndOffset == NewAllocaEndOffset);
2401 
2402  V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
2403  if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2404  V = getVectorSplat(V, AllocaVecTy->getNumElements());
2405 
2406  V = convertValue(DL, IRB, V, AllocaTy);
2407  }
2408 
2409  Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2410  II.isVolatile());
2411  (void)New;
2412  DEBUG(dbgs() << " to: " << *New << "\n");
2413  return !II.isVolatile();
2414  }
2415 
2416  bool visitMemTransferInst(MemTransferInst &II) {
2417  // Rewriting of memory transfer instructions can be a bit tricky. We break
2418  // them into two categories: split intrinsics and unsplit intrinsics.
2419 
2420  DEBUG(dbgs() << " original: " << II << "\n");
2421 
2422  // Compute the intersecting offset range.
2423  assert(BeginOffset < NewAllocaEndOffset);
2424  assert(EndOffset > NewAllocaBeginOffset);
2425  uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2426  uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2427 
2428  assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr);
2429  bool IsDest = II.getRawDest() == OldPtr;
2430 
2431  // Compute the relative offset within the transfer.
2432  unsigned IntPtrWidth = DL.getPointerSizeInBits();
2433  APInt RelOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
2434 
2435  unsigned Align = II.getAlignment();
2436  uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset;
2437  if (Align > 1)
2438  Align =
2439  MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
2440  MinAlign(II.getAlignment(), getOffsetAlign(SliceOffset)));
2441 
2442  // For unsplit intrinsics, we simply modify the source and destination
2443  // pointers in place. This isn't just an optimization, it is a matter of
2444  // correctness. With unsplit intrinsics we may be dealing with transfers
2445  // within a single alloca before SROA ran, or with transfers that have
2446  // a variable length. We may also be dealing with memmove instead of
2447  // memcpy, and so simply updating the pointers is all that is necessary to
2448  // update both source and dest of a single call.
2449  if (!IsSplittable) {
2450  Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
2451  if (IsDest)
2452  II.setDest(
2453  getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
2454  else
2455  II.setSource(getAdjustedAllocaPtr(IRB, BeginOffset,
2456  II.getRawSource()->getType()));
2457 
2458  Type *CstTy = II.getAlignmentCst()->getType();
2459  II.setAlignment(ConstantInt::get(CstTy, Align));
2460 
2461  DEBUG(dbgs() << " to: " << II << "\n");
2462  deleteIfTriviallyDead(OldOp);
2463  return false;
2464  }
2465  // For split transfer intrinsics we have an incredibly useful assurance:
2466  // the source and destination do not reside within the same alloca, and at
2467  // least one of them does not escape. This means that we can replace
2468  // memmove with memcpy, and we don't need to worry about all manner of
2469  // downsides to splitting and transforming the operations.
2470 
2471  // If this doesn't map cleanly onto the alloca type, and that type isn't
2472  // a single value type, just emit a memcpy.
2473  bool EmitMemCpy
2474  = !VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset ||
2475  EndOffset < NewAllocaEndOffset ||
2476  !NewAI.getAllocatedType()->isSingleValueType());
2477 
2478  // If we're just going to emit a memcpy, the alloca hasn't changed, and the
2479  // size hasn't been shrunk based on analysis of the viable range, this is
2480  // a no-op.
2481  if (EmitMemCpy && &OldAI == &NewAI) {
2482  // Ensure the start lines up.
2483  assert(NewBeginOffset == BeginOffset);
2484 
2485  // Rewrite the size as needed.
2486  if (NewEndOffset != EndOffset)
2487  II.setLength(ConstantInt::get(II.getLength()->getType(),
2488  NewEndOffset - NewBeginOffset));
2489  return false;
2490  }
2491  // Record this instruction for deletion.
2492  Pass.DeadInsts.insert(&II);
2493 
2494  // Strip all inbounds GEPs and pointer casts to try to dig out any root
2495  // alloca that should be re-examined after rewriting this instruction.
2496  Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
2497  if (AllocaInst *AI
2498  = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets()))
2499  Pass.Worklist.insert(AI);
2500 
2501  if (EmitMemCpy) {
2502  Type *OtherPtrTy = IsDest ? II.getRawSource()->getType()
2503  : II.getRawDest()->getType();
2504 
2505  // Compute the other pointer, folding as much as possible to produce
2506  // a single, simple GEP in most cases.
2507  OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy);
2508 
2509  Value *OurPtr = getAdjustedAllocaPtr(
2510  IRB, NewBeginOffset,
2511  IsDest ? II.getRawDest()->getType() : II.getRawSource()->getType());
2512  Type *SizeTy = II.getLength()->getType();
2513  Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2514 
2515  CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
2516  IsDest ? OtherPtr : OurPtr,
2517  Size, Align, II.isVolatile());
2518  (void)New;
2519  DEBUG(dbgs() << " to: " << *New << "\n");
2520  return false;
2521  }
2522 
2523  // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
2524  // is equivalent to 1, but that isn't true if we end up rewriting this as
2525  // a load or store.
2526  if (!Align)
2527  Align = 1;
2528 
2529  bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
2530  NewEndOffset == NewAllocaEndOffset;
2531  uint64_t Size = NewEndOffset - NewBeginOffset;
2532  unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
2533  unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
2534  unsigned NumElements = EndIndex - BeginIndex;
2535  IntegerType *SubIntTy
2536  = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0;
2537 
2538  Type *OtherPtrTy = NewAI.getType();
2539  if (VecTy && !IsWholeAlloca) {
2540  if (NumElements == 1)
2541  OtherPtrTy = VecTy->getElementType();
2542  else
2543  OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
2544 
2545  OtherPtrTy = OtherPtrTy->getPointerTo();
2546  } else if (IntTy && !IsWholeAlloca) {
2547  OtherPtrTy = SubIntTy->getPointerTo();
2548  }
2549 
2550  Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy);
2551  Value *DstPtr = &NewAI;
2552  if (!IsDest)
2553  std::swap(SrcPtr, DstPtr);
2554 
2555  Value *Src;
2556  if (VecTy && !IsWholeAlloca && !IsDest) {
2557  Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2558  "load");
2559  Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
2560  } else if (IntTy && !IsWholeAlloca && !IsDest) {
2561  Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2562  "load");
2563  Src = convertValue(DL, IRB, Src, IntTy);
2564  uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2565  Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
2566  } else {
2567  Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
2568  "copyload");
2569  }
2570 
2571  if (VecTy && !IsWholeAlloca && IsDest) {
2572  Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2573  "oldload");
2574  Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
2575  } else if (IntTy && !IsWholeAlloca && IsDest) {
2576  Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2577  "oldload");
2578  Old = convertValue(DL, IRB, Old, IntTy);
2579  uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2580  Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
2581  Src = convertValue(DL, IRB, Src, NewAllocaTy);
2582  }
2583 
2584  StoreInst *Store = cast<StoreInst>(
2585  IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
2586  (void)Store;
2587  DEBUG(dbgs() << " to: " << *Store << "\n");
2588  return !II.isVolatile();
2589  }
2590 
2591  bool visitIntrinsicInst(IntrinsicInst &II) {
2592  assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
2593  II.getIntrinsicID() == Intrinsic::lifetime_end);
2594  DEBUG(dbgs() << " original: " << II << "\n");
2595  assert(II.getArgOperand(1) == OldPtr);
2596 
2597  // Compute the intersecting offset range.
2598  assert(BeginOffset < NewAllocaEndOffset);
2599  assert(EndOffset > NewAllocaBeginOffset);
2600  uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2601  uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2602 
2603  // Record this instruction for deletion.
2604  Pass.DeadInsts.insert(&II);
2605 
2606  ConstantInt *Size
2607  = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
2608  NewEndOffset - NewBeginOffset);
2609  Value *Ptr =
2610  getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getArgOperand(1)->getType());
2611  Value *New;
2612  if (II.getIntrinsicID() == Intrinsic::lifetime_start)
2613  New = IRB.CreateLifetimeStart(Ptr, Size);
2614  else
2615  New = IRB.CreateLifetimeEnd(Ptr, Size);
2616 
2617  (void)New;
2618  DEBUG(dbgs() << " to: " << *New << "\n");
2619  return true;
2620  }
2621 
2622  bool visitPHINode(PHINode &PN) {
2623  DEBUG(dbgs() << " original: " << PN << "\n");
2624  assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
2625  assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
2626 
2627  // We would like to compute a new pointer in only one place, but have it be
2628  // as local as possible to the PHI. To do that, we re-use the location of
2629  // the old pointer, which necessarily must be in the right position to
2630  // dominate the PHI.
2631  IRBuilderTy PtrBuilder(OldPtr);
2632  PtrBuilder.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) +
2633  ".");
2634 
2635  Value *NewPtr =
2636  getAdjustedAllocaPtr(PtrBuilder, BeginOffset, OldPtr->getType());
2637  // Replace the operands which were using the old pointer.
2638  std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
2639 
2640  DEBUG(dbgs() << " to: " << PN << "\n");
2641  deleteIfTriviallyDead(OldPtr);
2642 
2643  // Check whether we can speculate this PHI node, and if so remember that
2644  // fact and queue it up for another iteration after the speculation
2645  // occurs.
2646  if (isSafePHIToSpeculate(PN, &DL)) {
2647  Pass.SpeculatablePHIs.insert(&PN);
2648  IsUsedByRewrittenSpeculatableInstructions = true;
2649  return true;
2650  }
2651 
2652  return false; // PHIs can't be promoted on their own.
2653  }
2654 
2655  bool visitSelectInst(SelectInst &SI) {
2656  DEBUG(dbgs() << " original: " << SI << "\n");
2657  assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
2658  "Pointer isn't an operand!");
2659  assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
2660  assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
2661 
2662  Value *NewPtr = getAdjustedAllocaPtr(IRB, BeginOffset, OldPtr->getType());
2663  // Replace the operands which were using the old pointer.
2664  if (SI.getOperand(1) == OldPtr)
2665  SI.setOperand(1, NewPtr);
2666  if (SI.getOperand(2) == OldPtr)
2667  SI.setOperand(2, NewPtr);
2668 
2669  DEBUG(dbgs() << " to: " << SI << "\n");
2670  deleteIfTriviallyDead(OldPtr);
2671 
2672  // Check whether we can speculate this select instruction, and if so
2673  // remember that fact and queue it up for another iteration after the
2674  // speculation occurs.
2675  if (isSafeSelectToSpeculate(SI, &DL)) {
2676  Pass.SpeculatableSelects.insert(&SI);
2677  IsUsedByRewrittenSpeculatableInstructions = true;
2678  return true;
2679  }
2680 
2681  return false; // Selects can't be promoted on their own.
2682  }
2683 
2684 };
2685 }
2686 
2687 namespace {
2688 /// \brief Visitor to rewrite aggregate loads and stores as scalar.
2689 ///
2690 /// This pass aggressively rewrites all aggregate loads and stores on
2691 /// a particular pointer (or any pointer derived from it which we can identify)
2692 /// with scalar loads and stores.
2693 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
2694  // Befriend the base class so it can delegate to private visit methods.
2695  friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
2696 
2697  const DataLayout &DL;
2698 
2699  /// Queue of pointer uses to analyze and potentially rewrite.
2700  SmallVector<Use *, 8> Queue;
2701 
2702  /// Set to prevent us from cycling with phi nodes and loops.
2703  SmallPtrSet<User *, 8> Visited;
2704 
2705  /// The current pointer use being rewritten. This is used to dig up the used
2706  /// value (as opposed to the user).
2707  Use *U;
2708 
2709 public:
2710  AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}
2711 
2712  /// Rewrite loads and stores through a pointer and all pointers derived from
2713  /// it.
2714  bool rewrite(Instruction &I) {
2715  DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
2716  enqueueUsers(I);
2717  bool Changed = false;
2718  while (!Queue.empty()) {
2719  U = Queue.pop_back_val();
2720  Changed |= visit(cast<Instruction>(U->getUser()));
2721  }
2722  return Changed;
2723  }
2724 
2725 private:
2726  /// Enqueue all the users of the given instruction for further processing.
2727  /// This uses a set to de-duplicate users.
2728  void enqueueUsers(Instruction &I) {
2729  for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
2730  ++UI)
2731  if (Visited.insert(*UI))
2732  Queue.push_back(&UI.getUse());
2733  }
2734 
2735  // Conservative default is to not rewrite anything.
2736  bool visitInstruction(Instruction &I) { return false; }
2737 
2738  /// \brief Generic recursive split emission class.
2739  template <typename Derived>
2740  class OpSplitter {
2741  protected:
2742  /// The builder used to form new instructions.
2743  IRBuilderTy IRB;
2744  /// The indices to be used with insert- or extractvalue to select the
2745  /// appropriate value within the aggregate.
2746  SmallVector<unsigned, 4> Indices;
2747  /// The indices to a GEP instruction which will move Ptr to the correct slot
2748  /// within the aggregate.
2749  SmallVector<Value *, 4> GEPIndices;
2750  /// The base pointer of the original op, used as a base for GEPing the
2751  /// split operations.
2752  Value *Ptr;
2753 
2754  /// Initialize the splitter with an insertion point, Ptr and start with a
2755  /// single zero GEP index.
2756  OpSplitter(Instruction *InsertionPoint, Value *Ptr)
2757  : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
2758 
2759  public:
2760  /// \brief Generic recursive split emission routine.
2761  ///
2762  /// This method recursively splits an aggregate op (load or store) into
2763  /// scalar or vector ops. It splits recursively until it hits a single value
2764  /// and emits that single value operation via the template argument.
2765  ///
2766  /// The logic of this routine relies on GEPs and insertvalue and
2767  /// extractvalue all operating with the same fundamental index list, merely
2768  /// formatted differently (GEPs need actual values).
2769  ///
2770  /// \param Ty The type being split recursively into smaller ops.
2771  /// \param Agg The aggregate value being built up or stored, depending on
2772  /// whether this is splitting a load or a store respectively.
2773  void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
2774  if (Ty->isSingleValueType())
2775  return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
2776 
2777  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2778  unsigned OldSize = Indices.size();
2779  (void)OldSize;
2780  for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
2781  ++Idx) {
2782  assert(Indices.size() == OldSize && "Did not return to the old size");
2783  Indices.push_back(Idx);
2784  GEPIndices.push_back(IRB.getInt32(Idx));
2785  emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
2786  GEPIndices.pop_back();
2787  Indices.pop_back();
2788  }
2789  return;
2790  }
2791 
2792  if (StructType *STy = dyn_cast<StructType>(Ty)) {
2793  unsigned OldSize = Indices.size();
2794  (void)OldSize;
2795  for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
2796  ++Idx) {
2797  assert(Indices.size() == OldSize && "Did not return to the old size");
2798  Indices.push_back(Idx);
2799  GEPIndices.push_back(IRB.getInt32(Idx));
2800  emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
2801  GEPIndices.pop_back();
2802  Indices.pop_back();
2803  }
2804  return;
2805  }
2806 
2807  llvm_unreachable("Only arrays and structs are aggregate loadable types");
2808  }
2809  };
2810 
2811  struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
2812  LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr)
2813  : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {}
2814 
2815  /// Emit a leaf load of a single value. This is called at the leaves of the
2816  /// recursive emission to actually load values.
2817  void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
2818  assert(Ty->isSingleValueType());
2819  // Load the single value and insert it using the indices.
2820  Value *GEP = IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep");
2821  Value *Load = IRB.CreateLoad(GEP, Name + ".load");
2822  Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
2823  DEBUG(dbgs() << " to: " << *Load << "\n");
2824  }
2825  };
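// As an illustration (value names hypothetical), splitting a load of
// { i32, [2 x float] } emits one GEP/load/insertvalue triple per scalar
// leaf; for the second float:
//   %gep  = getelementptr inbounds { i32, [2 x float] }* %ptr, i32 0, i32 1, i32 1
//   %load = load float* %gep
//   %agg  = insertvalue { i32, [2 x float] } %prev, float %load, 1, 1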
2826 
2827  bool visitLoadInst(LoadInst &LI) {
2828  assert(LI.getPointerOperand() == *U);
2829  if (!LI.isSimple() || LI.getType()->isSingleValueType())
2830  return false;
2831 
2832  // We have an aggregate being loaded, split it apart.
2833  DEBUG(dbgs() << " original: " << LI << "\n");
2834  LoadOpSplitter Splitter(&LI, *U);
2835  Value *V = UndefValue::get(LI.getType());
2836  Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
2837  LI.replaceAllUsesWith(V);
2838  LI.eraseFromParent();
2839  return true;
2840  }
2841 
2842  struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
2843  StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr)
2844  : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {}
2845 
2846  /// Emit a leaf store of a single value. This is called at the leaves of the
2847  /// recursive emission to actually produce stores.
2848  void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
2849  assert(Ty->isSingleValueType());
2850  // Extract the single value and store it using the indices.
2851  Value *Store = IRB.CreateStore(
2852  IRB.CreateExtractValue(Agg, Indices, Name + ".extract"),
2853  IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"));
2854  (void)Store;
2855  DEBUG(dbgs() << " to: " << *Store << "\n");
2856  }
2857  };
2858 
2859  bool visitStoreInst(StoreInst &SI) {
2860  if (!SI.isSimple() || SI.getPointerOperand() != *U)
2861  return false;
2862  Value *V = SI.getValueOperand();
2863  if (V->getType()->isSingleValueType())
2864  return false;
2865 
2866  // We have an aggregate being stored, split it apart.
2867  DEBUG(dbgs() << " original: " << SI << "\n");
2868  StoreOpSplitter Splitter(&SI, *U);
2869  Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
2870  SI.eraseFromParent();
2871  return true;
2872  }
2873 
2874  bool visitBitCastInst(BitCastInst &BC) {
2875  enqueueUsers(BC);
2876  return false;
2877  }
2878 
2879  bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
2880  enqueueUsers(GEPI);
2881  return false;
2882  }
2883 
2884  bool visitPHINode(PHINode &PN) {
2885  enqueueUsers(PN);
2886  return false;
2887  }
2888 
2889  bool visitSelectInst(SelectInst &SI) {
2890  enqueueUsers(SI);
2891  return false;
2892  }
2893 };
2894 }
2895 
2896 /// \brief Strip aggregate type wrapping.
2897 ///
2898 /// This removes no-op aggregate types wrapping an underlying type. It will
2899 /// strip as many layers of types as it can without changing either the type
2900 /// size or the allocated size.
2901 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
2902  if (Ty->isSingleValueType())
2903  return Ty;
2904 
2905  uint64_t AllocSize = DL.getTypeAllocSize(Ty);
2906  uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
2907 
2908  Type *InnerTy;
2909  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
2910  InnerTy = ArrTy->getElementType();
2911  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2912  const StructLayout *SL = DL.getStructLayout(STy);
2913  unsigned Index = SL->getElementContainingOffset(0);
2914  InnerTy = STy->getElementType(Index);
2915  } else {
2916  return Ty;
2917  }
2918 
2919  if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
2920  TypeSize > DL.getTypeSizeInBits(InnerTy))
2921  return Ty;
2922 
2923  return stripAggregateTypeWrapping(DL, InnerTy);
2924 }
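
// Illustration (informal, assuming a typical DataLayout):
//
//   {{ i32 }}    -> i32          (single-element wrappers carry no extra bytes)
//   [1 x i64]    -> i64          (a one-element array is a no-op wrapper)
//   { i32, i8 }  -> { i32, i8 }  (stripping to i32 would shrink the sizes)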
2925 
2926 /// \brief Try to find a partition of the aggregate type passed in for a given
2927 /// offset and size.
2928 ///
2929 /// This recurses through the aggregate type and tries to compute a subtype
2930 /// based on the offset and size. When the offset and size span a sub-section
2931 /// of an array, it will even compute a new array type for that sub-section,
2932 /// and the same for structs.
2933 ///
2934 /// Note that this routine is very strict and tries to find a partition of the
2935 /// type which produces the *exact* right offset and size. It is not forgiving
2936 /// when the size or offset causes either end of the type-based partition to be off.
2937 /// Also, this is a best-effort routine. It is reasonable to give up and not
2938 /// return a type if necessary.
2939 static Type *getTypePartition(const DataLayout &DL, Type *Ty,
2940  uint64_t Offset, uint64_t Size) {
2941  if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
2942  return stripAggregateTypeWrapping(DL, Ty);
2943  if (Offset > DL.getTypeAllocSize(Ty) ||
2944  (DL.getTypeAllocSize(Ty) - Offset) < Size)
2945  return 0;
2946 
2947  if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
2948  // We can't partition pointers...
2949  if (SeqTy->isPointerTy())
2950  return 0;
2951 
2952  Type *ElementTy = SeqTy->getElementType();
2953  uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
2954  uint64_t NumSkippedElements = Offset / ElementSize;
2955  if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) {
2956  if (NumSkippedElements >= ArrTy->getNumElements())
2957  return 0;
2958  } else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) {
2959  if (NumSkippedElements >= VecTy->getNumElements())
2960  return 0;
2961  }
2962  Offset -= NumSkippedElements * ElementSize;
2963 
2964  // First check if we need to recurse.
2965  if (Offset > 0 || Size < ElementSize) {
2966  // Bail if the partition ends in a different array element.
2967  if ((Offset + Size) > ElementSize)
2968  return 0;
2969  // Recurse through the element type trying to peel off offset bytes.
2970  return getTypePartition(DL, ElementTy, Offset, Size);
2971  }
2972  assert(Offset == 0);
2973 
2974  if (Size == ElementSize)
2975  return stripAggregateTypeWrapping(DL, ElementTy);
2976  assert(Size > ElementSize);
2977  uint64_t NumElements = Size / ElementSize;
2978  if (NumElements * ElementSize != Size)
2979  return 0;
2980  return ArrayType::get(ElementTy, NumElements);
2981  }
2982 
2983  StructType *STy = dyn_cast<StructType>(Ty);
2984  if (!STy)
2985  return 0;
2986 
2987  const StructLayout *SL = DL.getStructLayout(STy);
2988  if (Offset >= SL->getSizeInBytes())
2989  return 0;
2990  uint64_t EndOffset = Offset + Size;
2991  if (EndOffset > SL->getSizeInBytes())
2992  return 0;
2993 
2994  unsigned Index = SL->getElementContainingOffset(Offset);
2995  Offset -= SL->getElementOffset(Index);
2996 
2997  Type *ElementTy = STy->getElementType(Index);
2998  uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
2999  if (Offset >= ElementSize)
3000  return 0; // The offset points into alignment padding.
3001 
3002  // See if any partition must be contained by the element.
3003  if (Offset > 0 || Size < ElementSize) {
3004  if ((Offset + Size) > ElementSize)
3005  return 0;
3006  return getTypePartition(DL, ElementTy, Offset, Size);
3007  }
3008  assert(Offset == 0);
3009 
3010  if (Size == ElementSize)
3011  return stripAggregateTypeWrapping(DL, ElementTy);
3012 
3013  StructType::element_iterator EI = STy->element_begin() + Index,
3014  EE = STy->element_end();
3015  if (EndOffset < SL->getSizeInBytes()) {
3016  unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
3017  if (Index == EndIndex)
3018  return 0; // Within a single element and its padding.
3019 
3020  // Don't try to form "natural" types if the elements don't line up with the
3021  // expected size.
3022  // FIXME: We could potentially recurse down through the last element in the
3023  // sub-struct to find a natural end point.
3024  if (SL->getElementOffset(EndIndex) != EndOffset)
3025  return 0;
3026 
3027  assert(Index < EndIndex);
3028  EE = STy->element_begin() + EndIndex;
3029  }
3030 
3031  // Try to build up a sub-structure.
3032  StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE),
3033  STy->isPacked());
3034  const StructLayout *SubSL = DL.getStructLayout(SubTy);
3035  if (Size != SubSL->getSizeInBytes())
3036  return 0; // The sub-struct doesn't have quite the size needed.
3037 
3038  return SubTy;
3039 }
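
// Illustration (informal, assuming a typical DataLayout; %T and %A are
// hypothetical types):
//
//   %T = type { i32, i32, i32, i32 }     %A = type [4 x i32]
//
//   getTypePartition(DL, %T, /*Offset=*/4, /*Size=*/8) -> { i32, i32 }
//   getTypePartition(DL, %A, /*Offset=*/4, /*Size=*/8) -> [2 x i32]
//   getTypePartition(DL, %T, /*Offset=*/2, /*Size=*/4) -> 0 (the range
//                                          straddles an element boundary)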
3040 
3041 /// \brief Rewrite an alloca partition's users.
3042 ///
3043 /// This routine drives both of the rewriting goals of the SROA pass. It tries
3044 /// to rewrite uses of an alloca partition to be conducive for SSA value
3045 /// promotion. If the partition needs a new, more refined alloca, this will
3046 /// build that new alloca, preserving as much type information as possible, and
3047 /// rewrite the uses of the old alloca to point at the new one and have the
3048 /// appropriate new offsets. It also evaluates how successful the rewrite was
3049 /// at enabling promotion and if it was successful queues the alloca to be
3050 /// promoted.
3051 bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
3052                             AllocaSlices::iterator B, AllocaSlices::iterator E,
3053                             int64_t BeginOffset, int64_t EndOffset,
3054                             ArrayRef<AllocaSlices::iterator> SplitUses) {
3055  assert(BeginOffset < EndOffset);
3056  uint64_t SliceSize = EndOffset - BeginOffset;
3057 
3058  // Try to compute a friendly type for this partition of the alloca. This
3059  // won't always succeed, in which case we fall back to a legal integer type
3060  // or an i8 array of an appropriate size.
3061  Type *SliceTy = 0;
3062  if (Type *CommonUseTy = findCommonType(B, E, EndOffset))
3063  if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize)
3064  SliceTy = CommonUseTy;
3065  if (!SliceTy)
3066  if (Type *TypePartitionTy = getTypePartition(*DL, AI.getAllocatedType(),
3067  BeginOffset, SliceSize))
3068  SliceTy = TypePartitionTy;
3069  if ((!SliceTy || (SliceTy->isArrayTy() &&
3070  SliceTy->getArrayElementType()->isIntegerTy())) &&
3071  DL->isLegalInteger(SliceSize * 8))
3072  SliceTy = Type::getIntNTy(*C, SliceSize * 8);
3073  if (!SliceTy)
3074  SliceTy = ArrayType::get(Type::getInt8Ty(*C), SliceSize);
3075  assert(DL->getTypeAllocSize(SliceTy) >= SliceSize);
3076 
3077  bool IsVectorPromotable = isVectorPromotionViable(
3078  *DL, SliceTy, S, BeginOffset, EndOffset, B, E, SplitUses);
3079 
3080  bool IsIntegerPromotable =
3081  !IsVectorPromotable &&
3082  isIntegerWideningViable(*DL, SliceTy, BeginOffset, S, B, E, SplitUses);
3083 
3084  // Check for the case where we're going to rewrite to a new alloca of the
3085  // exact same type as the original, and with the same access offsets. In that
3086  // case, re-use the existing alloca, but still run through the rewriter to
3087  // perform phi and select speculation.
3088  AllocaInst *NewAI;
3089  if (SliceTy == AI.getAllocatedType()) {
3090  assert(BeginOffset == 0 &&
3091  "Non-zero begin offset but same alloca type");
3092  NewAI = &AI;
3093  // FIXME: We should be able to bail at this point with "nothing changed".
3094  // FIXME: We might want to defer PHI speculation until after here.
3095  } else {
3096  unsigned Alignment = AI.getAlignment();
3097  if (!Alignment) {
3098  // The minimum alignment which users can rely on when the explicit
3099  // alignment is omitted or zero is that required by the ABI for this
3100  // type.
3101  Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
3102  }
3103  Alignment = MinAlign(Alignment, BeginOffset);
3104  // If we will get at least this much alignment from the type alone, leave
3105  // the alloca's alignment unconstrained.
3106  if (Alignment <= DL->getABITypeAlignment(SliceTy))
3107  Alignment = 0;
3108  NewAI = new AllocaInst(SliceTy, 0, Alignment,
3109  AI.getName() + ".sroa." + Twine(B - S.begin()), &AI);
3110  ++NumNewAllocas;
3111  }
3112 
3113  DEBUG(dbgs() << "Rewriting alloca partition "
3114  << "[" << BeginOffset << "," << EndOffset << ") to: " << *NewAI
3115  << "\n");
3116 
3117  // Track the high watermarks on several worklists that are only relevant for
3118  // promoted allocas. We will reset them to this point if the alloca is not in
3119  // fact scheduled for promotion.
3120  unsigned PPWOldSize = PostPromotionWorklist.size();
3121  unsigned SPOldSize = SpeculatablePHIs.size();
3122  unsigned SSOldSize = SpeculatableSelects.size();
3123  unsigned NumUses = 0;
3124 
3125  AllocaSliceRewriter Rewriter(*DL, S, *this, AI, *NewAI, BeginOffset,
3126  EndOffset, IsVectorPromotable,
3127  IsIntegerPromotable);
3128  bool Promotable = true;
3129  for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
3130  SUE = SplitUses.end();
3131  SUI != SUE; ++SUI) {
3132  DEBUG(dbgs() << " rewriting split ");
3133  DEBUG(S.printSlice(dbgs(), *SUI, ""));
3134  Promotable &= Rewriter.visit(*SUI);
3135  ++NumUses;
3136  }
3137  for (AllocaSlices::iterator I = B; I != E; ++I) {
3138  DEBUG(dbgs() << " rewriting ");
3139  DEBUG(S.printSlice(dbgs(), I, ""));
3140  Promotable &= Rewriter.visit(I);
3141  ++NumUses;
3142  }
3143 
3144  NumAllocaPartitionUses += NumUses;
3145  MaxUsesPerAllocaPartition =
3146  std::max<unsigned>(NumUses, MaxUsesPerAllocaPartition);
3147 
3148  if (Promotable && !Rewriter.isUsedByRewrittenSpeculatableInstructions()) {
3149  DEBUG(dbgs() << " and queuing for promotion\n");
3150  PromotableAllocas.push_back(NewAI);
3151  } else if (NewAI != &AI ||
3152  (Promotable &&
3153  Rewriter.isUsedByRewrittenSpeculatableInstructions())) {
3154  // If we can't promote the alloca, iterate on it to check for new
3155  // refinements exposed by splitting the current alloca. Don't iterate on an
3156  // alloca which didn't actually change and didn't get promoted.
3157  //
3158  // Alternatively, if we could promote the alloca but have speculatable
3159  // instructions then we will speculate them after finishing our processing
3160  // of the original alloca. Mark the new one for re-visiting in the next
3161  // iteration so the speculated operations can be rewritten.
3162  //
3163  // FIXME: We should actually track whether the rewriter changed anything.
3164  Worklist.insert(NewAI);
3165  }
3166 
3167  // Drop any post-promotion work items if promotion didn't happen.
3168  if (!Promotable) {
3169  while (PostPromotionWorklist.size() > PPWOldSize)
3170  PostPromotionWorklist.pop_back();
3171  while (SpeculatablePHIs.size() > SPOldSize)
3172  SpeculatablePHIs.pop_back();
3173  while (SpeculatableSelects.size() > SSOldSize)
3174  SpeculatableSelects.pop_back();
3175  }
3176 
3177  return true;
3178 }
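
// Note on the SliceTy selection above (an informal example): for an 8-byte
// partition of a hypothetical [16 x i8] alloca, getTypePartition yields
// [8 x i8]; because that is an array of integers and i64 is legal on common
// targets, SliceTy is widened to i64, which enables integer-based promotion.
// Without a legal integer of that width, the [8 x i8] result would stand.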
3179 
3180 namespace {
3181 struct IsSliceEndLessOrEqualTo {
3182  uint64_t UpperBound;
3183 
3184  IsSliceEndLessOrEqualTo(uint64_t UpperBound) : UpperBound(UpperBound) {}
3185 
3186  bool operator()(const AllocaSlices::iterator &I) {
3187  return I->endOffset() <= UpperBound;
3188  }
3189 };
3190 }
3191 
3192 static void
3193 removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
3194                         uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
3195  if (Offset >= MaxSplitUseEndOffset) {
3196  SplitUses.clear();
3197  MaxSplitUseEndOffset = 0;
3198  return;
3199  }
3200 
3201  size_t SplitUsesOldSize = SplitUses.size();
3202  SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
3203  IsSliceEndLessOrEqualTo(Offset)),
3204  SplitUses.end());
3205  if (SplitUsesOldSize == SplitUses.size())
3206  return;
3207 
3208  // Recompute the max. While this is linear, so is remove_if.
3209  MaxSplitUseEndOffset = 0;
3210  for (SmallVectorImpl<AllocaSlices::iterator>::iterator
3211  SUI = SplitUses.begin(),
3212  SUE = SplitUses.end();
3213  SUI != SUE; ++SUI)
3214  MaxSplitUseEndOffset = std::max((*SUI)->endOffset(), MaxSplitUseEndOffset);
3215 }
3216 
3217 /// \brief Walks the slices of an alloca and forms partitions based on them,
3218 /// rewriting each of their uses.
3219 bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) {
3220  if (S.begin() == S.end())
3221  return false;
3222 
3223  unsigned NumPartitions = 0;
3224  bool Changed = false;
3225  SmallVector<AllocaSlices::iterator, 4> SplitUses;
3226  uint64_t MaxSplitUseEndOffset = 0;
3227 
3228  uint64_t BeginOffset = S.begin()->beginOffset();
3229 
3230  for (AllocaSlices::iterator SI = S.begin(), SJ = llvm::next(SI), SE = S.end();
3231  SI != SE; SI = SJ) {
3232  uint64_t MaxEndOffset = SI->endOffset();
3233 
3234  if (!SI->isSplittable()) {
3235  // When we're forming an unsplittable region, it must always start at the
3236  // first slice and will extend through its end.
3237  assert(BeginOffset == SI->beginOffset());
3238 
3239  // Form a partition including all of the overlapping slices with this
3240  // unsplittable slice.
3241  while (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
3242  if (!SJ->isSplittable())
3243  MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
3244  ++SJ;
3245  }
3246  } else {
3247  assert(SI->isSplittable()); // Established above.
3248 
3249  // Collect all of the overlapping splittable slices.
3250  while (SJ != SE && SJ->beginOffset() < MaxEndOffset &&
3251  SJ->isSplittable()) {
3252  MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
3253  ++SJ;
3254  }
3255 
3256  // Back up MaxEndOffset and SJ if we ended the span early when
3257  // encountering an unsplittable slice.
3258  if (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
3259  assert(!SJ->isSplittable());
3260  MaxEndOffset = SJ->beginOffset();
3261  }
3262  }
3263 
3264  // Check if we have managed to move the end offset forward yet. If so,
3265  // we'll have to rewrite uses and erase old split uses.
3266  if (BeginOffset < MaxEndOffset) {
3267  // Rewrite a sequence of overlapping slices.
3268  Changed |=
3269  rewritePartition(AI, S, SI, SJ, BeginOffset, MaxEndOffset, SplitUses);
3270  ++NumPartitions;
3271 
3272  removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset, MaxEndOffset);
3273  }
3274 
3275  // Accumulate all the splittable slices from the [SI,SJ) region which
3276  // overlap going forward.
3277  for (AllocaSlices::iterator SK = SI; SK != SJ; ++SK)
3278  if (SK->isSplittable() && SK->endOffset() > MaxEndOffset) {
3279  SplitUses.push_back(SK);
3280  MaxSplitUseEndOffset = std::max(SK->endOffset(), MaxSplitUseEndOffset);
3281  }
3282 
3283  // If we're already at the end and we have no split uses, we're done.
3284  if (SJ == SE && SplitUses.empty())
3285  break;
3286 
3287  // If we have no split uses or no gap in offsets, we're ready to move to
3288  // the next slice.
3289  if (SplitUses.empty() || (SJ != SE && MaxEndOffset == SJ->beginOffset())) {
3290  BeginOffset = SJ->beginOffset();
3291  continue;
3292  }
3293 
3294  // Even if we have split slices, if the next slice is splittable and the
3295  // split slices reach it, we can simply set up the beginning offset of the
3296  // next iteration to bridge between them.
3297  if (SJ != SE && SJ->isSplittable() &&
3298  MaxSplitUseEndOffset > SJ->beginOffset()) {
3299  BeginOffset = MaxEndOffset;
3300  continue;
3301  }
3302 
3303  // Otherwise, we have a tail of split slices. Rewrite them with an empty
3304  // range of slices.
3305  uint64_t PostSplitEndOffset =
3306  SJ == SE ? MaxSplitUseEndOffset : SJ->beginOffset();
3307 
3308  Changed |= rewritePartition(AI, S, SJ, SJ, MaxEndOffset, PostSplitEndOffset,
3309  SplitUses);
3310  ++NumPartitions;
3311 
3312  if (SJ == SE)
3313  break; // Skip the rest, we don't need to do any cleanup.
3314 
3315  removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset,
3316  PostSplitEndOffset);
3317 
3318  // Now just reset the begin offset for the next iteration.
3319  BeginOffset = SJ->beginOffset();
3320  }
3321 
3322  NumAllocaPartitions += NumPartitions;
3323  MaxPartitionsPerAlloca =
3324  std::max<unsigned>(NumPartitions, MaxPartitionsPerAlloca);
3325 
3326  return Changed;
3327 }
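
// Illustration (informal): for a hypothetical 8-byte alloca whose slices are
//
//   [0,8) splittable    (say, a memset covering the whole alloca)
//   [0,4) unsplittable  (say, an i32 load)
//   [4,8) unsplittable  (say, an i32 load)
//
// the loop forms the partitions [0,4) and [4,8); the splittable [0,8) slice
// is rewritten as part of the first partition and again, carried forward as
// a split use, for the second.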
3328 
3329 /// \brief Analyze an alloca for SROA.
3330 ///
3331 /// This analyzes the alloca to ensure we can reason about it, builds
3332 /// the slices of the alloca, and then hands it off to be split and
3333 /// rewritten as needed.
3334 bool SROA::runOnAlloca(AllocaInst &AI) {
3335  DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
3336  ++NumAllocasAnalyzed;
3337 
3338  // Special case dead allocas, as they're trivial.
3339  if (AI.use_empty()) {
3340  AI.eraseFromParent();
3341  return true;
3342  }
3343 
3344  // Skip alloca forms that this analysis can't handle.
3345  if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
3346  DL->getTypeAllocSize(AI.getAllocatedType()) == 0)
3347  return false;
3348 
3349  bool Changed = false;
3350 
3351  // First, split any FCA loads and stores touching this alloca to expose
3352  // better splitting and promotion opportunities.
3353  AggLoadStoreRewriter AggRewriter(*DL);
3354  Changed |= AggRewriter.rewrite(AI);
3355 
3356  // Build the slices using a recursive instruction-visiting builder.
3357  AllocaSlices S(*DL, AI);
3358  DEBUG(S.print(dbgs()));
3359  if (S.isEscaped())
3360  return Changed;
3361 
3362  // Delete all the dead users of this alloca before splitting and rewriting it.
3363  for (AllocaSlices::dead_user_iterator DI = S.dead_user_begin(),
3364  DE = S.dead_user_end();
3365  DI != DE; ++DI) {
3366  Changed = true;
3367  (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
3368  DeadInsts.insert(*DI);
3369  }
3370  for (AllocaSlices::dead_op_iterator DO = S.dead_op_begin(),
3371  DE = S.dead_op_end();
3372  DO != DE; ++DO) {
3373  Value *OldV = **DO;
3374  // Clobber the use with an undef value.
3375  **DO = UndefValue::get(OldV->getType());
3376  if (Instruction *OldI = dyn_cast<Instruction>(OldV))
3377  if (isInstructionTriviallyDead(OldI)) {
3378  Changed = true;
3379  DeadInsts.insert(OldI);
3380  }
3381  }
3382 
3383  // No slices to split. Leave the dead alloca for a later pass to clean up.
3384  if (S.begin() == S.end())
3385  return Changed;
3386 
3387  Changed |= splitAlloca(AI, S);
3388 
3389  DEBUG(dbgs() << " Speculating PHIs\n");
3390  while (!SpeculatablePHIs.empty())
3391  speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());
3392 
3393  DEBUG(dbgs() << " Speculating Selects\n");
3394  while (!SpeculatableSelects.empty())
3395  speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());
3396 
3397  return Changed;
3398 }
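
// End-to-end illustration (informal; names are hypothetical): given
//
//   %a = alloca { i32, i32 }
//   %f0 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 0
//   store i32 1, i32* %f0
//   %v = load i32* %f0
//
// the slices of the two fields form separate partitions; the accesses above
// are rewritten onto a fresh i32 alloca (named with an ".sroa." suffix),
// which is then queued for promotion so that %v ultimately becomes the SSA
// value 1.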
3399 
3400 /// \brief Delete the dead instructions accumulated in this run.
3401 ///
3402 /// Recursively deletes the dead instructions we've accumulated. This is done
3403 /// at the very end to maximize locality of the recursive delete and to
3404 /// minimize the problems of invalidated instruction pointers as such pointers
3405 /// are used heavily in the intermediate stages of the algorithm.
3406 ///
3407 /// We also record the alloca instructions deleted here so that they aren't
3408 /// subsequently handed to mem2reg to promote.
3409 void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
3410  while (!DeadInsts.empty()) {
3411  Instruction *I = DeadInsts.pop_back_val();
3412  DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
3413 
3414  I->replaceAllUsesWith(UndefValue::get(I->getType()));
3415 
3416  for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
3417  if (Instruction *U = dyn_cast<Instruction>(*OI)) {
3418  // Zero out the operand and see if it becomes trivially dead.
3419  *OI = 0;
3420  if (isInstructionTriviallyDead(U))
3421  DeadInsts.insert(U);
3422  }
3423 
3424  if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
3425  DeletedAllocas.insert(AI);
3426 
3427  ++NumDeleted;
3428  I->eraseFromParent();
3429  }
3430 }
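
// Illustration (informal): deletion cascades through operands. If a store
// queued in DeadInsts was the only user of a GEP, zeroing the store's
// operands makes that GEP trivially dead, so it is queued and deleted by a
// later round of the same loop, and so on transitively.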
3431 
3432 static void enqueueUsersInWorklist(Instruction &I,
3433                                    SmallVectorImpl<Instruction *> &Worklist,
3434                                    SmallPtrSet<Instruction *, 8> &Visited) {
3435  for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
3436  ++UI)
3437  if (Visited.insert(cast<Instruction>(*UI)))
3438  Worklist.push_back(cast<Instruction>(*UI));
3439 }
3440 
3441 /// \brief Promote the allocas, using the best available technique.
3442 ///
3443 /// This attempts to promote whatever allocas have been identified as viable in
3444 /// the PromotableAllocas list. If that list is empty, there is nothing to do.
3445 /// If there is a domtree available, we attempt to promote using the full power
3446 /// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is
3447 /// based on the SSAUpdater utilities. This function returns whether any
3448 /// promotion occurred.
3449 bool SROA::promoteAllocas(Function &F) {
3450  if (PromotableAllocas.empty())
3451  return false;
3452 
3453  NumPromoted += PromotableAllocas.size();
3454 
3455  if (DT && !ForceSSAUpdater) {
3456  DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
3457  PromoteMemToReg(PromotableAllocas, *DT);
3458  PromotableAllocas.clear();
3459  return true;
3460  }
3461 
3462  DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
3463  SSAUpdater SSA;
3464  DIBuilder DIB(*F.getParent());
3465  SmallVector<Instruction *, 64> Insts;
3466 
3467  // We need a worklist to walk the uses of each alloca.
3468  SmallVector<Instruction *, 8> Worklist;
3469  SmallPtrSet<Instruction *, 8> Visited;
3470  SmallVector<Instruction *, 32> DeadInsts;
3471 
3472  for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
3473  AllocaInst *AI = PromotableAllocas[Idx];
3474  Insts.clear();
3475  Worklist.clear();
3476  Visited.clear();
3477 
3478  enqueueUsersInWorklist(*AI, Worklist, Visited);
3479 
3480  while (!Worklist.empty()) {
3481  Instruction *I = Worklist.pop_back_val();
3482 
3483  // FIXME: Currently the SSAUpdater infrastructure doesn't reason about
3484  // lifetime intrinsics and so we strip them (and the bitcasts+GEPs
3485  // leading to them) here. Eventually it should use them to optimize the
3486  // scalar values produced.
3487  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3488  assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
3489         II->getIntrinsicID() == Intrinsic::lifetime_end);
3490  II->eraseFromParent();
3491  continue;
3492  }
3493 
3494  // Push the loads and stores we find onto the list. SROA will already
3495  // have validated that all loads and stores are viable candidates for
3496  // promotion.
3497  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
3498  assert(LI->getType() == AI->getAllocatedType());
3499  Insts.push_back(LI);
3500  continue;
3501  }
3502  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
3503  assert(SI->getValueOperand()->getType() == AI->getAllocatedType());
3504  Insts.push_back(SI);
3505  continue;
3506  }
3507 
3508  // For everything else, we know that only no-op bitcasts and GEPs will
3509  // make it this far, just recurse through them and record them for later
3510  // removal.
3511  DeadInsts.push_back(I);
3512  enqueueUsersInWorklist(*I, Worklist, Visited);
3513  }
3514  AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
3515  while (!DeadInsts.empty())
3516  DeadInsts.pop_back_val()->eraseFromParent();
3517  AI->eraseFromParent();
3518  }
3519 
3520  PromotableAllocas.clear();
3521  return true;
3522 }
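
// Note (informal): with a DominatorTree available, the mem2reg fast path at
// the top of this function handles promotion; the SSAUpdater fallback is
// normally exercised only when no DomTree exists or under the hidden
// -force-ssa-updater flag, e.g. (hypothetical invocation):
//
//   opt -sroa -force-ssa-updater -S input.ll -o output.ll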
3523 
3524 namespace {
3525  /// \brief A predicate to test whether an alloca belongs to a set.
3526  class IsAllocaInSet {
3527  typedef SmallPtrSet<AllocaInst *, 4> SetType;
3528  const SetType &Set;
3529 
3530  public:
3531  typedef AllocaInst *argument_type;
3532 
3533  IsAllocaInSet(const SetType &Set) : Set(Set) {}
3534  bool operator()(AllocaInst *AI) const { return Set.count(AI); }
3535  };
3536 }
3537 
3538 bool SROA::runOnFunction(Function &F) {
3539  DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
3540  C = &F.getContext();
3541  DL = getAnalysisIfAvailable<DataLayout>();
3542  if (!DL) {
3543  DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
3544  return false;
3545  }
3546  DT = getAnalysisIfAvailable<DominatorTree>();
3547 
3548  BasicBlock &EntryBB = F.getEntryBlock();
3549  for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
3550  I != E; ++I)
3551  if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
3552  Worklist.insert(AI);
3553 
3554  bool Changed = false;
3555  // A set of deleted alloca instruction pointers which should be removed from
3556  // the list of promotable allocas.
3557  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
3558 
3559  do {
3560  while (!Worklist.empty()) {
3561  Changed |= runOnAlloca(*Worklist.pop_back_val());
3562  deleteDeadInstructions(DeletedAllocas);
3563 
3564  // Remove the deleted allocas from various lists so that we don't try to
3565  // continue processing them.
3566  if (!DeletedAllocas.empty()) {
3567  Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
3568  PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
3569  PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
3570  PromotableAllocas.end(),
3571  IsAllocaInSet(DeletedAllocas)),
3572  PromotableAllocas.end());
3573  DeletedAllocas.clear();
3574  }
3575  }
3576 
3577  Changed |= promoteAllocas(F);
3578 
3579  Worklist = PostPromotionWorklist;
3580  PostPromotionWorklist.clear();
3581  } while (!Worklist.empty());
3582 
3583  return Changed;
3584 }
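
// Illustrative driver (a sketch assuming the legacy PassManager API of this
// LLVM version; not part of this file): runOnFunction above bails out when no
// DataLayout analysis is available, so a standalone driver must register one
// before adding the pass.
//
//   #include "llvm/IR/DataLayout.h"
//   #include "llvm/IR/Module.h"
//   #include "llvm/PassManager.h"
//   #include "llvm/Transforms/Scalar.h"
//
//   void runSROA(llvm::Module &M) {
//     llvm::PassManager PM;
//     PM.add(new llvm::DataLayout(&M)); // without this, SROA is skipped
//     PM.add(llvm::createSROAPass());   // defaults to requiring a DomTree
//     PM.run(M);
//   }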
3585 
3586 void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
3587  if (RequiresDomTree)
3588  AU.addRequired<DominatorTree>();
3589  AU.setPreservesCFG();
3590 }