LLVM API Documentation

AMDILCFGStructurizer.cpp
1 //===-- AMDILCFGStructurizer.cpp - CFG Structurizer -----------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 /// \file
9 //==-----------------------------------------------------------------------===//
10 
11 #define DEBUG_TYPE "structcfg"
12 
13 #include "AMDGPU.h"
14 #include "AMDGPUInstrInfo.h"
15 #include "R600InstrInfo.h"
16 #include "llvm/Support/Debug.h"
17 #include "llvm/Support/raw_ostream.h"
18 #include "llvm/ADT/SCCIterator.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/ADT/DepthFirstIterator.h"
22 #include "llvm/Analysis/DominatorInternals.h"
23 #include "llvm/Analysis/Dominators.h"
24 #include "llvm/CodeGen/MachineDominators.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineFunctionAnalysis.h"
27 #include "llvm/CodeGen/MachineFunctionPass.h"
28 #include "llvm/CodeGen/MachineInstrBuilder.h"
29 #include "llvm/CodeGen/MachineJumpTableInfo.h"
30 #include "llvm/CodeGen/MachineLoopInfo.h"
31 #include "llvm/CodeGen/MachinePostDominators.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 
36 using namespace llvm;
37 
38 #define DEFAULT_VEC_SLOTS 8
39 
40 // TODO: move-begin.
41 
42 //===----------------------------------------------------------------------===//
43 //
44 // Statistics for CFGStructurizer.
45 //
46 //===----------------------------------------------------------------------===//
47 
48 STATISTIC(numSerialPatternMatch, "CFGStructurizer number of serial pattern "
49  "matched");
50 STATISTIC(numIfPatternMatch, "CFGStructurizer number of if pattern "
51  "matched");
52 STATISTIC(numLoopcontPatternMatch, "CFGStructurizer number of loop-continue "
53  "pattern matched");
54 STATISTIC(numClonedBlock, "CFGStructurizer cloned blocks");
55 STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions");
56 
57 //===----------------------------------------------------------------------===//
58 //
59 // Miscellaneous utility for CFGStructurizer.
60 //
61 //===----------------------------------------------------------------------===//
62 namespace {
63 #define SHOWNEWINSTR(i) \
64  DEBUG(dbgs() << "New instr: " << *i << "\n");
65 
66 #define SHOWNEWBLK(b, msg) \
67 DEBUG( \
68  dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
69  dbgs() << "\n"; \
70 );
71 
72 #define SHOWBLK_DETAIL(b, msg) \
73 DEBUG( \
74  if (b) { \
75  dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
76  b->print(dbgs()); \
77  dbgs() << "\n"; \
78  } \
79 );
80 
81 #define INVALIDSCCNUM -1
82 
83 template<class NodeT>
84 void ReverseVector(SmallVectorImpl<NodeT *> &Src) {
85  size_t sz = Src.size();
86  for (size_t i = 0; i < sz/2; ++i) {
87  NodeT *t = Src[i];
88  Src[i] = Src[sz - i - 1];
89  Src[sz - i - 1] = t;
90  }
91 }
92 
93 } // end anonymous namespace
94 
95 //===----------------------------------------------------------------------===//
96 //
97 // supporting data structure for CFGStructurizer
98 //
99 //===----------------------------------------------------------------------===//
100 
101 
102 namespace {
103 
104 class BlockInformation {
105 public:
106  bool IsRetired;
107  int SccNum;
108  BlockInformation() : IsRetired(false), SccNum(INVALIDSCCNUM) {}
109 };
110 
111 } // end anonymous namespace
112 
113 //===----------------------------------------------------------------------===//
114 //
115 // CFGStructurizer
116 //
117 //===----------------------------------------------------------------------===//
118 
119 namespace {
120 class AMDGPUCFGStructurizer : public MachineFunctionPass {
121 public:
122  typedef SmallVector<MachineBasicBlock *, 32> MBBVector;
123  typedef std::map<MachineBasicBlock *, BlockInformation *> MBBInfoMap;
124  typedef std::map<MachineLoop *, MachineBasicBlock *> LoopLandInfoMap;
125 
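  // Classification used by singlePathTo(): DstMBB is reachable from SrcMBB
  // along a chain of single-successor blocks (InPath), the chain dead-ends at
  // a block with no successors before reaching DstMBB (NotInPath), or no such
  // single path exists at all (Not_SinglePath).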
126  enum PathToKind {
127  Not_SinglePath = 0,
128  SinglePath_InPath = 1,
129  SinglePath_NotInPath = 2
130  };
131 
132  static char ID;
133 
134  AMDGPUCFGStructurizer(TargetMachine &tm) :
135  MachineFunctionPass(ID), TM(tm),
136  TII(static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
137  TRI(&TII->getRegisterInfo()) { }
138 
139  const char *getPassName() const {
140  return "AMD IL Control Flow Graph structurizer Pass";
141  }
142 
 143  void getAnalysisUsage(AnalysisUsage &AU) const {
 144  AU.addPreserved<MachineFunctionAnalysis>();
 145  AU.addRequired<MachineFunctionAnalysis>();
 146  AU.addRequired<MachineDominatorTree>();
 147  AU.addRequired<MachinePostDominatorTree>();
 148  AU.addRequired<MachineLoopInfo>();
 149  }
150 
151  /// Perform the CFG structurization
152  bool run();
153 
 154  /// Perform the CFG preparation
 155  /// This step will remove every unconditional/dead jump instruction and make
 156  /// sure all loops have an exit block
157  bool prepare();
158 
159  bool runOnMachineFunction(MachineFunction &MF) {
160  DEBUG(MF.dump(););
161  OrderedBlks.clear();
162  FuncRep = &MF;
163  MLI = &getAnalysis<MachineLoopInfo>();
164  DEBUG(dbgs() << "LoopInfo:\n"; PrintLoopinfo(*MLI););
165  MDT = &getAnalysis<MachineDominatorTree>();
166  DEBUG(MDT->print(dbgs(), (const llvm::Module*)0););
167  PDT = &getAnalysis<MachinePostDominatorTree>();
168  DEBUG(PDT->print(dbgs()););
169  prepare();
170  run();
171  DEBUG(MF.dump(););
172  return true;
173  }
174 
175 protected:
 176  TargetMachine &TM;
 177  MachineDominatorTree *MDT;
 178  MachinePostDominatorTree *PDT;
 179  MachineLoopInfo *MLI;
180  const R600InstrInfo *TII;
181  const AMDGPURegisterInfo *TRI;
182 
183  // PRINT FUNCTIONS
184  /// Print the ordered Blocks.
185  void printOrderedBlocks() const {
186  size_t i = 0;
187  for (MBBVector::const_iterator iterBlk = OrderedBlks.begin(),
188  iterBlkEnd = OrderedBlks.end(); iterBlk != iterBlkEnd; ++iterBlk, ++i) {
189  dbgs() << "BB" << (*iterBlk)->getNumber();
190  dbgs() << "(" << getSCCNum(*iterBlk) << "," << (*iterBlk)->size() << ")";
191  if (i != 0 && i % 10 == 0) {
192  dbgs() << "\n";
193  } else {
194  dbgs() << " ";
195  }
196  }
197  }
198  static void PrintLoopinfo(const MachineLoopInfo &LoopInfo) {
199  for (MachineLoop::iterator iter = LoopInfo.begin(),
200  iterEnd = LoopInfo.end(); iter != iterEnd; ++iter) {
201  (*iter)->print(dbgs(), 0);
202  }
203  }
204 
205  // UTILITY FUNCTIONS
206  int getSCCNum(MachineBasicBlock *MBB) const;
207  MachineBasicBlock *getLoopLandInfo(MachineLoop *LoopRep) const;
208  bool hasBackEdge(MachineBasicBlock *MBB) const;
209  static unsigned getLoopDepth(MachineLoop *LoopRep);
210  bool isRetiredBlock(MachineBasicBlock *MBB) const;
211  bool isActiveLoophead(MachineBasicBlock *MBB) const;
212  PathToKind singlePathTo(MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
213  bool AllowSideEntry = true) const;
214  int countActiveBlock(MBBVector::const_iterator It,
215  MBBVector::const_iterator E) const;
216  bool needMigrateBlock(MachineBasicBlock *MBB) const;
217 
218  // Utility Functions
219  void reversePredicateSetter(MachineBasicBlock::iterator I);
220  /// Compute the reversed DFS post order of Blocks
221  void orderBlocks(MachineFunction *MF);
222 
 223  // Functions originally from CFGStructTraits
224  void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
225  DebugLoc DL = DebugLoc());
226  MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
227  DebugLoc DL = DebugLoc());
228  MachineInstr *insertInstrBefore(MachineBasicBlock::iterator I, int NewOpcode);
229  void insertCondBranchBefore(MachineBasicBlock::iterator I, int NewOpcode,
230  DebugLoc DL);
231  void insertCondBranchBefore(MachineBasicBlock *MBB,
232  MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
233  DebugLoc DL);
234  void insertCondBranchEnd(MachineBasicBlock *MBB, int NewOpcode, int RegNum);
235  static int getBranchNzeroOpcode(int OldOpcode);
236  static int getBranchZeroOpcode(int OldOpcode);
237  static int getContinueNzeroOpcode(int OldOpcode);
238  static int getContinueZeroOpcode(int OldOpcode);
239  static MachineBasicBlock *getTrueBranch(MachineInstr *MI);
240  static void setTrueBranch(MachineInstr *MI, MachineBasicBlock *MBB);
241  static MachineBasicBlock *getFalseBranch(MachineBasicBlock *MBB,
242  MachineInstr *MI);
243  static bool isCondBranch(MachineInstr *MI);
244  static bool isUncondBranch(MachineInstr *MI);
245  static DebugLoc getLastDebugLocInBB(MachineBasicBlock *MBB);
246  static MachineInstr *getNormalBlockBranchInstr(MachineBasicBlock *MBB);
247  /// The correct naming for this is getPossibleLoopendBlockBranchInstr.
248  ///
 249  /// A BB with a backward edge could have move instructions after the branch
 250  /// instruction. Such move instructions "belong to" the loop backward edge.
251  MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *MBB);
252  static MachineInstr *getReturnInstr(MachineBasicBlock *MBB);
253  static MachineInstr *getContinueInstr(MachineBasicBlock *MBB);
254  static bool isReturnBlock(MachineBasicBlock *MBB);
255  static void cloneSuccessorList(MachineBasicBlock *DstMBB,
256  MachineBasicBlock *SrcMBB) ;
257  static MachineBasicBlock *clone(MachineBasicBlock *MBB);
 258  /// MachineBasicBlock::ReplaceUsesOfBlockWith doesn't serve the purpose
 259  /// because the AMDGPU instruction is not recognized as a terminator.
 260  /// Fix this and retire this routine.
261  void replaceInstrUseOfBlockWith(MachineBasicBlock *SrcMBB,
262  MachineBasicBlock *OldMBB, MachineBasicBlock *NewBlk);
263  static void wrapup(MachineBasicBlock *MBB);
264 
265 
266  int patternMatch(MachineBasicBlock *MBB);
267  int patternMatchGroup(MachineBasicBlock *MBB);
268  int serialPatternMatch(MachineBasicBlock *MBB);
269  int ifPatternMatch(MachineBasicBlock *MBB);
270  int loopendPatternMatch();
271  int mergeLoop(MachineLoop *LoopRep);
272  int loopcontPatternMatch(MachineLoop *LoopRep, MachineBasicBlock *LoopHeader);
273 
274  void handleLoopcontBlock(MachineBasicBlock *ContingMBB,
275  MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
276  MachineLoop *ContLoop);
 277  /// Return true iff src1Blk->succ_size() == 0 && src1Blk and src2Blk are in
 278  /// the same loop with LoopLandInfo. Without explicitly keeping track of
 279  /// loopContBlks and loopBreakBlks, this is a way to get that information.
280  bool isSameloopDetachedContbreak(MachineBasicBlock *Src1MBB,
281  MachineBasicBlock *Src2MBB);
282  int handleJumpintoIf(MachineBasicBlock *HeadMBB,
283  MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB);
284  int handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
285  MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB);
286  int improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
287  MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
288  MachineBasicBlock **LandMBBPtr);
289  void showImproveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
290  MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
291  MachineBasicBlock *LandMBB, bool Detail = false);
292  int cloneOnSideEntryTo(MachineBasicBlock *PreMBB,
293  MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB);
294  void mergeSerialBlock(MachineBasicBlock *DstMBB,
295  MachineBasicBlock *SrcMBB);
296 
297  void mergeIfthenelseBlock(MachineInstr *BranchMI,
298  MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
299  MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB);
300  void mergeLooplandBlock(MachineBasicBlock *DstMBB,
301  MachineBasicBlock *LandMBB);
302  void mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
303  MachineBasicBlock *LandMBB);
304  void settleLoopcontBlock(MachineBasicBlock *ContingMBB,
305  MachineBasicBlock *ContMBB);
 306  /// normalizeInfiniteLoopExit changes
307  /// B1:
308  /// uncond_br LoopHeader
309  ///
310  /// to
311  /// B1:
312  /// cond_br 1 LoopHeader dummyExit
 313  /// and returns the newly added dummy exit block
314  MachineBasicBlock *normalizeInfiniteLoopExit(MachineLoop *LoopRep);
315  void removeUnconditionalBranch(MachineBasicBlock *MBB);
 316  /// Remove duplicate branch instructions in a block.
317  /// For instance
318  /// B0:
319  /// cond_br X B1 B2
320  /// cond_br X B1 B2
321  /// is transformed to
322  /// B0:
323  /// cond_br X B1 B2
324  void removeRedundantConditionalBranch(MachineBasicBlock *MBB);
325  void addDummyExitBlock(SmallVectorImpl<MachineBasicBlock *> &RetMBB);
326  void removeSuccessor(MachineBasicBlock *MBB);
327  MachineBasicBlock *cloneBlockForPredecessor(MachineBasicBlock *MBB,
328  MachineBasicBlock *PredMBB);
 329  void migrateInstruction(MachineBasicBlock *SrcMBB,
 330  MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I);
331  void recordSccnum(MachineBasicBlock *MBB, int SCCNum);
332  void retireBlock(MachineBasicBlock *MBB);
333  void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = NULL);
334 
335  MachineBasicBlock *findNearestCommonPostDom(std::set<MachineBasicBlock *>&);
 336  /// This is a workaround for findNearestCommonDominator not being available
 337  /// for post-dominators; a proper fix should go into Dominators.h.
338  MachineBasicBlock *findNearestCommonPostDom(MachineBasicBlock *MBB1,
339  MachineBasicBlock *MBB2);
340 
341 private:
342  MBBInfoMap BlockInfoMap;
343  LoopLandInfoMap LLInfoMap;
344  std::map<MachineLoop *, bool> Visited;
 345  MachineFunction *FuncRep;
 346  SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> OrderedBlks;
 347 };
348 
349 int AMDGPUCFGStructurizer::getSCCNum(MachineBasicBlock *MBB) const {
350  MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
351  if (It == BlockInfoMap.end())
352  return INVALIDSCCNUM;
353  return (*It).second->SccNum;
354 }
355 
356 MachineBasicBlock *AMDGPUCFGStructurizer::getLoopLandInfo(MachineLoop *LoopRep)
357  const {
358  LoopLandInfoMap::const_iterator It = LLInfoMap.find(LoopRep);
359  if (It == LLInfoMap.end())
360  return NULL;
361  return (*It).second;
362 }
363 
364 bool AMDGPUCFGStructurizer::hasBackEdge(MachineBasicBlock *MBB) const {
365  MachineLoop *LoopRep = MLI->getLoopFor(MBB);
366  if (!LoopRep)
367  return false;
368  MachineBasicBlock *LoopHeader = LoopRep->getHeader();
369  return MBB->isSuccessor(LoopHeader);
370 }
371 
372 unsigned AMDGPUCFGStructurizer::getLoopDepth(MachineLoop *LoopRep) {
373  return LoopRep ? LoopRep->getLoopDepth() : 0;
374 }
375 
376 bool AMDGPUCFGStructurizer::isRetiredBlock(MachineBasicBlock *MBB) const {
377  MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
378  if (It == BlockInfoMap.end())
379  return false;
380  return (*It).second->IsRetired;
381 }
382 
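// A loop header stays "active" while any loop it heads has not yet been given
// a land block, or its land block has not been retired; such headers must not
// be merged away by the serial pattern matcher.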
383 bool AMDGPUCFGStructurizer::isActiveLoophead(MachineBasicBlock *MBB) const {
384  MachineLoop *LoopRep = MLI->getLoopFor(MBB);
385  while (LoopRep && LoopRep->getHeader() == MBB) {
386  MachineBasicBlock *LoopLand = getLoopLandInfo(LoopRep);
387  if(!LoopLand)
388  return true;
389  if (!isRetiredBlock(LoopLand))
390  return true;
391  LoopRep = LoopRep->getParentLoop();
392  }
393  return false;
394 }
395 AMDGPUCFGStructurizer::PathToKind AMDGPUCFGStructurizer::singlePathTo(
396  MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
397  bool AllowSideEntry) const {
398  assert(DstMBB);
399  if (SrcMBB == DstMBB)
400  return SinglePath_InPath;
401  while (SrcMBB && SrcMBB->succ_size() == 1) {
402  SrcMBB = *SrcMBB->succ_begin();
403  if (SrcMBB == DstMBB)
404  return SinglePath_InPath;
405  if (!AllowSideEntry && SrcMBB->pred_size() > 1)
406  return Not_SinglePath;
407  }
408  if (SrcMBB && SrcMBB->succ_size()==0)
409  return SinglePath_NotInPath;
410  return Not_SinglePath;
411 }
412 
413 int AMDGPUCFGStructurizer::countActiveBlock(MBBVector::const_iterator It,
414  MBBVector::const_iterator E) const {
415  int Count = 0;
416  while (It != E) {
417  if (!isRetiredBlock(*It))
418  ++Count;
419  ++It;
420  }
421  return Count;
422 }
423 
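// Returns true when MBB has more than one predecessor and is large enough
// (see the thresholds below) that cloning it once per extra predecessor would
// be too costly, so its instructions should be migrated instead.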
424 bool AMDGPUCFGStructurizer::needMigrateBlock(MachineBasicBlock *MBB) const {
425  unsigned BlockSizeThreshold = 30;
426  unsigned CloneInstrThreshold = 100;
427  bool MultiplePreds = MBB && (MBB->pred_size() > 1);
428 
429  if(!MultiplePreds)
430  return false;
431  unsigned BlkSize = MBB->size();
432  return ((BlkSize > BlockSizeThreshold) &&
433  (BlkSize * (MBB->pred_size() - 1) > CloneInstrThreshold));
434 }
435 
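// Walk backwards from I to the nearest PRED_X instruction and flip its
// comparison opcode (zero <-> not-zero), effectively negating the predicate
// that guards the following conditional branch.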
 436 void AMDGPUCFGStructurizer::reversePredicateSetter(
 437  MachineBasicBlock::iterator I) {
 438  while (I--) {
439  if (I->getOpcode() == AMDGPU::PRED_X) {
440  switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) {
441  case OPCODE_IS_ZERO_INT:
442  static_cast<MachineInstr *>(I)->getOperand(2)
443  .setImm(OPCODE_IS_NOT_ZERO_INT);
 444  return;
 445  case OPCODE_IS_NOT_ZERO_INT:
 446  static_cast<MachineInstr *>(I)->getOperand(2)
447  .setImm(OPCODE_IS_ZERO_INT);
448  return;
449  case OPCODE_IS_ZERO:
450  static_cast<MachineInstr *>(I)->getOperand(2)
451  .setImm(OPCODE_IS_NOT_ZERO);
452  return;
453  case OPCODE_IS_NOT_ZERO:
454  static_cast<MachineInstr *>(I)->getOperand(2)
455  .setImm(OPCODE_IS_ZERO);
456  return;
457  default:
458  llvm_unreachable("PRED_X Opcode invalid!");
459  }
460  }
461  }
462 }
463 
464 void AMDGPUCFGStructurizer::insertInstrEnd(MachineBasicBlock *MBB,
465  int NewOpcode, DebugLoc DL) {
466  MachineInstr *MI = MBB->getParent()
467  ->CreateMachineInstr(TII->get(NewOpcode), DL);
468  MBB->push_back(MI);
469  //assume the instruction doesn't take any reg operand ...
470  SHOWNEWINSTR(MI);
471 }
472 
473 MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(MachineBasicBlock *MBB,
474  int NewOpcode, DebugLoc DL) {
475  MachineInstr *MI =
476  MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DL);
477  if (MBB->begin() != MBB->end())
478  MBB->insert(MBB->begin(), MI);
479  else
480  MBB->push_back(MI);
481  SHOWNEWINSTR(MI);
482  return MI;
483 }
484 
485 MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(
486  MachineBasicBlock::iterator I, int NewOpcode) {
487  MachineInstr *OldMI = &(*I);
488  MachineBasicBlock *MBB = OldMI->getParent();
489  MachineInstr *NewMBB =
490  MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
491  MBB->insert(I, NewMBB);
492  //assume the instruction doesn't take any reg operand ...
493  SHOWNEWINSTR(NewMBB);
494  return NewMBB;
495 }
496 
497 void AMDGPUCFGStructurizer::insertCondBranchBefore(
498  MachineBasicBlock::iterator I, int NewOpcode, DebugLoc DL) {
499  MachineInstr *OldMI = &(*I);
500  MachineBasicBlock *MBB = OldMI->getParent();
501  MachineFunction *MF = MBB->getParent();
502  MachineInstr *NewMI = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
503  MBB->insert(I, NewMI);
504  MachineInstrBuilder MIB(*MF, NewMI);
505  MIB.addReg(OldMI->getOperand(1).getReg(), false);
506  SHOWNEWINSTR(NewMI);
507  //erase later oldInstr->eraseFromParent();
508 }
509 
510 void AMDGPUCFGStructurizer::insertCondBranchBefore(MachineBasicBlock *blk,
511  MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
512  DebugLoc DL) {
513  MachineFunction *MF = blk->getParent();
514  MachineInstr *NewInstr = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
515  //insert before
516  blk->insert(I, NewInstr);
517  MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
518  SHOWNEWINSTR(NewInstr);
519 }
520 
521 void AMDGPUCFGStructurizer::insertCondBranchEnd(MachineBasicBlock *MBB,
522  int NewOpcode, int RegNum) {
523  MachineFunction *MF = MBB->getParent();
524  MachineInstr *NewInstr =
525  MF->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
526  MBB->push_back(NewInstr);
527  MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
528  SHOWNEWINSTR(NewInstr);
529 }
530 
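// The four helpers below map the generic branch opcodes (JUMP/JUMP_COND and
// BRANCH_COND_*) onto the structured control-flow opcodes: the *Nzero
// variants fire when the predicate is non-zero, the *Zero variants when it is
// zero, for plain "if" regions and for loop "continue" regions respectively.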
531 int AMDGPUCFGStructurizer::getBranchNzeroOpcode(int OldOpcode) {
532  switch(OldOpcode) {
533  case AMDGPU::JUMP_COND:
534  case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
535  case AMDGPU::BRANCH_COND_i32:
536  case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALNZ_f32;
537  default: llvm_unreachable("internal error");
538  }
539  return -1;
540 }
541 
542 int AMDGPUCFGStructurizer::getBranchZeroOpcode(int OldOpcode) {
543  switch(OldOpcode) {
544  case AMDGPU::JUMP_COND:
545  case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
546  case AMDGPU::BRANCH_COND_i32:
547  case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALZ_f32;
548  default: llvm_unreachable("internal error");
549  }
550  return -1;
551 }
552 
553 int AMDGPUCFGStructurizer::getContinueNzeroOpcode(int OldOpcode) {
554  switch(OldOpcode) {
555  case AMDGPU::JUMP_COND:
556  case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALNZ_i32;
557  default: llvm_unreachable("internal error");
558  };
559  return -1;
560 }
561 
562 int AMDGPUCFGStructurizer::getContinueZeroOpcode(int OldOpcode) {
563  switch(OldOpcode) {
564  case AMDGPU::JUMP_COND:
565  case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALZ_i32;
566  default: llvm_unreachable("internal error");
567  }
568  return -1;
569 }
570 
571 MachineBasicBlock *AMDGPUCFGStructurizer::getTrueBranch(MachineInstr *MI) {
572  return MI->getOperand(0).getMBB();
573 }
574 
575 void AMDGPUCFGStructurizer::setTrueBranch(MachineInstr *MI,
576  MachineBasicBlock *MBB) {
577  MI->getOperand(0).setMBB(MBB);
578 }
579 
 580 MachineBasicBlock *
 581 AMDGPUCFGStructurizer::getFalseBranch(MachineBasicBlock *MBB,
582  MachineInstr *MI) {
583  assert(MBB->succ_size() == 2);
 584  MachineBasicBlock *TrueBranch = getTrueBranch(MI);
 585  MachineBasicBlock::succ_iterator It = MBB->succ_begin();
 586  MachineBasicBlock::succ_iterator Next = It;
 587  ++Next;
588  return (*It == TrueBranch) ? *Next : *It;
589 }
590 
 591 bool AMDGPUCFGStructurizer::isCondBranch(MachineInstr *MI) {
 592  switch (MI->getOpcode()) {
593  case AMDGPU::JUMP_COND:
594  case AMDGPU::BRANCH_COND_i32:
595  case AMDGPU::BRANCH_COND_f32: return true;
596  default:
597  return false;
598  }
599  return false;
600 }
601 
602 bool AMDGPUCFGStructurizer::isUncondBranch(MachineInstr *MI) {
603  switch (MI->getOpcode()) {
604  case AMDGPU::JUMP:
605  case AMDGPU::BRANCH:
606  return true;
607  default:
608  return false;
609  }
610  return false;
611 }
612 
613 DebugLoc AMDGPUCFGStructurizer::getLastDebugLocInBB(MachineBasicBlock *MBB) {
614  //get DebugLoc from the first MachineBasicBlock instruction with debug info
615  DebugLoc DL;
616  for (MachineBasicBlock::iterator It = MBB->begin(); It != MBB->end();
617  ++It) {
618  MachineInstr *instr = &(*It);
619  if (instr->getDebugLoc().isUnknown() == false)
620  DL = instr->getDebugLoc();
621  }
622  return DL;
623 }
624 
625 MachineInstr *AMDGPUCFGStructurizer::getNormalBlockBranchInstr(
626  MachineBasicBlock *MBB) {
 627  MachineBasicBlock::reverse_iterator It = MBB->rbegin();
 628  MachineInstr *MI = &*It;
629  if (MI && (isCondBranch(MI) || isUncondBranch(MI)))
630  return MI;
631  return NULL;
632 }
633 
634 MachineInstr *AMDGPUCFGStructurizer::getLoopendBlockBranchInstr(
635  MachineBasicBlock *MBB) {
636  for (MachineBasicBlock::reverse_iterator It = MBB->rbegin(), E = MBB->rend();
637  It != E; ++It) {
638  // FIXME: Simplify
639  MachineInstr *MI = &*It;
640  if (MI) {
641  if (isCondBranch(MI) || isUncondBranch(MI))
642  return MI;
643  else if (!TII->isMov(MI->getOpcode()))
644  break;
645  }
646  }
647  return NULL;
648 }
649 
650 MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) {
 651  MachineBasicBlock::reverse_iterator It = MBB->rbegin();
 652  if (It != MBB->rend()) {
653  MachineInstr *instr = &(*It);
654  if (instr->getOpcode() == AMDGPU::RETURN)
655  return instr;
656  }
657  return NULL;
658 }
659 
660 MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) {
 661  MachineBasicBlock::reverse_iterator It = MBB->rbegin();
 662  if (It != MBB->rend()) {
663  MachineInstr *MI = &(*It);
664  if (MI->getOpcode() == AMDGPU::CONTINUE)
665  return MI;
666  }
667  return NULL;
668 }
669 
670 bool AMDGPUCFGStructurizer::isReturnBlock(MachineBasicBlock *MBB) {
671  MachineInstr *MI = getReturnInstr(MBB);
672  bool IsReturn = (MBB->succ_size() == 0);
673  if (MI)
674  assert(IsReturn);
675  else if (IsReturn)
676  DEBUG(
677  dbgs() << "BB" << MBB->getNumber()
678  <<" is return block without RETURN instr\n";);
679  return IsReturn;
680 }
681 
682 void AMDGPUCFGStructurizer::cloneSuccessorList(MachineBasicBlock *DstMBB,
683  MachineBasicBlock *SrcMBB) {
684  for (MachineBasicBlock::succ_iterator It = SrcMBB->succ_begin(),
685  iterEnd = SrcMBB->succ_end(); It != iterEnd; ++It)
686  DstMBB->addSuccessor(*It); // *iter's predecessor is also taken care of
687 }
688 
689 MachineBasicBlock *AMDGPUCFGStructurizer::clone(MachineBasicBlock *MBB) {
690  MachineFunction *Func = MBB->getParent();
691  MachineBasicBlock *NewMBB = Func->CreateMachineBasicBlock();
692  Func->push_back(NewMBB); //insert to function
693  for (MachineBasicBlock::iterator It = MBB->begin(), E = MBB->end();
694  It != E; ++It) {
695  MachineInstr *MI = Func->CloneMachineInstr(It);
696  NewMBB->push_back(MI);
697  }
698  return NewMBB;
699 }
700 
701 void AMDGPUCFGStructurizer::replaceInstrUseOfBlockWith(
702  MachineBasicBlock *SrcMBB, MachineBasicBlock *OldMBB,
703  MachineBasicBlock *NewBlk) {
704  MachineInstr *BranchMI = getLoopendBlockBranchInstr(SrcMBB);
705  if (BranchMI && isCondBranch(BranchMI) &&
706  getTrueBranch(BranchMI) == OldMBB)
707  setTrueBranch(BranchMI, NewBlk);
708 }
709 
710 void AMDGPUCFGStructurizer::wrapup(MachineBasicBlock *MBB) {
711  assert((!MBB->getParent()->getJumpTableInfo()
712  || MBB->getParent()->getJumpTableInfo()->isEmpty())
713  && "found a jump table");
714 
 715  //collect continue right before endloop
 716  SmallVector<MachineBasicBlock::iterator, DEFAULT_VEC_SLOTS> ContInstr;
 717  MachineBasicBlock::iterator Pre = MBB->begin();
 718  MachineBasicBlock::iterator E = MBB->end();
 719  MachineBasicBlock::iterator It = Pre;
 720  while (It != E) {
721  if (Pre->getOpcode() == AMDGPU::CONTINUE
722  && It->getOpcode() == AMDGPU::ENDLOOP)
723  ContInstr.push_back(Pre);
724  Pre = It;
725  ++It;
726  }
727 
728  //delete continue right before endloop
729  for (unsigned i = 0; i < ContInstr.size(); ++i)
730  ContInstr[i]->eraseFromParent();
731 
732  // TODO to fix up jump table so later phase won't be confused. if
733  // (jumpTableInfo->isEmpty() == false) { need to clean the jump table, but
734  // there isn't such an interface yet. alternatively, replace all the other
735  // blocks in the jump table with the entryBlk //}
736 
737 }
738 
739 
740 bool AMDGPUCFGStructurizer::prepare() {
741  bool Changed = false;
742 
743  //FIXME: if not reducible flow graph, make it so ???
744 
745  DEBUG(dbgs() << "AMDGPUCFGStructurizer::prepare\n";);
746 
747  orderBlocks(FuncRep);
748 
 749  SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> RetBlks;
 750 
 751  // Add an ExitBlk to loops that don't have one
752  for (MachineLoopInfo::iterator It = MLI->begin(),
753  E = MLI->end(); It != E; ++It) {
754  MachineLoop *LoopRep = (*It);
755  MBBVector ExitingMBBs;
756  LoopRep->getExitingBlocks(ExitingMBBs);
757 
758  if (ExitingMBBs.size() == 0) {
759  MachineBasicBlock* DummyExitBlk = normalizeInfiniteLoopExit(LoopRep);
760  if (DummyExitBlk)
761  RetBlks.push_back(DummyExitBlk);
762  }
763  }
764 
765  // Remove unconditional branch instr.
766  // Add dummy exit block iff there are multiple returns.
 767  for (SmallVectorImpl<MachineBasicBlock *>::const_iterator
 768  It = OrderedBlks.begin(), E = OrderedBlks.end(); It != E; ++It) {
769  MachineBasicBlock *MBB = *It;
770  removeUnconditionalBranch(MBB);
771  removeRedundantConditionalBranch(MBB);
772  if (isReturnBlock(MBB)) {
773  RetBlks.push_back(MBB);
774  }
775  assert(MBB->succ_size() <= 2);
776  }
777 
778  if (RetBlks.size() >= 2) {
779  addDummyExitBlock(RetBlks);
780  Changed = true;
781  }
782 
783  return Changed;
784 }
785 
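// Main driver: sweep the ordered block list SCC by SCC, applying the pattern
// matchers to each still-active block, and repeat until the function has been
// reduced to a single block or an iteration makes no progress (in which case
// the CFG is irreducible and we abort below).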
786 bool AMDGPUCFGStructurizer::run() {
787 
788  //Assume reducible CFG...
789  DEBUG(dbgs() << "AMDGPUCFGStructurizer::run\n";FuncRep->viewCFG(););
790 
791 #ifdef STRESSTEST
792  //Use the worse block ordering to test the algorithm.
793  ReverseVector(orderedBlks);
794 #endif
795 
796  DEBUG(dbgs() << "Ordered blocks:\n"; printOrderedBlocks(););
797  int NumIter = 0;
798  bool Finish = false;
799  MachineBasicBlock *MBB;
800  bool MakeProgress = false;
801  int NumRemainedBlk = countActiveBlock(OrderedBlks.begin(),
802  OrderedBlks.end());
803 
804  do {
805  ++NumIter;
806  DEBUG(
807  dbgs() << "numIter = " << NumIter
808  << ", numRemaintedBlk = " << NumRemainedBlk << "\n";
809  );
810 
 811  SmallVectorImpl<MachineBasicBlock *>::const_iterator It =
 812  OrderedBlks.begin();
 813  SmallVectorImpl<MachineBasicBlock *>::const_iterator E =
 814  OrderedBlks.end();
 815 
 816  SmallVectorImpl<MachineBasicBlock *>::const_iterator SccBeginIter =
 817  It;
818  MachineBasicBlock *SccBeginMBB = NULL;
819  int SccNumBlk = 0; // The number of active blocks, init to a
820  // maximum possible number.
821  int SccNumIter; // Number of iteration in this SCC.
822 
823  while (It != E) {
824  MBB = *It;
825 
826  if (!SccBeginMBB) {
827  SccBeginIter = It;
828  SccBeginMBB = MBB;
829  SccNumIter = 0;
830  SccNumBlk = NumRemainedBlk; // Init to maximum possible number.
831  DEBUG(
832  dbgs() << "start processing SCC" << getSCCNum(SccBeginMBB);
833  dbgs() << "\n";
834  );
835  }
836 
837  if (!isRetiredBlock(MBB))
838  patternMatch(MBB);
839 
840  ++It;
841 
842  bool ContNextScc = true;
843  if (It == E
844  || getSCCNum(SccBeginMBB) != getSCCNum(*It)) {
845  // Just finish one scc.
846  ++SccNumIter;
847  int sccRemainedNumBlk = countActiveBlock(SccBeginIter, It);
848  if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= SccNumBlk) {
849  DEBUG(
850  dbgs() << "Can't reduce SCC " << getSCCNum(MBB)
851  << ", sccNumIter = " << SccNumIter;
852  dbgs() << "doesn't make any progress\n";
853  );
854  ContNextScc = true;
855  } else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < SccNumBlk) {
856  SccNumBlk = sccRemainedNumBlk;
857  It = SccBeginIter;
858  ContNextScc = false;
859  DEBUG(
860  dbgs() << "repeat processing SCC" << getSCCNum(MBB)
861  << "sccNumIter = " << SccNumIter << "\n";
862  FuncRep->viewCFG();
863  );
864  } else {
865  // Finish the current scc.
866  ContNextScc = true;
867  }
868  } else {
869  // Continue on next component in the current scc.
870  ContNextScc = false;
871  }
872 
873  if (ContNextScc)
874  SccBeginMBB = NULL;
875  } //while, "one iteration" over the function.
876 
 877  MachineBasicBlock *EntryMBB =
 878  GraphTraits<MachineFunction *>::nodes_begin(FuncRep);
879  if (EntryMBB->succ_size() == 0) {
880  Finish = true;
881  DEBUG(
882  dbgs() << "Reduce to one block\n";
883  );
884  } else {
885  int NewnumRemainedBlk
886  = countActiveBlock(OrderedBlks.begin(), OrderedBlks.end());
887  // consider cloned blocks ??
888  if (NewnumRemainedBlk == 1 || NewnumRemainedBlk < NumRemainedBlk) {
889  MakeProgress = true;
890  NumRemainedBlk = NewnumRemainedBlk;
891  } else {
892  MakeProgress = false;
893  DEBUG(
894  dbgs() << "No progress\n";
895  );
896  }
897  }
898  } while (!Finish && MakeProgress);
899 
900  // Misc wrap up to maintain the consistency of the Function representation.
 901  wrapup(GraphTraits<MachineFunction *>::nodes_begin(FuncRep));
 902 
903  // Detach retired Block, release memory.
904  for (MBBInfoMap::iterator It = BlockInfoMap.begin(), E = BlockInfoMap.end();
905  It != E; ++It) {
906  if ((*It).second && (*It).second->IsRetired) {
907  assert(((*It).first)->getNumber() != -1);
908  DEBUG(
909  dbgs() << "Erase BB" << ((*It).first)->getNumber() << "\n";
910  );
911  (*It).first->eraseFromParent(); //Remove from the parent Function.
912  }
913  delete (*It).second;
914  }
915  BlockInfoMap.clear();
916  LLInfoMap.clear();
917 
918  DEBUG(
919  FuncRep->viewCFG();
920  );
921 
922  if (!Finish)
923  llvm_unreachable("IRREDUCIBL_CF");
924 
925  return true;
926 }
927 
928 
929 
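// Fill OrderedBlks by walking the CFG with scc_iterator, recording each
// block's SCC number as it is visited; any block that never receives an SCC
// number is unreachable and is reported below.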
930 void AMDGPUCFGStructurizer::orderBlocks(MachineFunction *MF) {
931  int SccNum = 0;
932  MachineBasicBlock *MBB;
933  for (scc_iterator<MachineFunction *> It = scc_begin(MF), E = scc_end(MF);
934  It != E; ++It, ++SccNum) {
935  std::vector<MachineBasicBlock *> &SccNext = *It;
936  for (std::vector<MachineBasicBlock *>::const_iterator
937  blockIter = SccNext.begin(), blockEnd = SccNext.end();
938  blockIter != blockEnd; ++blockIter) {
939  MBB = *blockIter;
940  OrderedBlks.push_back(MBB);
941  recordSccnum(MBB, SccNum);
942  }
943  }
944 
945  //walk through all the block in func to check for unreachable
946  typedef GraphTraits<MachineFunction *> GTM;
947  MachineFunction::iterator It = GTM::nodes_begin(MF), E = GTM::nodes_end(MF);
948  for (; It != E; ++It) {
949  MachineBasicBlock *MBB = &(*It);
950  SccNum = getSCCNum(MBB);
951  if (SccNum == INVALIDSCCNUM)
952  dbgs() << "unreachable block BB" << MBB->getNumber() << "\n";
953  }
954 }
955 
956 int AMDGPUCFGStructurizer::patternMatch(MachineBasicBlock *MBB) {
957  int NumMatch = 0;
958  int CurMatch;
959 
960  DEBUG(
961  dbgs() << "Begin patternMatch BB" << MBB->getNumber() << "\n";
962  );
963 
964  while ((CurMatch = patternMatchGroup(MBB)) > 0)
965  NumMatch += CurMatch;
966 
967  DEBUG(
968  dbgs() << "End patternMatch BB" << MBB->getNumber()
969  << ", numMatch = " << NumMatch << "\n";
970  );
971 
972  return NumMatch;
973 }
974 
975 int AMDGPUCFGStructurizer::patternMatchGroup(MachineBasicBlock *MBB) {
976  int NumMatch = 0;
977  NumMatch += loopendPatternMatch();
978  NumMatch += serialPatternMatch(MBB);
979  NumMatch += ifPatternMatch(MBB);
980  return NumMatch;
981 }
982 
983 
984 int AMDGPUCFGStructurizer::serialPatternMatch(MachineBasicBlock *MBB) {
985  if (MBB->succ_size() != 1)
986  return 0;
987 
988  MachineBasicBlock *childBlk = *MBB->succ_begin();
989  if (childBlk->pred_size() != 1 || isActiveLoophead(childBlk))
990  return 0;
991 
992  mergeSerialBlock(MBB, childBlk);
993  ++numSerialPatternMatch;
994  return 1;
995 }
996 
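// Match an if/if-else region rooted at MBB: after recursively reducing the
// true and false successors, recognize either a diamond (both sides join at a
// common land block), a triangle (one side empty, reversing the predicate if
// it is the true side), or a detached continue/break side; clone side-entered
// successors if needed and collapse the region with mergeIfthenelseBlock().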
997 int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) {
998  //two edges
999  if (MBB->succ_size() != 2)
1000  return 0;
1001  if (hasBackEdge(MBB))
1002  return 0;
1003  MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
1004  if (!BranchMI)
1005  return 0;
1006 
1007  assert(isCondBranch(BranchMI));
1008  int NumMatch = 0;
1009 
1010  MachineBasicBlock *TrueMBB = getTrueBranch(BranchMI);
1011  NumMatch += serialPatternMatch(TrueMBB);
1012  NumMatch += ifPatternMatch(TrueMBB);
1013  MachineBasicBlock *FalseMBB = getFalseBranch(MBB, BranchMI);
1014  NumMatch += serialPatternMatch(FalseMBB);
1015  NumMatch += ifPatternMatch(FalseMBB);
1016  MachineBasicBlock *LandBlk;
1017  int Cloned = 0;
1018 
1019  assert (!TrueMBB->succ_empty() || !FalseMBB->succ_empty());
1020  // TODO: Simplify
1021  if (TrueMBB->succ_size() == 1 && FalseMBB->succ_size() == 1
1022  && *TrueMBB->succ_begin() == *FalseMBB->succ_begin()) {
1023  // Diamond pattern
1024  LandBlk = *TrueMBB->succ_begin();
1025  } else if (TrueMBB->succ_size() == 1 && *TrueMBB->succ_begin() == FalseMBB) {
1026  // Triangle pattern, false is empty
1027  LandBlk = FalseMBB;
1028  FalseMBB = NULL;
1029  } else if (FalseMBB->succ_size() == 1
1030  && *FalseMBB->succ_begin() == TrueMBB) {
1031  // Triangle pattern, true is empty
1032  // We reverse the predicate to make a triangle, empty false pattern;
1033  std::swap(TrueMBB, FalseMBB);
1034  reversePredicateSetter(MBB->end());
1035  LandBlk = FalseMBB;
1036  FalseMBB = NULL;
1037  } else if (FalseMBB->succ_size() == 1
1038  && isSameloopDetachedContbreak(TrueMBB, FalseMBB)) {
1039  LandBlk = *FalseMBB->succ_begin();
1040  } else if (TrueMBB->succ_size() == 1
1041  && isSameloopDetachedContbreak(FalseMBB, TrueMBB)) {
1042  LandBlk = *TrueMBB->succ_begin();
1043  } else {
1044  return NumMatch + handleJumpintoIf(MBB, TrueMBB, FalseMBB);
1045  }
1046 
 1047  // improveSimpleJumpintoIf can handle the case where landBlk == NULL but the
 1048  // new BB created for landBlk == NULL may introduce new challenges to the
 1049  // reduction process.
1050  if (LandBlk &&
1051  ((TrueMBB && TrueMBB->pred_size() > 1)
1052  || (FalseMBB && FalseMBB->pred_size() > 1))) {
1053  Cloned += improveSimpleJumpintoIf(MBB, TrueMBB, FalseMBB, &LandBlk);
1054  }
1055 
1056  if (TrueMBB && TrueMBB->pred_size() > 1) {
1057  TrueMBB = cloneBlockForPredecessor(TrueMBB, MBB);
1058  ++Cloned;
1059  }
1060 
1061  if (FalseMBB && FalseMBB->pred_size() > 1) {
1062  FalseMBB = cloneBlockForPredecessor(FalseMBB, MBB);
1063  ++Cloned;
1064  }
1065 
1066  mergeIfthenelseBlock(BranchMI, MBB, TrueMBB, FalseMBB, LandBlk);
1067 
1068  ++numIfPatternMatch;
1069 
1070  numClonedBlock += Cloned;
1071 
1072  return 1 + Cloned + NumMatch;
1073 }
1074 
1075 int AMDGPUCFGStructurizer::loopendPatternMatch() {
1076  std::vector<MachineLoop *> NestedLoops;
1077  for (MachineLoopInfo::iterator It = MLI->begin(), E = MLI->end();
1078  It != E; ++It) {
 1079  df_iterator<MachineLoop *> LpIt = df_begin(*It),
 1080  LpE = df_end(*It);
1081  for (; LpIt != LpE; ++LpIt)
1082  NestedLoops.push_back(*LpIt);
1083  }
1084  if (NestedLoops.size() == 0)
1085  return 0;
1086 
 1087  // Process nested loops outside->inside, so a "continue" to an outside loop
 1088  // won't be mistaken as a "break" of the current loop.
1089  int Num = 0;
1090  for (std::vector<MachineLoop *>::reverse_iterator It = NestedLoops.rbegin(),
1091  E = NestedLoops.rend(); It != E; ++It) {
1092  MachineLoop *ExaminedLoop = *It;
1093  if (ExaminedLoop->getNumBlocks() == 0 || Visited[ExaminedLoop])
1094  continue;
1095  DEBUG(dbgs() << "Processing:\n"; ExaminedLoop->dump(););
1096  int NumBreak = mergeLoop(ExaminedLoop);
1097  if (NumBreak == -1)
1098  break;
1099  Num += NumBreak;
1100  }
1101  return Num;
1102 }
1103 
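// Structurize a single loop: turn the branch of every exiting block into a
// BREAK, the branch of every latch into a CONTINUE, reduce what remains of
// the body with the serial/if matchers, and finally wrap the header in
// WHILELOOP/ENDLOOP so control falls through to the single exit block.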
1104 int AMDGPUCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
1105  MachineBasicBlock *LoopHeader = LoopRep->getHeader();
1106  MBBVector ExitingMBBs;
1107  LoopRep->getExitingBlocks(ExitingMBBs);
1108  assert(!ExitingMBBs.empty() && "Infinite Loop not supported");
1109  DEBUG(dbgs() << "Loop has " << ExitingMBBs.size() << " exiting blocks\n";);
1110  // We assume a single ExitBlk
1111  MBBVector ExitBlks;
1112  LoopRep->getExitBlocks(ExitBlks);
 1113  std::set<MachineBasicBlock *> ExitBlkSet;
 1114  for (unsigned i = 0, e = ExitBlks.size(); i < e; ++i)
1115  ExitBlkSet.insert(ExitBlks[i]);
1116  assert(ExitBlkSet.size() == 1);
1117  MachineBasicBlock *ExitBlk = *ExitBlks.begin();
1118  assert(ExitBlk && "Loop has several exit block");
1119  MBBVector LatchBlks;
1120  typedef GraphTraits<Inverse<MachineBasicBlock*> > InvMBBTraits;
1121  InvMBBTraits::ChildIteratorType PI = InvMBBTraits::child_begin(LoopHeader),
1122  PE = InvMBBTraits::child_end(LoopHeader);
1123  for (; PI != PE; PI++) {
1124  if (LoopRep->contains(*PI))
1125  LatchBlks.push_back(*PI);
1126  }
1127 
1128  for (unsigned i = 0, e = ExitingMBBs.size(); i < e; ++i)
1129  mergeLoopbreakBlock(ExitingMBBs[i], ExitBlk);
1130  for (unsigned i = 0, e = LatchBlks.size(); i < e; ++i)
1131  settleLoopcontBlock(LatchBlks[i], LoopHeader);
1132  int Match = 0;
1133  do {
1134  Match = 0;
1135  Match += serialPatternMatch(LoopHeader);
1136  Match += ifPatternMatch(LoopHeader);
1137  } while (Match > 0);
1138  mergeLooplandBlock(LoopHeader, ExitBlk);
1139  MachineLoop *ParentLoop = LoopRep->getParentLoop();
1140  if (ParentLoop)
1141  MLI->changeLoopFor(LoopHeader, ParentLoop);
1142  else
1143  MLI->removeBlock(LoopHeader);
1144  Visited[LoopRep] = true;
1145  return 1;
1146 }
1147 
1148 int AMDGPUCFGStructurizer::loopcontPatternMatch(MachineLoop *LoopRep,
1149  MachineBasicBlock *LoopHeader) {
1150  int NumCont = 0;
 1151  SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> ContMBB;
 1152  typedef GraphTraits<Inverse<MachineBasicBlock *> > GTIM;
 1153  GTIM::ChildIteratorType It = GTIM::child_begin(LoopHeader),
1154  E = GTIM::child_end(LoopHeader);
1155  for (; It != E; ++It) {
1156  MachineBasicBlock *MBB = *It;
1157  if (LoopRep->contains(MBB)) {
1158  handleLoopcontBlock(MBB, MLI->getLoopFor(MBB),
1159  LoopHeader, LoopRep);
1160  ContMBB.push_back(MBB);
1161  ++NumCont;
1162  }
1163  }
1164 
 1165  for (SmallVectorImpl<MachineBasicBlock *>::iterator It = ContMBB.begin(),
 1166  E = ContMBB.end(); It != E; ++It) {
1167  (*It)->removeSuccessor(LoopHeader);
1168  }
1169 
1170  numLoopcontPatternMatch += NumCont;
1171 
1172  return NumCont;
1173 }
1174 
1175 
1176 bool AMDGPUCFGStructurizer::isSameloopDetachedContbreak(
1177  MachineBasicBlock *Src1MBB, MachineBasicBlock *Src2MBB) {
1178  if (Src1MBB->succ_size() == 0) {
1179  MachineLoop *LoopRep = MLI->getLoopFor(Src1MBB);
1180  if (LoopRep&& LoopRep == MLI->getLoopFor(Src2MBB)) {
1181  MachineBasicBlock *&TheEntry = LLInfoMap[LoopRep];
1182  if (TheEntry) {
1183  DEBUG(
1184  dbgs() << "isLoopContBreakBlock yes src1 = BB"
1185  << Src1MBB->getNumber()
1186  << " src2 = BB" << Src2MBB->getNumber() << "\n";
1187  );
1188  return true;
1189  }
1190  }
1191  }
1192  return false;
1193 }
1194 
1195 int AMDGPUCFGStructurizer::handleJumpintoIf(MachineBasicBlock *HeadMBB,
1196  MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
1197  int Num = handleJumpintoIfImp(HeadMBB, TrueMBB, FalseMBB);
1198  if (Num == 0) {
1199  DEBUG(
1200  dbgs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n";
1201  );
1202  Num = handleJumpintoIfImp(HeadMBB, FalseMBB, TrueMBB);
1203  }
1204  return Num;
1205 }
1206 
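// Walk down the chain of single-successor blocks starting at TrueMBB and look
// for a block that FalseMBB also reaches along a single path; once that join
// point is found, clone any side-entered blocks on both paths so the region
// becomes a proper if-then-else that ifPatternMatch can reduce.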
1207 int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
1208  MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
1209  int Num = 0;
1210  MachineBasicBlock *DownBlk;
1211 
1212  //trueBlk could be the common post dominator
1213  DownBlk = TrueMBB;
1214 
1215  DEBUG(
1216  dbgs() << "handleJumpintoIfImp head = BB" << HeadMBB->getNumber()
1217  << " true = BB" << TrueMBB->getNumber()
1218  << ", numSucc=" << TrueMBB->succ_size()
1219  << " false = BB" << FalseMBB->getNumber() << "\n";
1220  );
1221 
1222  while (DownBlk) {
1223  DEBUG(
1224  dbgs() << "check down = BB" << DownBlk->getNumber();
1225  );
1226 
1227  if (singlePathTo(FalseMBB, DownBlk) == SinglePath_InPath) {
1228  DEBUG(
1229  dbgs() << " working\n";
1230  );
1231 
1232  Num += cloneOnSideEntryTo(HeadMBB, TrueMBB, DownBlk);
1233  Num += cloneOnSideEntryTo(HeadMBB, FalseMBB, DownBlk);
1234 
1235  numClonedBlock += Num;
1236  Num += serialPatternMatch(*HeadMBB->succ_begin());
1237  Num += serialPatternMatch(*llvm::next(HeadMBB->succ_begin()));
1238  Num += ifPatternMatch(HeadMBB);
1239  assert(Num > 0);
1240 
1241  break;
1242  }
1243  DEBUG(
1244  dbgs() << " not working\n";
1245  );
1246  DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : NULL;
1247  } // walk down the postDomTree
1248 
1249  return Num;
1250 }
1251 
1252 void AMDGPUCFGStructurizer::showImproveSimpleJumpintoIf(
1253  MachineBasicBlock *HeadMBB, MachineBasicBlock *TrueMBB,
1254  MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB, bool Detail) {
1255  dbgs() << "head = BB" << HeadMBB->getNumber()
1256  << " size = " << HeadMBB->size();
1257  if (Detail) {
1258  dbgs() << "\n";
1259  HeadMBB->print(dbgs());
1260  dbgs() << "\n";
1261  }
1262 
1263  if (TrueMBB) {
1264  dbgs() << ", true = BB" << TrueMBB->getNumber() << " size = "
1265  << TrueMBB->size() << " numPred = " << TrueMBB->pred_size();
1266  if (Detail) {
1267  dbgs() << "\n";
1268  TrueMBB->print(dbgs());
1269  dbgs() << "\n";
1270  }
1271  }
1272  if (FalseMBB) {
1273  dbgs() << ", false = BB" << FalseMBB->getNumber() << " size = "
1274  << FalseMBB->size() << " numPred = " << FalseMBB->pred_size();
1275  if (Detail) {
1276  dbgs() << "\n";
1277  FalseMBB->print(dbgs());
1278  dbgs() << "\n";
1279  }
1280  }
1281  if (LandMBB) {
1282  dbgs() << ", land = BB" << LandMBB->getNumber() << " size = "
1283  << LandMBB->size() << " numPred = " << LandMBB->pred_size();
1284  if (Detail) {
1285  dbgs() << "\n";
1286  LandMBB->print(dbgs());
1287  dbgs() << "\n";
1288  }
1289  }
1290 
1291  dbgs() << "\n";
1292 }
1293 
1294 int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
1295  MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
1296  MachineBasicBlock **LandMBBPtr) {
1297  bool MigrateTrue = false;
1298  bool MigrateFalse = false;
1299 
1300  MachineBasicBlock *LandBlk = *LandMBBPtr;
1301 
1302  assert((!TrueMBB || TrueMBB->succ_size() <= 1)
1303  && (!FalseMBB || FalseMBB->succ_size() <= 1));
1304 
1305  if (TrueMBB == FalseMBB)
1306  return 0;
1307 
1308  MigrateTrue = needMigrateBlock(TrueMBB);
1309  MigrateFalse = needMigrateBlock(FalseMBB);
1310 
1311  if (!MigrateTrue && !MigrateFalse)
1312  return 0;
1313 
 1314  // If we need to migrate either trueBlk or falseBlk, also migrate whichever
 1315  // has more than one predecessor. Without doing this, its predecessor
 1316  // rather than headBlk will have an undefined value in initReg.
1317  if (!MigrateTrue && TrueMBB && TrueMBB->pred_size() > 1)
1318  MigrateTrue = true;
1319  if (!MigrateFalse && FalseMBB && FalseMBB->pred_size() > 1)
1320  MigrateFalse = true;
1321 
1322  DEBUG(
1323  dbgs() << "before improveSimpleJumpintoIf: ";
1324  showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
1325  );
1326 
1327  // org: headBlk => if () {trueBlk} else {falseBlk} => landBlk
1328  //
1329  // new: headBlk => if () {initReg = 1; org trueBlk branch} else
1330  // {initReg = 0; org falseBlk branch }
1331  // => landBlk => if (initReg) {org trueBlk} else {org falseBlk}
1332  // => org landBlk
1333  // if landBlk->pred_size() > 2, put the about if-else inside
1334  // if (initReg !=2) {...}
1335  //
1336  // add initReg = initVal to headBlk
1337 
1338  const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
1339  if (!MigrateTrue || !MigrateFalse) {
1340  // XXX: We have an opportunity here to optimize the "branch into if" case
1341  // here. Branch into if looks like this:
1342  // entry
1343  // / |
1344  // diamond_head branch_from
1345  // / \ |
1346  // diamond_false diamond_true
1347  // \ /
1348  // done
1349  //
1350  // The diamond_head block begins the "if" and the diamond_true block
1351  // is the block being "branched into".
1352  //
1353  // If MigrateTrue is true, then TrueBB is the block being "branched into"
1354  // and if MigrateFalse is true, then FalseBB is the block being
1355  // "branched into"
1356  //
1357  // Here is the pseudo code for how I think the optimization should work:
1358  // 1. Insert MOV GPR0, 0 before the branch instruction in diamond_head.
1359  // 2. Insert MOV GPR0, 1 before the branch instruction in branch_from.
1360  // 3. Move the branch instruction from diamond_head into its own basic
1361  // block (new_block).
1362  // 4. Add an unconditional branch from diamond_head to new_block
1363  // 5. Replace the branch instruction in branch_from with an unconditional
1364  // branch to new_block. If branch_from has multiple predecessors, then
1365  // we need to replace the True/False block in the branch
1366  // instruction instead of replacing it.
1367  // 6. Change the condition of the branch instruction in new_block from
1368  // COND to (COND || GPR0)
1369  //
1370  // In order insert these MOV instruction, we will need to use the
1371  // RegisterScavenger. Usually liveness stops being tracked during
1372  // the late machine optimization passes, however if we implement
1373  // bool TargetRegisterInfo::requiresRegisterScavenging(
1374  // const MachineFunction &MF)
1375  // and have it return true, liveness will be tracked correctly
1376  // by generic optimization passes. We will also need to make sure that
1377  // all of our target-specific passes that run after regalloc and before
1378  // the CFGStructurizer track liveness and we will need to modify this pass
1379  // to correctly track liveness.
1380  //
1381  // After the above changes, the new CFG should look like this:
1382  // entry
1383  // / |
1384  // diamond_head branch_from
1385  // \ /
1386  // new_block
1387  // / |
1388  // diamond_false diamond_true
1389  // \ /
1390  // done
1391  //
1392  // Without this optimization, we are forced to duplicate the diamond_true
1393  // block and we will end up with a CFG like this:
1394  //
1395  // entry
1396  // / |
1397  // diamond_head branch_from
1398  // / \ |
1399  // diamond_false diamond_true diamond_true (duplicate)
1400  // \ / |
1401  // done --------------------|
1402  //
1403  // Duplicating diamond_true can be very costly especially if it has a
1404  // lot of instructions.
1405  return 0;
1406  }
1407 
1408  int NumNewBlk = 0;
1409 
1410  bool LandBlkHasOtherPred = (LandBlk->pred_size() > 2);
1411 
1412  //insert AMDGPU::ENDIF to avoid special case "input landBlk == NULL"
1413  MachineBasicBlock::iterator I = insertInstrBefore(LandBlk, AMDGPU::ENDIF);
1414 
1415  if (LandBlkHasOtherPred) {
1416  llvm_unreachable("Extra register needed to handle CFG");
1417  unsigned CmpResReg =
1418  HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
1419  llvm_unreachable("Extra compare instruction needed to handle CFG");
1420  insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET,
1421  CmpResReg, DebugLoc());
1422  }
1423 
1424  // XXX: We are running this after RA, so creating virtual registers will
1425  // cause an assertion failure in the PostRA scheduling pass.
1426  unsigned InitReg =
1427  HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
1428  insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET, InitReg,
1429  DebugLoc());
1430 
1431  if (MigrateTrue) {
1432  migrateInstruction(TrueMBB, LandBlk, I);
 1433  // need to unconditionally insert the assignment to ensure a path from its
 1434  // predecessor rather than headBlk has a valid value in initReg if
 1435  // (initVal != 1).
1436  llvm_unreachable("Extra register needed to handle CFG");
1437  }
1438  insertInstrBefore(I, AMDGPU::ELSE);
1439 
1440  if (MigrateFalse) {
1441  migrateInstruction(FalseMBB, LandBlk, I);
 1442  // need to unconditionally insert the assignment to ensure a path from its
 1443  // predecessor rather than headBlk has a valid value in initReg if
 1444  // (initVal != 0).
1445  llvm_unreachable("Extra register needed to handle CFG");
1446  }
1447 
1448  if (LandBlkHasOtherPred) {
1449  // add endif
1450  insertInstrBefore(I, AMDGPU::ENDIF);
1451 
1452  // put initReg = 2 to other predecessors of landBlk
1453  for (MachineBasicBlock::pred_iterator PI = LandBlk->pred_begin(),
1454  PE = LandBlk->pred_end(); PI != PE; ++PI) {
1455  MachineBasicBlock *MBB = *PI;
1456  if (MBB != TrueMBB && MBB != FalseMBB)
1457  llvm_unreachable("Extra register needed to handle CFG");
1458  }
1459  }
1460  DEBUG(
1461  dbgs() << "result from improveSimpleJumpintoIf: ";
1462  showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
1463  );
1464 
1465  // update landBlk
1466  *LandMBBPtr = LandBlk;
1467 
1468  return NumNewBlk;
1469 }
1470 
1471 void AMDGPUCFGStructurizer::handleLoopcontBlock(MachineBasicBlock *ContingMBB,
1472  MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
1473  MachineLoop *ContLoop) {
1474  DEBUG(dbgs() << "loopcontPattern cont = BB" << ContingMBB->getNumber()
1475  << " header = BB" << ContMBB->getNumber() << "\n";
1476  dbgs() << "Trying to continue loop-depth = "
1477  << getLoopDepth(ContLoop)
1478  << " from loop-depth = " << getLoopDepth(ContingLoop) << "\n";);
1479  settleLoopcontBlock(ContingMBB, ContMBB);
1480 }
1481 
1482 void AMDGPUCFGStructurizer::mergeSerialBlock(MachineBasicBlock *DstMBB,
1483  MachineBasicBlock *SrcMBB) {
1484  DEBUG(
1485  dbgs() << "serialPattern BB" << DstMBB->getNumber()
1486  << " <= BB" << SrcMBB->getNumber() << "\n";
1487  );
1488  DstMBB->splice(DstMBB->end(), SrcMBB, SrcMBB->begin(), SrcMBB->end());
1489 
1490  DstMBB->removeSuccessor(SrcMBB);
1491  cloneSuccessorList(DstMBB, SrcMBB);
1492 
1493  removeSuccessor(SrcMBB);
1494  MLI->removeBlock(SrcMBB);
1495  retireBlock(SrcMBB);
1496 }
1497 
1498 void AMDGPUCFGStructurizer::mergeIfthenelseBlock(MachineInstr *BranchMI,
1499  MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
1500  MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB) {
1501  assert (TrueMBB);
1502  DEBUG(
1503  dbgs() << "ifPattern BB" << MBB->getNumber();
1504  dbgs() << "{ ";
1505  if (TrueMBB) {
1506  dbgs() << "BB" << TrueMBB->getNumber();
1507  }
1508  dbgs() << " } else ";
1509  dbgs() << "{ ";
1510  if (FalseMBB) {
1511  dbgs() << "BB" << FalseMBB->getNumber();
1512  }
1513  dbgs() << " }\n ";
1514  dbgs() << "landBlock: ";
1515  if (!LandMBB) {
1516  dbgs() << "NULL";
1517  } else {
1518  dbgs() << "BB" << LandMBB->getNumber();
1519  }
1520  dbgs() << "\n";
1521  );
1522 
1523  int OldOpcode = BranchMI->getOpcode();
1524  DebugLoc BranchDL = BranchMI->getDebugLoc();
1525 
1526 // transform to
1527 // if cond
1528 // trueBlk
1529 // else
1530 // falseBlk
1531 // endif
1532 // landBlk
1533 
1534  MachineBasicBlock::iterator I = BranchMI;
1535  insertCondBranchBefore(I, getBranchNzeroOpcode(OldOpcode),
1536  BranchDL);
1537 
1538  if (TrueMBB) {
1539  MBB->splice(I, TrueMBB, TrueMBB->begin(), TrueMBB->end());
1540  MBB->removeSuccessor(TrueMBB);
1541  if (LandMBB && TrueMBB->succ_size()!=0)
1542  TrueMBB->removeSuccessor(LandMBB);
1543  retireBlock(TrueMBB);
1544  MLI->removeBlock(TrueMBB);
1545  }
1546 
1547  if (FalseMBB) {
1548  insertInstrBefore(I, AMDGPU::ELSE);
1549  MBB->splice(I, FalseMBB, FalseMBB->begin(),
1550  FalseMBB->end());
1551  MBB->removeSuccessor(FalseMBB);
1552  if (LandMBB && FalseMBB->succ_size() != 0)
1553  FalseMBB->removeSuccessor(LandMBB);
1554  retireBlock(FalseMBB);
1555  MLI->removeBlock(FalseMBB);
1556  }
1557  insertInstrBefore(I, AMDGPU::ENDIF);
1558 
1559  BranchMI->eraseFromParent();
1560 
1561  if (LandMBB && TrueMBB && FalseMBB)
1562  MBB->addSuccessor(LandMBB);
1563 
1564 }
1565 
1566 void AMDGPUCFGStructurizer::mergeLooplandBlock(MachineBasicBlock *DstBlk,
1567  MachineBasicBlock *LandMBB) {
1568  DEBUG(dbgs() << "loopPattern header = BB" << DstBlk->getNumber()
1569  << " land = BB" << LandMBB->getNumber() << "\n";);
1570 
1571  insertInstrBefore(DstBlk, AMDGPU::WHILELOOP, DebugLoc());
1572  insertInstrEnd(DstBlk, AMDGPU::ENDLOOP, DebugLoc());
1573  DstBlk->addSuccessor(LandMBB);
1574  DstBlk->removeSuccessor(DstBlk);
1575 }
1576 
1577 
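// Rewrite a loop-exiting block: its conditional branch is replaced by an
// IF_PREDICATE_SET / BREAK / ENDIF sequence (reversing the predicate first if
// the branch's true target is not the land block), and the CFG edge to the
// land block is removed since the BREAK now leaves the loop implicitly.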
1578 void AMDGPUCFGStructurizer::mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
1579  MachineBasicBlock *LandMBB) {
1580  DEBUG(dbgs() << "loopbreakPattern exiting = BB" << ExitingMBB->getNumber()
1581  << " land = BB" << LandMBB->getNumber() << "\n";);
1582  MachineInstr *BranchMI = getLoopendBlockBranchInstr(ExitingMBB);
1583  assert(BranchMI && isCondBranch(BranchMI));
1584  DebugLoc DL = BranchMI->getDebugLoc();
1585  MachineBasicBlock *TrueBranch = getTrueBranch(BranchMI);
1586  MachineBasicBlock::iterator I = BranchMI;
1587  if (TrueBranch != LandMBB)
1588  reversePredicateSetter(I);
1589  insertCondBranchBefore(ExitingMBB, I, AMDGPU::IF_PREDICATE_SET, AMDGPU::PREDICATE_BIT, DL);
1590  insertInstrBefore(I, AMDGPU::BREAK);
1591  insertInstrBefore(I, AMDGPU::ENDIF);
 1592  // now the branch instruction can be erased safely
 1593  BranchMI->eraseFromParent();
 1594  // now take care of successors, retire blocks
1595  ExitingMBB->removeSuccessor(LandMBB);
1596 }
1597 
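// Rewrite a latch block so its branch back to the loop header becomes an
// explicit CONTINUE: a single CONTINUE_LOGICAL* when the conditional branch
// is the last instruction of the block, or an IF_* / CONTINUE / ENDIF
// sequence otherwise; if the branch was already erased, just append CONTINUE.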
1598 void AMDGPUCFGStructurizer::settleLoopcontBlock(MachineBasicBlock *ContingMBB,
1599  MachineBasicBlock *ContMBB) {
1600  DEBUG(dbgs() << "settleLoopcontBlock conting = BB"
1601  << ContingMBB->getNumber()
1602  << ", cont = BB" << ContMBB->getNumber() << "\n";);
1603 
1604  MachineInstr *MI = getLoopendBlockBranchInstr(ContingMBB);
1605  if (MI) {
1606  assert(isCondBranch(MI));
 1607  MachineBasicBlock::iterator I = MI;
 1608  MachineBasicBlock *TrueBranch = getTrueBranch(MI);
1609  int OldOpcode = MI->getOpcode();
1610  DebugLoc DL = MI->getDebugLoc();
1611 
1612  bool UseContinueLogical = ((&*ContingMBB->rbegin()) == MI);
1613 
1614  if (UseContinueLogical == false) {
1615  int BranchOpcode =
1616  TrueBranch == ContMBB ? getBranchNzeroOpcode(OldOpcode) :
1617  getBranchZeroOpcode(OldOpcode);
1618  insertCondBranchBefore(I, BranchOpcode, DL);
1619  // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
1620  insertInstrEnd(ContingMBB, AMDGPU::CONTINUE, DL);
1621  insertInstrEnd(ContingMBB, AMDGPU::ENDIF, DL);
1622  } else {
1623  int BranchOpcode =
1624  TrueBranch == ContMBB ? getContinueNzeroOpcode(OldOpcode) :
1625  getContinueZeroOpcode(OldOpcode);
1626  insertCondBranchBefore(I, BranchOpcode, DL);
1627  }
1628 
1629  MI->eraseFromParent();
1630  } else {
 1631  // If we've arrived here then we've already erased the branch instruction.
 1632  // Travel back up the basic block to see the last reference of our debug
 1633  // location we've just inserted; that reference here should be
 1634  // representative. insertEnd to ensure phi-moves, if they exist, go before
 1635  // the continue-instr.
1636  insertInstrEnd(ContingMBB, AMDGPU::CONTINUE,
1637  getLastDebugLocInBB(ContingMBB));
1638  }
1639 }
1640 
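// Follow the single-successor path from SrcMBB to DstMBB and clone every
// block on it that has additional predecessors, so PreMBB gets a private copy
// of the path and side entries no longer reach into the region being reduced.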
1641 int AMDGPUCFGStructurizer::cloneOnSideEntryTo(MachineBasicBlock *PreMBB,
1642  MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB) {
1643  int Cloned = 0;
1644  assert(PreMBB->isSuccessor(SrcMBB));
1645  while (SrcMBB && SrcMBB != DstMBB) {
1646  assert(SrcMBB->succ_size() == 1);
1647  if (SrcMBB->pred_size() > 1) {
1648  SrcMBB = cloneBlockForPredecessor(SrcMBB, PreMBB);
1649  ++Cloned;
1650  }
1651 
1652  PreMBB = SrcMBB;
1653  SrcMBB = *SrcMBB->succ_begin();
1654  }
1655 
1656  return Cloned;
1657 }
1658 
 1659 MachineBasicBlock *
 1660 AMDGPUCFGStructurizer::cloneBlockForPredecessor(MachineBasicBlock *MBB,
1661  MachineBasicBlock *PredMBB) {
1662  assert(PredMBB->isSuccessor(MBB) &&
1663  "succBlk is not a prececessor of curBlk");
1664 
1665  MachineBasicBlock *CloneMBB = clone(MBB); //clone instructions
1666  replaceInstrUseOfBlockWith(PredMBB, MBB, CloneMBB);
1667  //srcBlk, oldBlk, newBlk
1668 
1669  PredMBB->removeSuccessor(MBB);
1670  PredMBB->addSuccessor(CloneMBB);
1671 
1672  // add all successor to cloneBlk
1673  cloneSuccessorList(CloneMBB, MBB);
1674 
1675  numClonedInstr += MBB->size();
1676 
1677  DEBUG(
1678  dbgs() << "Cloned block: " << "BB"
1679  << MBB->getNumber() << "size " << MBB->size() << "\n";
1680  );
1681 
1682  SHOWNEWBLK(CloneMBB, "result of Cloned block: ");
1683 
1684  return CloneMBB;
1685 }
1686 
1687 void AMDGPUCFGStructurizer::migrateInstruction(MachineBasicBlock *SrcMBB,
 1688  MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I) {
 1689  MachineBasicBlock::iterator SpliceEnd;
1690  //look for the input branchinstr, not the AMDGPU branchinstr
1691  MachineInstr *BranchMI = getNormalBlockBranchInstr(SrcMBB);
1692  if (!BranchMI) {
1693  DEBUG(
1694  dbgs() << "migrateInstruction don't see branch instr\n" ;
1695  );
1696  SpliceEnd = SrcMBB->end();
1697  } else {
1698  DEBUG(
1699  dbgs() << "migrateInstruction see branch instr\n" ;
1700  BranchMI->dump();
1701  );
1702  SpliceEnd = BranchMI;
1703  }
1704  DEBUG(
1705  dbgs() << "migrateInstruction before splice dstSize = " << DstMBB->size()
1706  << "srcSize = " << SrcMBB->size() << "\n";
1707  );
1708 
1709  //splice insert before insertPos
1710  DstMBB->splice(I, SrcMBB, SrcMBB->begin(), SpliceEnd);
1711 
1712  DEBUG(
1713  dbgs() << "migrateInstruction after splice dstSize = " << DstMBB->size()
1714  << " srcSize = " << SrcMBB->size() << "\n";
1715  );
1716 }
1717 
1718 MachineBasicBlock *
1719 AMDGPUCFGStructurizer::normalizeInfiniteLoopExit(MachineLoop* LoopRep) {
1720  MachineBasicBlock *LoopHeader = LoopRep->getHeader();
1721  MachineBasicBlock *LoopLatch = LoopRep->getLoopLatch();
1722  const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
1723 
1724  if (!LoopHeader || !LoopLatch)
1725  return NULL;
1726  MachineInstr *BranchMI = getLoopendBlockBranchInstr(LoopLatch);
1727  // Is LoopRep an infinite loop?
1728  if (!BranchMI || !isUncondBranch(BranchMI))
1729  return NULL;
1730 
1731  MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
1732  FuncRep->push_back(DummyExitBlk); //insert to function
1733  SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
1734  DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";);
1735  MachineBasicBlock::iterator I = BranchMI;
1736  unsigned ImmReg = FuncRep->getRegInfo().createVirtualRegister(I32RC);
1737  llvm_unreachable("Extra register needed to handle CFG");
1738  MachineInstr *NewMI = insertInstrBefore(I, AMDGPU::BRANCH_COND_i32);
1739  MachineInstrBuilder MIB(*FuncRep, NewMI);
1740  MIB.addMBB(LoopHeader);
1741  MIB.addReg(ImmReg, false);
1742  SHOWNEWINSTR(NewMI);
1743  BranchMI->eraseFromParent();
1744  LoopLatch->addSuccessor(DummyExitBlk);
1745 
1746  return DummyExitBlk;
1747 }
1748 
1749 void AMDGPUCFGStructurizer::removeUnconditionalBranch(MachineBasicBlock *MBB) {
1750  MachineInstr *BranchMI;
1751 
1752  // We have seen two unconditional branches in one basic block (e.g. in
1753  // test_fc_do_while_or.c); fix the upstream producer so this loop can be removed.
1754  while ((BranchMI = getLoopendBlockBranchInstr(MBB))
1755  && isUncondBranch(BranchMI)) {
1756  DEBUG(dbgs() << "Removing uncond branch instr"; BranchMI->dump(););
1757  BranchMI->eraseFromParent();
1758  }
1759 }
1760 
1761 void AMDGPUCFGStructurizer::removeRedundantConditionalBranch(
1762  MachineBasicBlock *MBB) {
1763  if (MBB->succ_size() != 2)
1764  return;
1765  MachineBasicBlock *MBB1 = *MBB->succ_begin();
1766  MachineBasicBlock *MBB2 = *llvm::next(MBB->succ_begin());
1767  if (MBB1 != MBB2)
1768  return;
1769 
1770  MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
1771  assert(BranchMI && isCondBranch(BranchMI));
1772  DEBUG(dbgs() << "Removing unneeded cond branch instr"; BranchMI->dump(););
1773  BranchMI->eraseFromParent();
1774  SHOWNEWBLK(MBB1, "Removing redundant successor");
1775  MBB->removeSuccessor(MBB1);
1776 }
1777 
1778 void AMDGPUCFGStructurizer::addDummyExitBlock(
1779  SmallVectorImpl<MachineBasicBlock*> &RetMBB) {
1780  MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
1781  FuncRep->push_back(DummyExitBlk); //insert to function
1782  insertInstrEnd(DummyExitBlk, AMDGPU::RETURN);
1783 
1784  for (SmallVectorImpl<MachineBasicBlock *>::iterator It = RetMBB.begin(),
1785  E = RetMBB.end(); It != E; ++It) {
1786  MachineBasicBlock *MBB = *It;
1787  MachineInstr *MI = getReturnInstr(MBB);
1788  if (MI)
1789  MI->eraseFromParent();
1790  MBB->addSuccessor(DummyExitBlk);
1791  DEBUG(
1792  dbgs() << "Add dummyExitBlock to BB" << MBB->getNumber()
1793  << " successors\n";
1794  );
1795  }
1796  SHOWNEWBLK(DummyExitBlk, "DummyExitBlock: ");
1797 }
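// Illustrative sketch (not from the original source; hypothetical toy
// representation): the routine above gives the function a single exit by
// pointing every returning block at one synthetic exit block, which keeps
// post-dominance well defined.  Blocks are indices; Succs[i] lists block i's
// successors.
#include <cstddef>
#include <vector>

// Returns the index of the new dummy exit block.
static std::size_t addUniqueExit(std::vector<std::vector<std::size_t>> &Succs,
                                 const std::vector<std::size_t> &ReturningBlocks) {
  std::size_t DummyExit = Succs.size();
  Succs.emplace_back();                 // the dummy exit block has no successors
  for (std::size_t B : ReturningBlocks)
    Succs[B].push_back(DummyExit);      // former return blocks now branch to it
  return DummyExit;
}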
1798 
1799 void AMDGPUCFGStructurizer::removeSuccessor(MachineBasicBlock *MBB) {
1800  while (MBB->succ_size())
1801  MBB->removeSuccessor(*MBB->succ_begin());
1802 }
1803 
1804 void AMDGPUCFGStructurizer::recordSccnum(MachineBasicBlock *MBB,
1805  int SccNum) {
1806  BlockInformation *&srcBlkInfo = BlockInfoMap[MBB];
1807  if (!srcBlkInfo)
1808  srcBlkInfo = new BlockInformation();
1809  srcBlkInfo->SccNum = SccNum;
1810 }
1811 
1812 void AMDGPUCFGStructurizer::retireBlock(MachineBasicBlock *MBB) {
1813  DEBUG(
1814  dbgs() << "Retiring BB" << MBB->getNumber() << "\n";
1815  );
1816 
1817  BlockInformation *&SrcBlkInfo = BlockInfoMap[MBB];
1818 
1819  if (!SrcBlkInfo)
1820  SrcBlkInfo = new BlockInformation();
1821 
1822  SrcBlkInfo->IsRetired = true;
1823  assert(MBB->succ_size() == 0 && MBB->pred_size() == 0
1824  && "can't retire block yet");
1825 }
1826 
1827 void AMDGPUCFGStructurizer::setLoopLandBlock(MachineLoop *loopRep,
1828  MachineBasicBlock *MBB) {
1829  MachineBasicBlock *&TheEntry = LLInfoMap[loopRep];
1830  if (!MBB) {
1831  MBB = FuncRep->CreateMachineBasicBlock();
1832  FuncRep->push_back(MBB); //insert to function
1833  SHOWNEWBLK(MBB, "DummyLandingBlock for loop without break: ");
1834  }
1835  TheEntry = MBB;
1836  DEBUG(
1837  dbgs() << "setLoopLandBlock loop-header = BB"
1838  << loopRep->getHeader()->getNumber()
1839  << " landing-block = BB" << MBB->getNumber() << "\n";
1840  );
1841 }
1842 
1843 MachineBasicBlock *
1844 AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1,
1845  MachineBasicBlock *MBB2) {
1846 
1847  if (PDT->dominates(MBB1, MBB2))
1848  return MBB1;
1849  if (PDT->dominates(MBB2, MBB1))
1850  return MBB2;
1851 
1852  MachineDomTreeNode *Node1 = PDT->getNode(MBB1);
1853  MachineDomTreeNode *Node2 = PDT->getNode(MBB2);
1854 
1855  // Handle newly cloned node.
1856  if (!Node1 && MBB1->succ_size() == 1)
1857  return findNearestCommonPostDom(*MBB1->succ_begin(), MBB2);
1858  if (!Node2 && MBB2->succ_size() == 1)
1859  return findNearestCommonPostDom(MBB1, *MBB2->succ_begin());
1860 
1861  if (!Node1 || !Node2)
1862  return NULL;
1863 
1864  Node1 = Node1->getIDom();
1865  while (Node1) {
1866  if (PDT->dominates(Node1, Node2))
1867  return Node1->getBlock();
1868  Node1 = Node1->getIDom();
1869  }
1870 
1871  return NULL;
1872 }
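// Illustrative sketch (not from the original source): the search above is the
// classic "walk the immediate-dominator chain" query, here on the
// post-dominator tree.  The standalone model below uses a hypothetical IDom
// map from block id to immediate post-dominator id; the root maps to itself.
#include <map>

using IDomMap = std::map<int, int>;

// A post-dominates B if A lies on B's immediate-post-dominator chain
// (every block post-dominates itself).
static bool postDominates(const IDomMap &IDom, int A, int B) {
  for (int N = B;;) {
    if (N == A)
      return true;
    IDomMap::const_iterator It = IDom.find(N);
    if (It == IDom.end() || It->second == N)
      return false;                     // reached the root without meeting A
    N = It->second;
  }
}

static int nearestCommonPostDom(const IDomMap &IDom, int A, int B) {
  if (postDominates(IDom, A, B))
    return A;
  if (postDominates(IDom, B, A))
    return B;
  // Walk up A's chain until some ancestor also post-dominates B.
  IDomMap::const_iterator It = IDom.find(A);
  while (It != IDom.end() && It->second != It->first) {
    int N = It->second;
    if (postDominates(IDom, N, B))
      return N;
    It = IDom.find(N);
  }
  return -1;                            // no common post-dominator found
}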
1873 
1874 MachineBasicBlock *
1875 AMDGPUCFGStructurizer::findNearestCommonPostDom(
1876  std::set<MachineBasicBlock *> &MBBs) {
1877  MachineBasicBlock *CommonDom;
1878  std::set<MachineBasicBlock *>::const_iterator It = MBBs.begin();
1879  std::set<MachineBasicBlock *>::const_iterator E = MBBs.end();
1880  for (CommonDom = *It; It != E && CommonDom; ++It) {
1881  MachineBasicBlock *MBB = *It;
1882  if (MBB != CommonDom)
1883  CommonDom = findNearestCommonPostDom(MBB, CommonDom);
1884  }
1885 
1886  DEBUG(
1887  dbgs() << "Common post dominator for exit blocks is ";
1888  if (CommonDom)
1889  dbgs() << "BB" << CommonDom->getNumber() << "\n";
1890  else
1891  dbgs() << "NULL\n";
1892  );
1893 
1894  return CommonDom;
1895 }
1896 
1897 char AMDGPUCFGStructurizer::ID = 0;
1898 
1899 } // end anonymous namespace
1900 
1901 
1902 FunctionPass *llvm::createAMDGPUCFGStructurizerPass(TargetMachine &tm) {
1903  return new AMDGPUCFGStructurizer(tm);
1904 }