AMDGPUISelDAGToDAG.cpp
1 //===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU --===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //==-----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief Defines an instruction selector for the AMDGPU target.
12 //
13 //===----------------------------------------------------------------------===//
14 #include "AMDGPUInstrInfo.h"
15 #include "AMDGPUISelLowering.h" // For AMDGPUISD
16 #include "AMDGPURegisterInfo.h"
17 #include "R600InstrInfo.h"
18 #include "SIISelLowering.h"
19 #include "llvm/ADT/ValueMap.h"
25 #include "llvm/Support/Compiler.h"
26 #include <list>
27 #include <queue>
28 
29 using namespace llvm;
30 
31 //===----------------------------------------------------------------------===//
32 // Instruction Selector Implementation
33 //===----------------------------------------------------------------------===//
34 
35 namespace {
36 /// AMDGPU specific code to select AMDGPU machine instructions for
37 /// SelectionDAG operations.
38 class AMDGPUDAGToDAGISel : public SelectionDAGISel {
39  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
40  // make the right decision when generating code for different targets.
41  const AMDGPUSubtarget &Subtarget;
42 public:
43  AMDGPUDAGToDAGISel(TargetMachine &TM);
44  virtual ~AMDGPUDAGToDAGISel();
45 
46  SDNode *Select(SDNode *N);
47  virtual const char *getPassName() const;
48  virtual void PostprocessISelDAG();
49 
50 private:
51  inline SDValue getSmallIPtrImm(unsigned Imm);
52  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
53  const R600InstrInfo *TII);
54  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
55  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
56 
57  // Complex pattern selectors
58  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
59  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
60  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
61  SDValue SimplifyI24(SDValue &Op);
62  bool SelectI24(SDValue Addr, SDValue &Op);
63  bool SelectU24(SDValue Addr, SDValue &Op);
64 
65  static bool checkType(const Value *ptr, unsigned int addrspace);
66 
67  static bool isGlobalStore(const StoreSDNode *N);
68  static bool isPrivateStore(const StoreSDNode *N);
69  static bool isLocalStore(const StoreSDNode *N);
70  static bool isRegionStore(const StoreSDNode *N);
71 
72  bool isCPLoad(const LoadSDNode *N) const;
73  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
74  bool isGlobalLoad(const LoadSDNode *N) const;
75  bool isParamLoad(const LoadSDNode *N) const;
76  bool isPrivateLoad(const LoadSDNode *N) const;
77  bool isLocalLoad(const LoadSDNode *N) const;
78  bool isRegionLoad(const LoadSDNode *N) const;
79 
80  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
81  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
82  bool SelectGlobalValueVariableOffset(SDValue Addr,
83  SDValue &BaseReg, SDValue& Offset);
84  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
85  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
86 
87  // Include the pieces autogenerated from the target description.
88 #include "AMDGPUGenDAGISel.inc"
89 };
90 } // end anonymous namespace
91 
92 /// \brief This pass converts a legalized DAG into an AMDGPU-specific
93 /// DAG, ready for instruction scheduling.
94 FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM
95  ) {
96  return new AMDGPUDAGToDAGISel(TM);
97 }
98 
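The factory above is the entry point the rest of the backend uses to create this pass. A minimal sketch of how it is typically wired up, assuming the conventional TargetPassConfig hook (AMDGPUPassConfig lives in AMDGPUTargetMachine.cpp, not in this file):

// Sketch only -- assumes the usual TargetPassConfig hook; not part of this file.
bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
  return false;
}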
99 AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
100  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
101 }
102 
103 AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
104 }
105 
106 /// \brief Determine the register class for \p OpNo
107 /// \returns The register class of the virtual register that will be used for
108 /// the given operand number \p OpNo or NULL if the register class cannot be
109 /// determined.
110 const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
111  unsigned OpNo) const {
112  if (!N->isMachineOpcode()) {
113  return NULL;
114  }
115  switch (N->getMachineOpcode()) {
116  default: {
117  const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
118  unsigned OpIdx = Desc.getNumDefs() + OpNo;
119  if (OpIdx >= Desc.getNumOperands())
120  return NULL;
121  int RegClass = Desc.OpInfo[OpIdx].RegClass;
122  if (RegClass == -1) {
123  return NULL;
124  }
125  return TM.getRegisterInfo()->getRegClass(RegClass);
126  }
127  case AMDGPU::REG_SEQUENCE: {
128  const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(
129  cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
130  unsigned SubRegIdx =
131  dyn_cast<ConstantSDNode>(N->getOperand(OpNo + 1))->getZExtValue();
132  return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
133  }
134  }
135 }
136 
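A usage note for getOperandRegClass above: OpNo counts use operands only, so the lookup shifts past the register definitions listed first in the MCInstrDesc. A tiny sketch of that index arithmetic (hypothetical helper name):

// Sketch only: MCInstrDesc operands are laid out as
//   [ def0 .. def(NumDefs-1), use0, use1, ... ]
// so a use-operand number maps to descriptor index NumDefs + OpNo.
static unsigned descOperandIndex(unsigned NumDefs, unsigned OpNo) {
  return NumDefs + OpNo; // mirrors "Desc.getNumDefs() + OpNo" above
}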
137 SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
138  return CurDAG->getTargetConstant(Imm, MVT::i32);
139 }
140 
141 bool AMDGPUDAGToDAGISel::SelectADDRParam(
142  SDValue Addr, SDValue& R1, SDValue& R2) {
143 
144  if (Addr.getOpcode() == ISD::FrameIndex) {
145  if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
146  R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
147  R2 = CurDAG->getTargetConstant(0, MVT::i32);
148  } else {
149  R1 = Addr;
150  R2 = CurDAG->getTargetConstant(0, MVT::i32);
151  }
152  } else if (Addr.getOpcode() == ISD::ADD) {
153  R1 = Addr.getOperand(0);
154  R2 = Addr.getOperand(1);
155  } else {
156  R1 = Addr;
157  R2 = CurDAG->getTargetConstant(0, MVT::i32);
158  }
159  return true;
160 }
161 
162 bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
163  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
164  Addr.getOpcode() == ISD::TargetGlobalAddress) {
165  return false;
166  }
167  return SelectADDRParam(Addr, R1, R2);
168 }
169 
170 
171 bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
172  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
173  Addr.getOpcode() == ISD::TargetGlobalAddress) {
174  return false;
175  }
176 
177  if (Addr.getOpcode() == ISD::FrameIndex) {
178  if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
179  R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
180  R2 = CurDAG->getTargetConstant(0, MVT::i64);
181  } else {
182  R1 = Addr;
183  R2 = CurDAG->getTargetConstant(0, MVT::i64);
184  }
185  } else if (Addr.getOpcode() == ISD::ADD) {
186  R1 = Addr.getOperand(0);
187  R2 = Addr.getOperand(1);
188  } else {
189  R1 = Addr;
190  R2 = CurDAG->getTargetConstant(0, MVT::i64);
191  }
192  return true;
193 }
194 
195 SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
196  unsigned int Opc = N->getOpcode();
197  if (N->isMachineOpcode()) {
198  N->setNodeId(-1);
199  return NULL; // Already selected.
200  }
201  switch (Opc) {
202  default: break;
203  case ISD::BUILD_VECTOR: {
204  unsigned RegClassID;
205  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
206  const AMDGPURegisterInfo *TRI =
207  static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
208  const SIRegisterInfo *SIRI =
209  static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
210  EVT VT = N->getValueType(0);
211  unsigned NumVectorElts = VT.getVectorNumElements();
212  assert(VT.getVectorElementType().bitsEq(MVT::i32));
213  if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
214  bool UseVReg = true;
215  for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
216  U != E; ++U) {
217  if (!U->isMachineOpcode()) {
218  continue;
219  }
220  const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
221  if (!RC) {
222  continue;
223  }
224  if (SIRI->isSGPRClass(RC)) {
225  UseVReg = false;
226  }
227  }
228  switch(NumVectorElts) {
229  case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
230  AMDGPU::SReg_32RegClassID;
231  break;
232  case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
233  AMDGPU::SReg_64RegClassID;
234  break;
235  case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
236  AMDGPU::SReg_128RegClassID;
237  break;
238  case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
239  AMDGPU::SReg_256RegClassID;
240  break;
241  case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
242  AMDGPU::SReg_512RegClassID;
243  break;
244  default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
245  }
246  } else {
247  // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG,
248  // which adds a 128-bit register copy when going through the
249  // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
250  // as possible because they cannot be bundled by our scheduler.
251  switch(NumVectorElts) {
252  case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
253  case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
254  default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
255  }
256  }
257 
258  SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);
259 
260  if (NumVectorElts == 1) {
261  return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS,
262  VT.getVectorElementType(),
263  N->getOperand(0), RegClass);
264  }
265 
266  assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
267  "supported yet");
268  // 16 = Max Num Vector Elements
269  // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
270  // 1 = Vector Register Class
271  SDValue RegSeqArgs[16 * 2 + 1];
272 
273  RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
274  bool IsRegSeq = true;
275  for (unsigned i = 0; i < N->getNumOperands(); i++) {
276  // XXX: Why is this here?
277  if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
278  IsRegSeq = false;
279  break;
280  }
281  RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
282  RegSeqArgs[1 + (2 * i) + 1] =
283  CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
284  }
285  if (!IsRegSeq)
286  break;
287  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
288  RegSeqArgs, 2 * N->getNumOperands() + 1);
289  }
290  case ISD::BUILD_PAIR: {
291  SDValue RC, SubReg0, SubReg1;
292  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
293  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
294  break;
295  }
296  if (N->getValueType(0) == MVT::i128) {
297  RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
298  SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
299  SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
300  } else if (N->getValueType(0) == MVT::i64) {
301  RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
302  SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
303  SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
304  } else {
305  llvm_unreachable("Unhandled value type for BUILD_PAIR");
306  }
307  const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
308  N->getOperand(1), SubReg1 };
309  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
310  SDLoc(N), N->getValueType(0), Ops);
311  }
312  case AMDGPUISD::REGISTER_LOAD: {
313  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
314  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
315  break;
316  SDValue Addr, Offset;
317 
318  SelectADDRIndirect(N->getOperand(1), Addr, Offset);
319  const SDValue Ops[] = {
320  Addr,
321  Offset,
322  CurDAG->getTargetConstant(0, MVT::i32),
323  N->getOperand(0),
324  };
325  return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
326  CurDAG->getVTList(MVT::i32, MVT::i64, MVT::Other),
327  Ops);
328  }
329  case AMDGPUISD::REGISTER_STORE: {
330  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
331  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
332  break;
333  SDValue Addr, Offset;
334  SelectADDRIndirect(N->getOperand(2), Addr, Offset);
335  const SDValue Ops[] = {
336  N->getOperand(1),
337  Addr,
338  Offset,
339  CurDAG->getTargetConstant(0, MVT::i32),
340  N->getOperand(0),
341  };
342  return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
343  CurDAG->getVTList(MVT::Other),
344  Ops);
345  }
346  }
347  return SelectCode(N);
348 }
349 
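In the BUILD_VECTOR path of Select() above, the emitted REG_SEQUENCE takes one register-class id followed by a (value, subreg-index) pair per element. A minimal sketch of the resulting operand count, matching the 2 * N->getNumOperands() + 1 passed to SelectNodeTo (hypothetical helper):

// Sketch only: REG_SEQUENCE operand layout is { RegClassID, Elt0, sub0, Elt1, sub1, ... }.
constexpr unsigned regSequenceNumOperands(unsigned NumElts) {
  return 2 * NumElts + 1; // e.g. a 4-element BUILD_VECTOR yields 9 operands
}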
350 
351 bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
352  if (!ptr) {
353  return false;
354  }
355  Type *ptrType = ptr->getType();
356  return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
357 }
358 
359 bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
360  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
361 }
362 
363 bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
364  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS) &&
365  !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS) &&
366  !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
367 }
368 
369 bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
370  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
371 }
372 
373 bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
374  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
375 }
376 
377 bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
378  if (CbId == -1) {
379  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
380  }
381  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
382 }
383 
384 bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
385  if (checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)) {
386  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
387  if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
388  N->getMemoryVT().bitsLT(MVT::i32)) {
389  return true;
390  }
391  }
392  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
393 }
394 
395 bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
396  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
397 }
398 
399 bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
400  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
401 }
402 
403 bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
404  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
405 }
406 
407 bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
408  MachineMemOperand *MMO = N->getMemOperand();
409  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
410  if (MMO) {
411  const Value *V = MMO->getValue();
412  const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
413  if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
414  return true;
415  }
416  }
417  }
418  return false;
419 }
420 
421 bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
422  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
423  // Check to make sure we are not a constant pool load or a constant load
424  // that is marked as a private load
425  if (isCPLoad(N) || isConstantLoad(N, -1)) {
426  return false;
427  }
428  }
429  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
430  && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
431  && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
432  && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
433  && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
434  && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
435  return true;
436  }
437  return false;
438 }
439 
440 const char *AMDGPUDAGToDAGISel::getPassName() const {
441  return "AMDGPU DAG->DAG Pattern Instruction Selection";
442 }
443 
444 #ifdef DEBUGTMP
445 #undef INT64_C
446 #endif
447 #undef DEBUGTMP
448 
449 //===----------------------------------------------------------------------===//
450 // Complex Patterns
451 //===----------------------------------------------------------------------===//
452 
453 bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
454  SDValue& IntPtr) {
455  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
456  IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
457  return true;
458  }
459  return false;
460 }
461 
462 bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
463  SDValue& BaseReg, SDValue &Offset) {
464  if (!dyn_cast<ConstantSDNode>(Addr)) {
465  BaseReg = Addr;
466  Offset = CurDAG->getIntPtrConstant(0, true);
467  return true;
468  }
469  return false;
470 }
471 
472 bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
473  SDValue &Offset) {
474  ConstantSDNode * IMMOffset;
475 
476  if (Addr.getOpcode() == ISD::ADD
477  && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
478  && isInt<16>(IMMOffset->getZExtValue())) {
479 
480  Base = Addr.getOperand(0);
481  Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
482  return true;
483  // If the pointer address is constant, we can move it to the offset field.
484  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
485  && isInt<16>(IMMOffset->getZExtValue())) {
486  Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
487  SDLoc(CurDAG->getEntryNode()),
488  AMDGPU::ZERO, MVT::i32);
489  Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
490  return true;
491  }
492 
493  // Default case, no offset
494  Base = Addr;
495  Offset = CurDAG->getTargetConstant(0, MVT::i32);
496  return true;
497 }
498 
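SelectADDRVTX_READ above folds an immediate into the offset field only when it fits the instruction's signed 16-bit offset, which is what the isInt<16> guards check. A standalone restatement under that assumption (hypothetical helper):

#include <cstdint>
// Sketch only: mirrors the isInt<16>(...) checks in SelectADDRVTX_READ.
static bool fitsVtxReadOffset(int64_t Offset) {
  return Offset >= -32768 && Offset <= 32767;
}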
499 bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
500  SDValue &Offset) {
501  ConstantSDNode *C;
502 
503  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
504  Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
505  Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
506  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
507  (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
508  Base = Addr.getOperand(0);
509  Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
510  } else {
511  Base = Addr;
512  Offset = CurDAG->getTargetConstant(0, MVT::i32);
513  }
514 
515  return true;
516 }
517 
518 SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
519  APInt Demanded = APInt(32, 0x00FFFFFF);
520  APInt KnownZero, KnownOne;
521  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
522  const TargetLowering *TLI = getTargetLowering();
523  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
524  CurDAG->ReplaceAllUsesWith(Op, TLO.New);
525  CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
526  return SimplifyI24(TLO.New);
527  } else {
528  return Op;
529  }
530 }
531 
532 bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {
533 
534  assert(Op.getValueType() == MVT::i32);
535 
536  if (CurDAG->ComputeNumSignBits(Op) == 9) {
537  I24 = SimplifyI24(Op);
538  return true;
539  }
540  return false;
541 }
542 
543 bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
544  APInt KnownZero;
545  APInt KnownOne;
546  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);
547 
548  assert (Op.getValueType() == MVT::i32);
549 
550  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
551  // i32. These smaller types are legal to use with the i24 instructions.
552  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
553  Op.getOpcode() == ISD::ANY_EXTEND ||
554  ISD::isEXTLoad(Op.getNode())) {
555  U24 = SimplifyI24(Op);
556  return true;
557  }
558  return false;
559 }
560 
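SelectU24 above accepts a 32-bit value when its top eight bits are known zero, or when it is an ANY_EXTEND or extending load from a narrower type, i.e. when it provably fits in 24 unsigned bits. A standalone restatement of the mask test (hypothetical helper):

#include <cstdint>
// Sketch only: KnownZeroMask has a 1 for every bit proven to be zero, so the
// value fits in u24 when all of bits 24..31 are proven zero.
static bool fitsInU24(uint32_t KnownZeroMask) {
  return (KnownZeroMask & 0xFF000000u) == 0xFF000000u;
}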
561 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
562  const AMDGPUTargetLowering& Lowering =
563  (*(const AMDGPUTargetLowering*)getTargetLowering());
564  bool IsModified = false;
565  do {
566  IsModified = false;
567  // Go over all selected nodes and try to fold them a bit more
568  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
569  E = CurDAG->allnodes_end(); I != E; ++I) {
570 
571  SDNode *Node = I;
572 
573  MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
574  if (!MachineNode)
575  continue;
576 
577  SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
578  if (ResNode != Node) {
579  ReplaceUses(Node, ResNode);
580  IsModified = true;
581  }
582  }
583  CurDAG->RemoveDeadNodes();
584  } while (IsModified);
585 }