ARMISelLowering.h

//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARM.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperDYN,   // WrapperDYN - A wrapper node for TargetGlobalAddress in
                    // DYN mode.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2-level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.
      INTRET_FLAG,  // Interrupt return with an LR-offset and a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMN,          // ARM CMN instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.

      CMOV,         // ARM conditional move instructions.

      BCC_i64,

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,  // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      UMLAL,        // 64bit Unsigned Accumulate Multiply
      SMLAL,        // 64bit Signed Accumulate Multiply

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,
      VMAXNM,
      VMINNM,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD
    };
  }

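  // The opcodes above are created during lowering and DAG combining. As an
  // illustrative sketch (not code from this header), an ARMISD node is
  // built like any other target-specific SelectionDAG node:
  //
  //   SDValue Res = DAG.getNode(ARMISD::VSHL, dl, VT, Vec,
  //                             DAG.getConstant(ShAmt, MVT::i32));
  //
  // ARMTargetLowering::getTargetNodeName() below maps each opcode back to a
  // printable name for DAG dumps.
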
  /// Define some predicates that are used for node matching.
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
  }
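
  // isBitFieldInvertedMask returns true for an all-ones value interrupted by
  // a single contiguous run of zeros - the mask form consumed when lowering
  // to the BFC/BFI bit-field instructions. For example, 0xFF00FFFF (zeros
  // only in bits 16-23) qualifies, while 0xFF00FF00 (two separate zero runs)
  // does not.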

  //===--------------------------------------------------------------------===//
  // ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;
254 
256  // ARM does not support scalar condition selects on vectors.
257  return (Kind != ScalarCondVectorVal);
258  }
259 
    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    virtual void
    AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;

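    // Illustrative use (a sketch, not code from this header): clients such
    // as memcpy lowering query this hook before emitting wide accesses:
    //
    //   bool Fast;
    //   if (TLI.allowsUnalignedMemoryAccesses(MVT::i32, &Fast) && Fast)
    //     ...emit a single 32-bit load...
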
    virtual EVT getOptimalMemOpType(uint64_t Size,
                                    unsigned DstAlign, unsigned SrcAlign,
                                    bool IsMemset, bool ZeroMemset,
                                    bool MemcpyStrSrc,
                                    MachineFunction &MF) const;

    using TargetLowering::isZExtFree;
    virtual bool isZExtFree(SDValue Val, EVT VT2) const;

    virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

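    // AddrMode describes "BaseGV + BaseOffs + BaseReg + Scale*ScaleReg". For
    // example, the ARM-mode load "ldr r0, [r1, r2, lsl #2]" computes
    // r1 + r2*4, i.e. a base register plus Scale == 4, which this hook
    // accepts; an arbitrary scale such as 3 has no addressing-mode encoding
    // and is rejected.
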
    /// isLegalICmpImmediate - Return true if the specified immediate is legal
    /// icmp immediate, that is the target has icmp instructions which can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

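    // For example, "cmp r0, #255" is legal: 255 fits the ARM
    // modified-immediate encoding (an 8-bit value rotated right by an even
    // amount), and a negative immediate can often be handled by flipping the
    // comparison to CMN. A value such as 257 (0x101) has no such encoding,
    // so comparing against it would otherwise require materializing the
    // constant into a register first.
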
    /// isLegalAddImmediate - Return true if the specified immediate is legal
    /// add immediate, that is the target has add instructions which can
    /// add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;

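    // Pre-indexed:  "ldr r0, [r1, #4]!"  - the base register is updated
    //               before the access.
    // Post-indexed: "ldr r0, [r1], #4"   - the base register is updated
    //               after the access.
    // DAGCombiner uses these two hooks to fold the base-pointer update into
    // the load/store itself.
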
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;


    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 MVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual const TargetRegisterClass *getRegClassFor(MVT VT) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

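    // With VFPv3, VMOV.F32/VMOV.F64 can encode any immediate of the form
    // +/- n * 2^-r with integers 16 <= n <= 31 and 0 <= r <= 7, so constants
    // like 1.0, 0.5 or -2.0 are selected directly, while e.g. 0.1 is loaded
    // from the constant pool.
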
    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;
  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(MVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 SDLoc dl) const;

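    // The two helpers above implement the AAPCS-style split of an f64 value
    // passed in core registers: PassF64ArgInRegs splits the double into two
    // i32 halves (ARMISD::VMOVRRD) and assigns them to the locations in
    // VA/NextVA (e.g. an r0/r1 pair, or a register plus a stack slot);
    // GetF64FormalArgument performs the reverse merge (ARMISD::VMOVDRR) when
    // the argument is received.
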
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true; otherwise fmuladd is
    /// expanded to fmul + fadd.
    ///
    /// ARM supports both fused and unfused multiply-add operations; we already
    /// lower a pair of fmul and fadd to the latter, so it is not clear that
    /// there would be a gain or that the gain would be worthwhile enough to
    /// risk correctness bugs.
    virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const { return false; }

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            bool isThisReturn, SDValue ThisVal) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           SDLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                       SDLoc dl, SDValue &Chain,
                       const Value *OrigArg,
                       unsigned InRegsParamRecordIdx,
                       unsigned OffsetFromOrigArg,
                       unsigned ArgOffset,
                       unsigned ArgSize,
                       bool ForceMutable) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              SDLoc dl, SDValue &Chain,
                              unsigned ArgOffset,
                              bool ForceMutable = false) const;

    void computeRegArea(CCState &CCInfo, MachineFunction &MF,
                        unsigned InRegsParamRecordIdx,
                        unsigned ArgSize,
                        unsigned &ArgRegsSize,
                        unsigned &ArgRegsSaveSize) const;

    virtual SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    virtual void HandleByVal(CCState *, unsigned &, unsigned) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                                           const SmallVectorImpl<SDValue> &OutVals,
                                           const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;

    virtual bool CanLowerReturn(CallingConv::ID CallConv,
                                MachineFunction &MF, bool isVarArg,
                                const SmallVectorImpl<ISD::OutputArg> &Outs,
                                LLVMContext &Context) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  SDLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, SDLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitAtomicBinary64(MachineInstr *MI,
                                          MachineBasicBlock *BB,
                                          unsigned Op1,
                                          unsigned Op2,
                                          bool NeedsCarry = false,
                                          bool IsCmpxchg = false,
                                          bool IsMinMax = false,
                                          ARMCC::CondCodes CC = ARMCC::AL) const;
    MachineBasicBlock *EmitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              bool signExtend,
                                              ARMCC::CondCodes Cond) const;
    MachineBasicBlock *EmitAtomicLoad64(MachineInstr *MI,
                                        MachineBasicBlock *BB) const;

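    // The Emit* helpers above expand atomic pseudo-instructions into
    // explicit exclusive-monitor loops. A sketch of the expansion for the
    // 32-bit binary case (illustrative assembly, not emitted verbatim):
    //
    //   loop:
    //     ldrex   r0, [r1]        ; load current value
    //     <binop> r2, r0, r3      ; compute the updated value
    //     strex   r4, r2, [r1]    ; try to store it exclusively
    //     cmp     r4, #0
    //     bne     loop            ; retry if the reservation was lost
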
    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
                                             MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };


  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}

#endif // ARMISELLOWERING_H