LLVM API Documentation

 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
PPCISelLowering.h
Go to the documentation of this file.
1 //===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that PPC uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
16 #define LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
17 
18 #include "PPC.h"
19 #include "PPCInstrInfo.h"
20 #include "PPCRegisterInfo.h"
21 #include "PPCSubtarget.h"
25 
26 namespace llvm {
 27  namespace PPCISD {
 28  enum NodeType {
 // NOTE(review): this text is a Doxygen extraction; the embedded source-line
 // numbers jump (e.g. 29 -> 31), showing that most enumerator-name lines were
 // elided. Only the enumerators Hi, Lo and SC survive below — the full
 // enumerator list must be recovered from the original header.
 29  // Start the numbering where the builtin ops and target ops leave off.
 31 
 32  /// FSEL - Traditional three-operand fsel node.
 33  ///
 35 
 36  /// FCFID - The FCFID instruction, taking an f64 operand and producing
 37  /// an f64 value containing the FP representation of the integer that
 38  /// was temporarily in the f64 operand.
 40 
 41  /// Newer FCFID[US] integer-to-floating-point conversion instructions for
 42  /// unsigned integers and single-precision outputs.
 44 
 45  /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
 46  /// operand, producing an f64 value containing the integer representation
 47  /// of that FP value.
 49 
 50  /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for
 51  /// unsigned integers.
 53 
 54  /// Reciprocal estimate instructions (unary FP ops).
 56 
 57  // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
 58  // three v4f32 operands and producing a v4f32 result.
 60 
 61  /// VPERM - The PPC VPERM Instruction.
 62  ///
 64 
 65  /// Hi/Lo - These represent the high and low 16-bit parts of a global
 66  /// address respectively. These nodes have two operands, the first of
 67  /// which must be a TargetGlobalAddress, and the second of which must be a
 68  /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
 69  /// though these are usually folded into other nodes.
 70  Hi, Lo,
 71 
 73 
 74  /// The following three target-specific nodes are used for calls through
 75  /// function pointers in the 64-bit SVR4 ABI.
 76 
 77  /// Restore the TOC from the TOC save area of the current stack frame.
 78  /// This is basically a hard coded load instruction which additionally
 79  /// takes/produces a flag.
 81 
 82  /// Like a regular LOAD but additionally taking/producing a flag.
 84 
 85  /// LOAD into r2 (also taking/producing a flag). Like TOC_RESTORE, this is
 86  /// a hard coded load instruction.
 88 
 89  /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
 90  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
 91  /// compute an allocation on the stack.
 93 
 94  /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
 95  /// at function entry, used for PIC code.
 97 
 98  /// These nodes represent the 32-bit PPC shifts that operate on 6-bit
 99  /// shift amounts. These nodes are generated by the multi-precision shift
 100  /// code.
 102 
 103  /// CALL - A direct function call.
 104  /// CALL_NOP is a call with the special NOP which follows 64-bit
 105  /// SVR4 calls.
 107 
 108  /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
 109  /// MTCTR instruction.
 111 
 112  /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
 113  /// BCTRL instruction.
 115 
 116  /// Return with a flag operand, matched by 'blr'
 118 
 119  /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
 120  /// This copies the bits corresponding to the specified CRREG into the
 121  /// resultant GPR. Bits corresponding to other CR regs are undefined.
 123 
 124  // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
 126 
 127  // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
 129 
 130  /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
 131  /// instructions. For lack of better number, we use the opcode number
 132  /// encoding for the OPC field to identify the compare. For example, 838
 133  /// is VCMPGTSH.
 135 
 136  /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
 137  /// altivec VCMP*o instructions. For lack of better number, we use the
 138  /// opcode number encoding for the OPC field to identify the compare. For
 139  /// example, 838 is VCMPGTSH.
 141 
 142  /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
 143  /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
 144  /// condition register to branch on, OPC is the branch opcode to use (e.g.
 145  /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
 146  /// an optional input flag argument.
 148 
 149  /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
 150  /// loops.
 152 
 153  /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
 154  /// towards zero. Used only as part of the long double-to-int
 155  /// conversion sequence.
 157 
 158  /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
 160 
 161  /// LARX = This corresponds to PPC l{w|d}arx instruction: load and
 162  /// reserve indexed. This is used to implement atomic operations.
 164 
 165  /// STCX = This corresponds to PPC stcx. instruction: store conditional
 166  /// indexed. This is used to implement atomic operations.
 168 
 169  /// TC_RETURN - A tail call return.
 170  /// operand #0 chain
 171  /// operand #1 callee (register or absolute)
 172  /// operand #2 stack adjustment
 173  /// operand #3 optional in flag
 175 
 176  /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
 179 
 180  /// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec
 181  /// TLS model, produces an ADDIS8 instruction that adds the GOT
 182  /// base to sym\@got\@tprel\@ha.
 184 
 185  /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
 186  /// TLS model, produces a LD instruction with base register G8RReg
 187  /// and offset sym\@got\@tprel\@l. This completes the addition that
 188  /// finds the offset of "sym" relative to the thread pointer.
 190 
 191  /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
 192  /// model, produces an ADD instruction that adds the contents of
 193  /// G8RReg to the thread pointer. Symbol contains a relocation
 194  /// sym\@tls which is to be replaced by the thread pointer and
 195  /// identifies to the linker that the instruction is part of a
 196  /// TLS sequence.
 198 
 199  /// G8RC = ADDIS_TLSGD_HA %X2, Symbol - For the general-dynamic TLS
 200  /// model, produces an ADDIS8 instruction that adds the GOT base
 201  /// register to sym\@got\@tlsgd\@ha.
 203 
 204  /// G8RC = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
 205  /// model, produces an ADDI8 instruction that adds G8RReg to
 206  /// sym\@got\@tlsgd\@l.
 208 
 209  /// G8RC = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
 210  /// model, produces a call to __tls_get_addr(sym\@tlsgd).
 212 
 213  /// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
 214  /// model, produces an ADDIS8 instruction that adds the GOT base
 215  /// register to sym\@got\@tlsld\@ha.
 217 
 218  /// G8RC = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
 219  /// model, produces an ADDI8 instruction that adds G8RReg to
 220  /// sym\@got\@tlsld\@l.
 222 
 223  /// G8RC = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
 224  /// model, produces a call to __tls_get_addr(sym\@tlsld).
 226 
 227  /// G8RC = ADDIS_DTPREL_HA %X3, Symbol, Chain - For the
 228  /// local-dynamic TLS model, produces an ADDIS8 instruction
 229  /// that adds X3 to sym\@dtprel\@ha. The Chain operand is needed
 230  /// to tie this in place following a copy to %X3 from the result
 231  /// of a GET_TLSLD_ADDR.
 233 
 234  /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
 235  /// model, produces an ADDI8 instruction that adds G8RReg to
 236  /// sym\@got\@dtprel\@l.
 238 
 239  /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
 240  /// during instruction selection to optimize a BUILD_VECTOR into
 241  /// operations on splats. This is necessary to avoid losing these
 242  /// optimizations due to constant folding.
 244 
 245  /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
 246  /// operand identifies the operating system entry point.
 247  SC,
 248 
 249  /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
 250  /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
 251  /// the GPRC input, then stores it through Ptr. Type can be either i16 or
 252  /// i32.
 254 
 255  /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
 256  /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
 257  /// then puts it in the bottom bits of the GPRC. TYPE can be either i16
 258  /// or i32.
 260 
 261  /// STFIWX - The STFIWX instruction. The first operand is an input token
 262  /// chain, then an f64 value to store, then an address to store it to.
 264 
 265  /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
 266  /// load which sign-extends from a 32-bit integer value into the
 267  /// destination 64-bit register.
 269 
 270  /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
 271  /// load which zero-extends from a 32-bit integer value into the
 272  /// destination 64-bit register.
 274 
 275  /// G8RC = ADDIS_TOC_HA %X2, Symbol - For medium and large code model,
 276  /// produces an ADDIS8 instruction that adds the TOC base register to
 277  /// sym\@toc\@ha.
 279 
 280  /// G8RC = LD_TOC_L Symbol, G8RReg - For medium and large code model,
 281  /// produces a LD instruction with base register G8RReg and offset
 282  /// sym\@toc\@l. Preceded by an ADDIS_TOC_HA to form a full 32-bit offset.
 284 
 285  /// G8RC = ADDI_TOC_L G8RReg, Symbol - For medium code model, produces
 286  /// an ADDI8 instruction that adds G8RReg to sym\@toc\@l.
 287  /// Preceded by an ADDIS_TOC_HA to form a full 32-bit offset.
 289  };
 290  }
291 
 292  /// Define some predicates that are used for node matching.
 293  namespace PPC {
 294  /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
 295  /// VPKUHUM instruction.
 296  bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);
 297 
 298  /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
 299  /// VPKUWUM instruction.
 300  bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);
 301 
 302  /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
 303  /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
 304  bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
 305  bool isUnary);
 306 
 307  /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
 308  /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
 309  bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
 310  bool isUnary);
 311 
 312  /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
 313  /// amount, otherwise return -1.
 314  int isVSLDOIShuffleMask(SDNode *N, bool isUnary);
 315 
 316  /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
 317  /// specifies a splat of a single element that is suitable for input to
 318  /// VSPLTB/VSPLTH/VSPLTW.
 319  bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
 320 
 321  /// isAllNegativeZeroVector - Returns true if all elements of build_vector
 322  /// are -0.0.
 323  bool isAllNegativeZeroVector(SDNode *N);
 324 
 325  /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
 326  /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
 327  unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);
 328 
 329  /// get_VSPLTI_elt - If this is a build_vector of constants which can be
 330  /// formed by using a vspltis[bhw] instruction of the specified element
 331  /// size, return the constant being splatted. The ByteSize field indicates
 332  /// the number of bytes of each element [124] -> [bhw].
 333  SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
 334  }
335 
 // NOTE(review): this span is the interior of class PPCTargetLowering; the
 // Doxygen extraction elided the class-declaration line itself (embedded
 // source line 336) and several member-name lines (the embedded numbering
 // jumps at e.g. 356, 398, 400, 403, 407, 410 and 417), so some declarations
 // below appear as orphaned parameter lists. Their names can be recovered
 // from the hover-text footer of this dump or from the original header.
 337  const PPCSubtarget &PPCSubTarget;
 338 
 339  public:
 340  explicit PPCTargetLowering(PPCTargetMachine &TM);
 341 
 342  /// getTargetNodeName() - This method returns the name of a target specific
 343  /// DAG node.
 344  virtual const char *getTargetNodeName(unsigned Opcode) const;
 345 
 346  virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
 347 
 348  /// getSetCCResultType - Return the ISD::SETCC ValueType
 349  virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
 350 
 351  /// getPreIndexedAddressParts - returns true by value, base pointer and
 352  /// offset pointer and addressing mode by reference if the node's address
 353  /// can be legally represented as pre-indexed load / store address.
 354  virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
 355  SDValue &Offset,
 357  SelectionDAG &DAG) const;
 358 
 359  /// SelectAddressRegReg - Given the specified address, check to see if it
 360  /// can be represented as an indexed [r+r] operation. Returns false if it
 361  /// can be more efficiently represented with [r+imm].
 362  bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
 363  SelectionDAG &DAG) const;
 364 
 365  /// SelectAddressRegImm - Returns true if the address N can be represented
 366  /// by a base register plus a signed 16-bit displacement [r+imm], and if it
 367  /// is not better represented as reg+reg. If Aligned is true, only accept
 368  /// displacements suitable for STD and friends, i.e. multiples of 4.
 369  bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
 370  SelectionDAG &DAG, bool Aligned) const;
 371 
 372  /// SelectAddressRegRegOnly - Given the specified address, force it to be
 373  /// represented as an indexed [r+r] operation.
 374  bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
 375  SelectionDAG &DAG) const;
 376 
 378 
 379  /// LowerOperation - Provide custom lowering hooks for some operations.
 380  ///
 381  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
 382 
 383  /// ReplaceNodeResults - Replace the results of node with an illegal result
 384  /// type with new values built out of custom code.
 385  ///
 386  virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
 387  SelectionDAG &DAG) const;
 388 
 389  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
 390 
 391  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
 392  APInt &KnownZero,
 393  APInt &KnownOne,
 394  const SelectionDAG &DAG,
 395  unsigned Depth = 0) const;
 396 
 397  virtual MachineBasicBlock *
 399  MachineBasicBlock *MBB) const;
 401  MachineBasicBlock *MBB, bool is64Bit,
 402  unsigned BinOpcode) const;
 404  MachineBasicBlock *MBB,
 405  bool is8bit, unsigned Opcode) const;
 406 
 408  MachineBasicBlock *MBB) const;
 409 
 411  MachineBasicBlock *MBB) const;
 412 
 413  ConstraintType getConstraintType(const std::string &Constraint) const;
 414 
 415  /// Examine constraint string and operand type and determine a weight value.
 416  /// The operand object must already have been set up with the operand type.
 418  AsmOperandInfo &info, const char *constraint) const;
 419 
 420  std::pair<unsigned, const TargetRegisterClass*>
 421  getRegForInlineAsmConstraint(const std::string &Constraint,
 422  MVT VT) const;
 423 
 424  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
 425  /// function arguments in the caller parameter area. This is the actual
 426  /// alignment, not its logarithm.
 427  unsigned getByValTypeAlignment(Type *Ty) const;
 428 
 429  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
 430  /// vector. If it is invalid, don't add anything to Ops.
 431  virtual void LowerAsmOperandForConstraint(SDValue Op,
 432  std::string &Constraint,
 433  std::vector<SDValue> &Ops,
 434  SelectionDAG &DAG) const;
 435 
 436  /// isLegalAddressingMode - Return true if the addressing mode represented
 437  /// by AM is legal for this target, for a load/store of the specified type.
 438  virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
 439 
 440  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
 441 
 442  /// getOptimalMemOpType - Returns the target specific optimal type for load
 443  /// and store operations as a result of memset, memcpy, and memmove
 444  /// lowering. If DstAlign is zero that means the destination
 445  /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
 446  /// means there isn't a need to check it against alignment requirement,
 447  /// probably because the source does not need to be loaded. If 'IsMemset' is
 448  /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
 449  /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
 450  /// source is constant so it does not need to be loaded.
 451  /// It returns EVT::Other if the type should be determined using generic
 452  /// target-independent logic.
 453  virtual EVT
 454  getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
 455  bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
 456  MachineFunction &MF) const;
 457 
 458  /// Is unaligned memory access allowed for the given type, and is it fast
 459  /// relative to software emulation.
 460  virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast = 0) const;
 461 
 462  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
 463  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
 464  /// expanded to FMAs when this method returns true, otherwise fmuladd is
 465  /// expanded to fmul + fadd.
 466  virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
 467 
 468  /// createFastISel - This method returns a target-specific FastISel object,
 469  /// or null if the target does not support "fast" instruction selection.
 470  virtual FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
 471  const TargetLibraryInfo *LibInfo) const;
 472 
 473  private:
 474  SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
 475  SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
 476 
 477  bool
 478  IsEligibleForTailCallOptimization(SDValue Callee,
 479  CallingConv::ID CalleeCC,
 480  bool isVarArg,
 482  SelectionDAG& DAG) const;
 483 
 484  SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
 485  int SPDiff,
 486  SDValue Chain,
 487  SDValue &LROpOut,
 488  SDValue &FPOpOut,
 489  bool isDarwinABI,
 490  SDLoc dl) const;
 491 
 492  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
 493  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
 494  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
 495  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
 496  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
 497  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
 498  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
 499  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
 500  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
 501  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
 502  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
 503  const PPCSubtarget &Subtarget) const;
 504  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG,
 505  const PPCSubtarget &Subtarget) const;
 506  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG,
 507  const PPCSubtarget &Subtarget) const;
 508  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
 509  const PPCSubtarget &Subtarget) const;
 510  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
 511  const PPCSubtarget &Subtarget) const;
 512  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
 513  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, SDLoc dl) const;
 514  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
 515  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
 516  SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
 517  SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
 518  SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
 519  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
 520  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
 521  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
 522  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
 523  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
 524 
 525  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
 526  CallingConv::ID CallConv, bool isVarArg,
 528  SDLoc dl, SelectionDAG &DAG,
 529  SmallVectorImpl<SDValue> &InVals) const;
 530  SDValue FinishCall(CallingConv::ID CallConv, SDLoc dl, bool isTailCall,
 531  bool isVarArg,
 532  SelectionDAG &DAG,
 533  SmallVector<std::pair<unsigned, SDValue>, 8>
 534  &RegsToPass,
 535  SDValue InFlag, SDValue Chain,
 536  SDValue &Callee,
 537  int SPDiff, unsigned NumBytes,
 539  SmallVectorImpl<SDValue> &InVals) const;
 540 
 541  virtual SDValue
 542  LowerFormalArguments(SDValue Chain,
 543  CallingConv::ID CallConv, bool isVarArg,
 545  SDLoc dl, SelectionDAG &DAG,
 546  SmallVectorImpl<SDValue> &InVals) const;
 547 
 548  virtual SDValue
 549  LowerCall(TargetLowering::CallLoweringInfo &CLI,
 550  SmallVectorImpl<SDValue> &InVals) const;
 551 
 552  virtual bool
 553  CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
 554  bool isVarArg,
 556  LLVMContext &Context) const;
 557 
 558  virtual SDValue
 559  LowerReturn(SDValue Chain,
 560  CallingConv::ID CallConv, bool isVarArg,
 562  const SmallVectorImpl<SDValue> &OutVals,
 563  SDLoc dl, SelectionDAG &DAG) const;
 564 
 565  SDValue
 566  extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, SelectionDAG &DAG,
 567  SDValue ArgVal, SDLoc dl) const;
 568 
 569  void
 570  setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG,
 571  unsigned nAltivecParamsAtEnd,
 572  unsigned MinReservedArea, bool isPPC64) const;
 573 
 574  SDValue
 575  LowerFormalArguments_Darwin(SDValue Chain,
 576  CallingConv::ID CallConv, bool isVarArg,
 578  SDLoc dl, SelectionDAG &DAG,
 579  SmallVectorImpl<SDValue> &InVals) const;
 580  SDValue
 581  LowerFormalArguments_64SVR4(SDValue Chain,
 582  CallingConv::ID CallConv, bool isVarArg,
 584  SDLoc dl, SelectionDAG &DAG,
 585  SmallVectorImpl<SDValue> &InVals) const;
 586  SDValue
 587  LowerFormalArguments_32SVR4(SDValue Chain,
 588  CallingConv::ID CallConv, bool isVarArg,
 590  SDLoc dl, SelectionDAG &DAG,
 591  SmallVectorImpl<SDValue> &InVals) const;
 592 
 593  SDValue
 594  createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
 595  SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
 596  SelectionDAG &DAG, SDLoc dl) const;
 597 
 598  SDValue
 599  LowerCall_Darwin(SDValue Chain, SDValue Callee,
 600  CallingConv::ID CallConv,
 601  bool isVarArg, bool isTailCall,
 603  const SmallVectorImpl<SDValue> &OutVals,
 605  SDLoc dl, SelectionDAG &DAG,
 606  SmallVectorImpl<SDValue> &InVals) const;
 607  SDValue
 608  LowerCall_64SVR4(SDValue Chain, SDValue Callee,
 609  CallingConv::ID CallConv,
 610  bool isVarArg, bool isTailCall,
 612  const SmallVectorImpl<SDValue> &OutVals,
 614  SDLoc dl, SelectionDAG &DAG,
 615  SmallVectorImpl<SDValue> &InVals) const;
 616  SDValue
 617  LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
 618  bool isVarArg, bool isTailCall,
 620  const SmallVectorImpl<SDValue> &OutVals,
 622  SDLoc dl, SelectionDAG &DAG,
 623  SmallVectorImpl<SDValue> &InVals) const;
 624 
 625  SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
 626  SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
 627 
 628  SDValue DAGCombineFastRecip(SDValue Op, DAGCombinerInfo &DCI) const;
 629  SDValue DAGCombineFastRecipFSQRT(SDValue Op, DAGCombinerInfo &DCI) const;
 630 
 631  CCAssignFn *useFastISelCCs(unsigned Flag) const;
 632  };
633 
 634  namespace PPC {
 /// createFastISel - Free-function entry point for PPC "fast" instruction
 /// selection; same signature as the virtual PPCTargetLowering::createFastISel
 /// declared above.
 635  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
 636  const TargetLibraryInfo *LibInfo);
 637  }
638 
 // Custom argument-assignment handlers for the 32-bit SVR4 calling
 // convention. All three share the same by-reference signature and return
 // bool, mutating LocInfo/ArgFlags/State as they assign the value.
 639  bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
 640  CCValAssign::LocInfo &LocInfo,
 641  ISD::ArgFlagsTy &ArgFlags,
 642  CCState &State);
 643 
 644  bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
 645  MVT &LocVT,
 646  CCValAssign::LocInfo &LocInfo,
 647  ISD::ArgFlagsTy &ArgFlags,
 648  CCState &State);
 649 
 650  bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
 651  MVT &LocVT,
 652  CCValAssign::LocInfo &LocInfo,
 653  ISD::ArgFlagsTy &ArgFlags,
 654  CCState &State);
655 }
656 
657 #endif // LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize)
int isVSLDOIShuffleMask(SDNode *N, bool isUnary)
Return with a flag operand, matched by 'blr'.
virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast=0) const
bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
MachineBasicBlock * EmitPartwordAtomicBinary(MachineInstr *MI, MachineBasicBlock *MBB, bool is8bit, unsigned Opcode) const
virtual FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, bool isUnary)
lazy value info
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr *MI, MachineBasicBlock *MBB) const
virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const
ID
LLVM Calling Convention Representation.
Definition: CallingConv.h:26
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary)
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
MachineBasicBlock * EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *MBB, bool is64Bit, unsigned BinOpcode) const
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
virtual MVT getScalarShiftAmountTy(EVT LHSTy) const
Reciprocal estimate instructions (unary FP ops).
bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
PPCTargetLowering(PPCTargetMachine &TM)
virtual void computeMaskedBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth=0) const
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, bool isUnary)
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
bool isAllNegativeZeroVector(SDNode *N)
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
Like a regular LOAD but additionally taking/producing a flag.
virtual const char * getTargetNodeName(unsigned Opcode) const
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary)
bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const
getSetCCResultType - Return the ISD::SETCC ValueType
virtual EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const
static const int FIRST_TARGET_MEMORY_OPCODE
Definition: ISDOpcodes.h:648
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr *MI, MachineBasicBlock *MBB) const
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
Class for arbitrary precision integers.
Definition: APInt.h:75
ConstraintType getConstraintType(const std::string &Constraint) const
AddrMode
ARM Addressing Modes.
Definition: ARMBaseInfo.h:234
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
#define N
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, bool Aligned) const
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const
unsigned getByValTypeAlignment(Type *Ty) const
virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const