X86FrameLowering.cpp

//===-- X86FrameLowering.cpp - X86 Frame Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();
  const TargetRegisterInfo *RegInfo = TM.getRegisterInfo();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          RegInfo->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MF.hasMSInlineAsm() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit() || MMI.callsEHReturn());
}

static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getLEArOpcode(unsigned IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
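
// Illustrative note (editor's addition, not part of the original file):
// these helpers pick the smallest encoding for an immediate stack
// adjustment. For example, getSUBriOpcode(/*IsLP64=*/true, 8) returns
// X86::SUB64ri8 because 8 fits in a signed 8-bit immediate, while
// getSUBriOpcode(/*IsLP64=*/true, 4096) returns X86::SUB64ri32.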

/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worrying about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const TargetRegisterInfo &TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  static const uint16_t CallerSavedRegs32Bit[] = {
    X86::EAX, X86::EDX, X86::ECX, 0
  };

  static const uint16_t CallerSavedRegs64Bit[] = {
    X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
    X86::R8,  X86::R9,  X86::R10, X86::R11, 0
  };

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
    for (; *CS; ++CS)
      if (!Uses.count(*CS))
        return *CS;
  }
  }

  return 0;
}
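
// Worked example (editor's addition, not part of the original file): before
// a RET that returns a value in RAX, the alias walk above records RAX (and
// EAX/AX/AH/AL) in Uses, so the first free entry of CallerSavedRegs64Bit is
// typically RDX. emitSPUpdate can then shorten "add rsp, 8" to "pop rdx".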

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes,
                  bool Is64Bit, bool IsLP64, bool UseLEA,
                  const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc;
  if (UseLEA)
    Opc = getLEArOpcode(IsLP64);
  else
    Opc = isSub
      ? getSUBriOpcode(IsLP64, Offset)
      : getADDriOpcode(IsLP64, Offset);

  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    if (ThisVal == (Is64Bit ? 8 : 4)) {
      // Use push / pop instead.
      unsigned Reg = isSub
        ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
        : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
      if (Reg) {
        Opc = isSub
          ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
          : (Is64Bit ? X86::POP64r : X86::POP32r);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
          .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        Offset -= ThisVal;
        continue;
      }
    }

    MachineInstr *MI = NULL;

    if (UseLEA) {
      MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                        StackPtr, false, isSub ? -ThisVal : ThisVal);
    } else {
      MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    }

    if (isSub)
      MI->setFlag(MachineInstr::FrameSetup);

    Offset -= ThisVal;
  }
}
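
// Worked example (editor's addition): a slot-sized adjustment takes the
// push/pop fast path above, so NumBytes = -8 on x86-64 becomes "push rax"
// rather than "sub rsp, 8". A 4 GiB adjustment is split at Chunk = 2^31 - 1
// bytes into ceil(2^32 / (2^31 - 1)) = 3 SUB instructions.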

/// mergeSPUpdatesUp - Merge a stack adjustment made by the instruction just
/// above the iterator into *NumBytes, erasing that instruction.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - Merge a stack adjustment made by the instruction just
/// below the iterator into *NumBytes, erasing that instruction.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME:  THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB/LEA instruction it is deleted and the
/// stack adjustment is returned as a positive value for ADD/LEA and a
/// negative value for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
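
// Worked example (editor's addition): with doMergeWithPrevious = true and a
// preceding "SUB32ri %esp, 16", the SUB is erased and -16 is returned, so
// the prologue below can fold those 16 bytes into its own adjustment instead
// of leaving two back-to-back stack updates.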

static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                 MCSymbol *Label,
                                                 unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = -RegInfo->getSlotSize();

  // FIXME: This is a dirty hack. The code itself is a mess right now.
  // It should be rewritten from scratch and generalized at some point.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //     ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    MMI.addFrameInst(MCCFIInstruction::createOffset(Label, DwarfReg, Offset));
  }
}

/// usesTheStack - This function checks whether any user of EFLAGS is a COPY
/// instruction. We know that the code that lowers a COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
static bool usesTheStack(const MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  for (MachineRegisterInfo::reg_iterator ri = MRI.reg_begin(X86::EFLAGS),
       re = MRI.reg_end(); ri != re; ++ri)
    if (ri->isCopy())
      return true;

  return false;
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
    Fn->needsUnwindTableEntry();
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  bool Is64Bit = STI.is64Bit();
  bool IsLP64 = STI.isTarget64BitLP64();
  bool IsWin64 = STI.isTargetWin64();
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();
  unsigned BasePtr = RegInfo->getBaseRegister();
  DebugLoc DL;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out.  Otherwise just make sure we have some alignment - we'll
  // go with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone). We also check that we don't
  // push and pop from the stack.
  if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                   Attribute::NoRedZone) &&
      !RegInfo->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                     // No dynamic alloca.
      !MFI->adjustsStack() &&                           // No calls.
      !IsWin64 &&                                       // Win64 has no Red Zone
      !usesTheStack(MF) &&                              // Don't push and pop.
      !MF.getTarget().Options.EnableSegmentedStacks) {  // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }
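
  // Worked example (editor's addition): a 64-bit leaf function with
  // StackSize = 136 and no callee-saved spills shrinks to
  // max(0, 136 - 128) = 8 bytes of explicit allocation; the other 128 bytes
  // live in the red zone below RSP, which the SysV ABI keeps safe from
  // signal and interrupt handlers.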

  // Insert stack pointer adjustment for later moving of return addr.  Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(IsLP64, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta)
        .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers are pushed on stack before the stack
      // is realigned.
      FrameSize -= X86FI->getCalleeSavedFrameSize();
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
    }

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
        .addSym(FrameLabel);

      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(FrameLabel, 2 * stackGrowth));

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
      MMI.addFrameInst(MCCFIInstruction::createOffset(FrameLabel, DwarfFramePtr,
                                                      2 * stackGrowth));
    }

    // Update EBP with the new base value.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
        .addReg(StackPtr)
        .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
        .addSym(FrameLabel);

      // Define the current CFA to use the EBP/RBP register.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
      MMI.addFrameInst(
          MCCFIInstruction::createDefCfaRegister(FrameLabel, DwarfFramePtr));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    MBBI->setFlag(MachineInstr::FrameSetup);
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      MCSymbol *Label = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);

      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(Label, StackOffset));
      StackOffset += stackGrowth;
    }
  }

  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).

  // NOTE: We push the registers before realigning the stack, so
  // vector callee-saved (xmm) registers may be saved w/o proper
  // alignment in this way. However, currently these regs are saved in
  // stack slots (see X86FrameLowering::spillCalleeSavedRegisters()), so
  // this shouldn't be a problem.
  if (RegInfo->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr)
      .addReg(StackPtr)
      .addImm(-MaxAlign)
      .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }
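
  // Illustrative note (editor's addition): for MaxAlign = 32 the code above
  // emits "and rsp, -32" (immediate 0xffffffe0), rounding RSP down to the
  // requested boundary after the callee-saved pushes.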

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

  // If there is an ADD32ri or SUB32ri of ESP immediately after this
  // instruction, merge the two instructions.
  mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack.  Windows uses __chkstk and cygwin/mingw
  // uses __alloca.  __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go.  The 64-bit version of
  // __chkstk is only responsible for probing the stack.  The 64-bit prologue is
  // responsible for adjusting the stack pointer.  Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
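
  // Illustrative sequence (editor's addition): a 32-bit Windows frame of
  // 8192 bytes is allocated below roughly as
  //   mov  eax, 8192
  //   call _chkstk        ; probes each 4K page and moves ESP itself
  // while 64-bit MSVC targets follow the call with "sub rsp, rax", since
  // their __chkstk only probes.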
  if (NumBytes >= 4096 && STI.isOSWindows() && !STI.isTargetEnvMacho()) {
    const char *StackProbeSymbol;
    bool isSPUpdateNeeded = false;

    if (Is64Bit) {
      if (STI.isTargetCygMing())
        StackProbeSymbol = "___chkstk";
      else {
        StackProbeSymbol = "__chkstk";
        isSPUpdateNeeded = true;
      }
    } else if (STI.isTargetCygMing())
      StackProbeSymbol = "_alloca";
    else
      StackProbeSymbol = "_chkstk";

    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    if (isEAXAlive) {
      // Sanity check that EAX is not livein for this function.
      // It should not be, so throw an assert.
      assert(!Is64Bit && "EAX is livein in x64 case!");

      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::W64ALLOCA : X86::CALLpcrel32))
      .addExternalSymbol(StackProbeSymbol)
      .addReg(StackPtr,    RegState::Define | RegState::Implicit)
      .addReg(X86::EAX,    RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
      .setMIFlag(MachineInstr::FrameSetup);

    // MSVC x64's __chkstk does not adjust %rsp itself.
    // It also does not clobber %rax so we can reuse it when adjusting %rsp.
    if (isSPUpdateNeeded) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), StackPtr)
        .addReg(StackPtr)
        .addReg(X86::RAX)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (isEAXAlive) {
      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, IsLP64,
                 UseLEA, TII, *RegInfo);

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (RegInfo->hasBasePointer(MF)) {
    // Update the frame pointer with the current stack pointer.
    unsigned Opc = Is64Bit ? X86::MOV64rr : X86::MOV32rr;
    BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  if (( (!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    MCSymbol *Label = MMI.getContext().CreateTempSymbol();
    BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
      .addSym(Label);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(
                         Label, -StackSize + stackGrowth));
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
  }
}

void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  assert(MBBI != MBB.end() && "Returning block has no instructions");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  bool Is64Bit = STI.is64Bit();
  bool IsLP64 = STI.isTarget64BitLP64();
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break;  // These are ok
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign  = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out.  Otherwise just make sure we have some alignment - we'll
  // go with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers were pushed on stack before the stack
      // was realigned.
      FrameSize -= CSSize;
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - CSSize;
    }

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->isTerminator())
      break;

    --MBBI;
  }
  MachineBasicBlock::iterator FirstCSPop = MBBI;

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off! The same applies when the stack was
  // realigned.
  if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
    if (RegInfo->needsStackRealignment(MF))
      MBBI = FirstCSPop;
    if (CSSize != 0) {
      unsigned Opc = getLEArOpcode(IsLP64);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, -CSSize);
    } else {
      unsigned Opc = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, IsLP64, UseLEA,
                 TII, *RegInfo);
  }

  // We're returning from the function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, IsLP64,
                   UseLEA, TII, *RegInfo);
    }

    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                       ? X86::TAILJMPd : X86::TAILJMPd64));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = prior(MBBI);
    NewMI->copyImplicitOps(MF, MBBI);

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = MBB.getLastNonDebugInstr();

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, IsLP64, UseLEA, TII,
                 *RegInfo);
  }
}

int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                          int FI) const {
  const X86RegisterInfo *RegInfo =
    static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (RegInfo->hasBasePointer(MF)) {
    assert (hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
  } else if (RegInfo->needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += RegInfo->getSlotSize();

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                             unsigned &FrameReg) const {
  const X86RegisterInfo *RegInfo =
    static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  // We can't calculate offset from frame pointer if the stack is realigned,
  // so enforce usage of stack/base pointer.  The base pointer is used when we
  // have dynamic allocas in addition to dynamic realignment.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else if (RegInfo->needsStackRealignment(MF))
    FrameReg = RegInfo->getStackRegister();
  else
    FrameReg = RegInfo->getFrameRegister(MF);
  return getFrameIndexOffset(MF, FI);
}

bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                                 MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();

  unsigned SlotSize = STI.is64Bit() ? 8 : 4;
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned CalleeFrameSize = 0;

  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  // Push GPRs. It increases frame size.
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    if (Reg == FPReg)
      // X86RegisterInfo::emitPrologue will handle spilling of frame register.
      continue;
    CalleeFrameSize += SlotSize;
    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  X86FI->setCalleeSavedFrameSize(CalleeFrameSize);

  // Spill XMM regs to stack slots; x86 has no push/pop for XMM registers.
  // Note that only the Win64 ABI might spill XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
                            RC, TRI);
  }

  return true;
}

bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();

  // Reload XMMs from stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
                             RC, TRI);
  }

  // POP GPRs.
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    if (Reg == FPReg)
      // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
      continue;
    BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
  }
  return true;
}

void
X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                       RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  unsigned SlotSize = RegInfo->getSlotSize();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           TailCallReturnAddrDelta - SlotSize, true);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    (void)FrameIdx;
  }

  // Spill the BasePtr if it's used.
  if (RegInfo->hasBasePointer(MF))
    MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
}

static bool
HasNestArgument(const MachineFunction *MF) {
  const Function *F = MF->getFunction();
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; I++) {
    if (I->hasNestAttr())
      return true;
  }
  return false;
}

/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function either one or two registers will be
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, const MachineFunction &MF, bool Primary) {
  CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();

  // Erlang stuff.
  if (CallingConvention == CallingConv::HiPE) {
    if (Is64Bit)
      return Primary ? X86::R14 : X86::R13;
    else
      return Primary ? X86::EBX : X86::EDI;
  }

  if (Is64Bit)
    return Primary ? X86::R11 : X86::R12;

  bool IsNested = HasNestArgument(&MF);

  if (CallingConvention == CallingConv::X86_FastCall ||
      CallingConvention == CallingConv::Fast) {
    if (IsNested)
      report_fatal_error("Segmented stacks does not support fastcall with "
                         "nested function.");
    return Primary ? X86::EAX : X86::ECX;
  }
  if (IsNested)
    return Primary ? X86::EDX : X86::EAX;
  return Primary ? X86::ECX : X86::EAX;
}

// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;

void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
  MachineBasicBlock &prologueMBB = MF.front();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  uint64_t StackSize;
  bool Is64Bit = STI.is64Bit();
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  unsigned ScratchReg = GetScratchRegister(Is64Bit, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction()->isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() &&
      !STI.isTargetWin32() && !STI.isTargetFreeBSD())
    report_fatal_error("Segmented stacks not supported on this platform.");

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit in
  // allocMBB needs to be last (terminating) instruction.

  for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
         e = prologueMBB.livein_end(); i != e; i++) {
    allocMBB->addLiveIn(*i);
    checkMBB->addLiveIn(*i);
  }

  if (IsNested)
    allocMBB->addLiveIn(X86::R10);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this
  // particular prologue.
  StackSize = MFI->getStackSize();

  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = StackSize < kSplitStackAvailable;
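
  // Worked example (editor's addition): with kSplitStackAvailable = 256, a
  // 96-byte frame compares SP itself against the TLS stack guard below; a
  // 4096-byte frame first computes ScratchReg = SP - 4096 with an LEA and
  // compares that value against the guard instead.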

  // Read the limit of the current stacklet from the stack_guard TLS location.
  if (Is64Bit) {
    if (STI.isTargetLinux()) {
      TlsReg = X86::FS;
      TlsOffset = 0x70;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
    } else if (STI.isTargetFreeBSD()) {
      TlsReg = X86::FS;
      TlsOffset = 0x18;
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::RSP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
      .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    if (STI.isTargetLinux()) {
      TlsReg = X86::GS;
      TlsOffset = 0x30;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x48 + 90*4;
    } else if (STI.isTargetWin32()) {
      TlsReg = X86::FS;
      TlsOffset = 0x14; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    if (STI.isTargetLinux() || STI.isTargetWin32()) {
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
        .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
    } else if (STI.isTargetDarwin()) {

      // TlsOffset doesn't fit into a mod r/m byte so we need an extra register
      unsigned ScratchReg2;
      bool SaveScratch2;
      if (CompareStackPointer) {
        // The primary scratch register is available for holding the TLS offset
        ScratchReg2 = GetScratchRegister(Is64Bit, MF, true);
        SaveScratch2 = false;
      } else {
        // Need to use a second register to hold the TLS offset
        ScratchReg2 = GetScratchRegister(Is64Bit, MF, false);

        // Unfortunately, with fastcc the second scratch register may hold an arg
        SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
      }

      // If Scratch2 is live-in then it needs to be saved
      assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
             "Scratch register is live-in and not saved");

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
          .addReg(ScratchReg2, RegState::Kill);

      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
        .addImm(TlsOffset);
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
        .addReg(ScratchReg)
        .addReg(ScratchReg2).addImm(1).addReg(0)
        .addImm(0)
        .addReg(TlsReg);

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    }
  }

  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JA_4)).addMBB(&prologueMBB);

  // On 32 bit we first push the arguments size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to _morestack

    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(X86::MOV64rr), X86::RAX).addReg(X86::R10);

    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R11)
      .addImm(X86FI->getArgumentStackSize());
    MF.getRegInfo().setPhysRegUsed(X86::R10);
    MF.getRegInfo().setPhysRegUsed(X86::R11);
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }

  // __morestack is in libgcc
  if (Is64Bit)
    BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack");
  else
    BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack");

  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

  allocMBB->addSuccessor(&prologueMBB);

  checkMBB->addSuccessor(allocMBB);
  checkMBB->addSuccessor(&prologueMBB);

#ifdef XDEBUG
  MF.verify();
#endif
}

/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of hybrid stack/heap architecture.
/// (for more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
///       ...
/// IncStack:
///       call inc_stack   # doubles the stack space
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const unsigned SlotSize = TM.getRegisterInfo()->getSlotSize();
  const bool Is64Bit = STI.is64Bit();
  DebugLoc DL;
  // HiPE-specific values
  const unsigned HipeLeafWords = 24;
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
  unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
                            MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
  unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;
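
  // Worked example (editor's addition): on x86-64 (CCRegisteredArgs = 6) a
  // function taking 8 arguments passes 2 of them on the stack, so
  // CallerStkArity = 2 and a 40-byte frame yields
  // MaxStack = 40 + 2*8 + 8 = 64 bytes before the callee scan below.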

  assert(STI.isTargetLinux() &&
         "HiPE prologue is only supported on Linux operating systems.");

  // Compute the largest caller's frame that is needed to fit the callees'
  // frames. This 'MaxStack' is computed from:
  //
  // a) the fixed frame size, which is the space needed for all spilled temps,
  // b) outgoing on-stack parameter areas, and
  // c) the minimum stack space this function needs to make available for the
  //    functions it calls (a tunable ABI property).
  if (MFI->hasCalls()) {
    unsigned MoreStackForCalls = 0;

    for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
         MBBI != MBBE; ++MBBI)
      for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
           MI != ME; ++MI) {
        if (!MI->isCall())
          continue;

        // Get callee operand.
        const MachineOperand &MO = MI->getOperand(0);

        // Only take account of global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions
        // (encoded with names either starting with "erlang."/"bif_" or not
        // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
        // "_", such as the BIF "suspend_0") as they are executed on another
        // stack.
        if (F->getName().find("erlang.") != StringRef::npos ||
            F->getName().find("bif_") != StringRef::npos ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity =
          F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls = std::max(MoreStackForCalls,
                               (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    MaxStack += MoreStackForCalls;
  }

  // If the stack frame needed is larger than the guaranteed amount, then
  // runtime checks and calls to the "inc_stack_0" BIF should be inserted in
  // the assembly prologue.
  if (MaxStack > Guaranteed) {
    MachineBasicBlock &prologueMBB = MF.front();
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    for (MachineBasicBlock::livein_iterator I = prologueMBB.livein_begin(),
           E = prologueMBB.livein_end(); I != E; I++) {
      stackCheckMBB->addLiveIn(*I);
      incStackMBB->addLiveIn(*I);
    }

    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);

    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg  = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
      SPLimitOffset = 0x90;
    } else {
      SPReg = X86::ESP;
      PReg  = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
      SPLimitOffset = 0x4c;
    }

    ScratchReg = GetScratchRegister(Is64Bit, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create new MBB for StackCheck:
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed to by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_4)).addMBB(&prologueMBB);

    // Create new MBB for IncStack:
    BuildMI(incStackMBB, DL, TII.get(CALLop)).
      addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JLE_4)).addMBB(incStackMBB);

    stackCheckMBB->addSuccessor(&prologueMBB, 99);
    stackCheckMBB->addSuccessor(incStackMBB, 1);
    incStackMBB->addSuccessor(&prologueMBB, 99);
    incStackMBB->addSuccessor(incStackMBB, 1);
  }
#ifdef XDEBUG
  MF.verify();
#endif
}

void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const X86InstrInfo &TII = *TM.getInstrInfo();
  const X86RegisterInfo &RegInfo = *TM.getRegisterInfo();
  unsigned StackPtr = RegInfo.getStackRegister();
  bool reserveCallFrame = hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  bool IsLP64 = STI.isTarget64BitLP64();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'
    // TODO: consider using push / pop instead of sub + store / add
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly.  To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
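
    // Worked example (editor's addition): with StackAlign = 16 an outgoing
    // argument area of 20 bytes rounds up to (20 + 15) / 16 * 16 = 32 bytes,
    // keeping the stack pointer aligned across the call.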

    MachineInstr *New = 0;
    if (Opcode == TII.getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(IsLP64, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back.  We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(IsLP64, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();

    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call, there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !llvm::prior(I)->isCall())
      --I;
    MBB.insert(I, New);
  }
}
virtual const X86RegisterInfo * getRegisterInfo() const
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, DebugLoc dl, const TargetInstrInfo &TII, unsigned ScratchReg, int64_t NumBytes, MachineInstr::MIFlag MIFlags=MachineInstr::NoFlags)
void adjustForSegmentedStacks(MachineFunction &MF) const
unsigned getTargetFlags() const
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=0)
virtual bool needsStackRealignment(const MachineFunction &MF) const
static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register)
.cfi_def_cfa_register modifies a rule for computing CFA. From now on Register will be used instead of...
Definition: MCDwarf.h:328
void setStackSize(uint64_t Size)
bool useLeaForSP() const
Definition: X86Subtarget.h:298
livein_iterator livein_end() const
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:267
bool isTargetWin32() const
Definition: X86Subtarget.h:342
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
void setFlag(MIFlag Flag)
setFlag - Set a MI flag.
Definition: MachineInstr.h:159
static const MachineInstrBuilder & addRegOffset(const MachineInstrBuilder &MIB, unsigned Reg, bool isKill, int Offset)
ItTy next(ItTy it, Dist n)
Definition: STLExtras.h:154
bool isSymbol() const
isSymbol - Tests if this is a MO_ExternalSymbol operand.
int64_t getOffset() const
arg_iterator arg_begin()
Definition: Function.h:410
bool isOSWindows() const
Definition: X86Subtarget.h:336
MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL, const MCInstrDesc &MCID)
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
virtual const TargetFrameLowering * getFrameLowering() const
unsigned getBaseRegister() const
const MCInstrDesc & get(unsigned Opcode) const
Definition: MCInstrInfo.h:48
int getFrameIndexOffset(const MachineFunction &MF, int FI) const
unsigned getStackRegister() const
static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm)
int64_t getObjectOffset(int ObjectIdx) const
static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm)
DebugLoc findDebugLoc(instr_iterator MBBI)
bool hasCalls() const
hasCalls - Return true if the current function has any function calls.
virtual const TargetInstrInfo * getInstrInfo() const
const MCContext & getContext() const
bool hasFP(const MachineFunction &MF) const
unsigned getObjectAlignment(int ObjectIdx) const
getObjectAlignment - Return the alignment of the specified stack object.
bool isTargetLinux() const
Definition: X86Subtarget.h:322
bool needsUnwindTableEntry() const
True if this function needs an unwind table.
Definition: Function.h:293
livein_iterator livein_begin() const
void setOffsetAdjustment(int Adj)
MachineFrameInfo * getFrameInfo()
AttributeSet getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:170
static unsigned GetScratchRegister(bool Is64Bit, const MachineFunction &MF, bool Primary)
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned char TargetFlags=0) const
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:177
bool count(const T &V) const
count - Return true if the element is in the set.
Definition: SmallSet.h:48
virtual const TargetFrameLowering * getFrameLowering() const
Disable redzone.
Definition: Attributes.h:88
void copyImplicitOps(MachineFunction &MF, const MachineInstr *MI)
static const size_t npos
Definition: StringRef.h:45
MachineRegisterInfo & getRegInfo()
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned char TargetFlags=0) const
bool isTarget64BitLP64() const
Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
Definition: X86Subtarget.h:250
DBG_VALUE - a mapping of the llvm.dbg.value intrinsic.
Definition: TargetOpcodes.h:69
void emitCalleeSavedFrameMoves(MachineFunction &MF, MCSymbol *Label, unsigned FramePtr) const
#define I(x, y, z)
Definition: MD5.cpp:54
bool hasMSInlineAsm() const
Returns true if the function contains any MS-style inline assembly.
void addFrameInst(const MCCFIInstruction &Inst)
const TargetMachine & getTarget() const
unsigned getSlotSize() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
virtual const TargetRegisterInfo * getRegisterInfo() const
static bool isMem(const MachineInstr *MI, unsigned Op)
Definition: X86InstrInfo.h:123
bool hasVarSizedObjects() const
size_t find_first_of(char C, size_t From=0) const
Definition: StringRef.h:269
std::vector< std::pair< unsigned, unsigned > >::const_iterator livein_iterator
unsigned EnableSegmentedStacks
unsigned getReg() const
getReg - Returns the register number.
void emitPrologue(MachineFunction &MF) const
CallingConvention
Definition: Dwarf.h:622
bool isTargetWin64() const
Definition: X86Subtarget.h:338
bool isTargetFreeBSD() const
Definition: X86Subtarget.h:312
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
BasicBlockListType::iterator iterator
ItTy prior(ItTy it, Dist n)
Definition: STLExtras.h:167
MachineModuleInfo & getMMI() const
reg_iterator reg_begin(unsigned RegNo) const
const MCRegisterInfo & MRI
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable)
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const
static reg_iterator reg_end()
static bool HasNestArgument(const MachineFunction *MF)
bool hasReservedCallFrame(const MachineFunction &MF) const
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
bool isVarArg() const
Definition: Function.cpp:175
void addSuccessor(MachineBasicBlock *succ, uint32_t weight=0)
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const std::vector< CalleeSavedInfo > &CSI, const TargetRegisterInfo *TRI) const
bool isTargetEnvMacho() const
Definition: X86Subtarget.h:334
virtual const X86InstrInfo * getInstrInfo() const