X86MCInstLower.cpp
//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower X86 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//

15 #include "X86AsmPrinter.h"
18 #include "llvm/ADT/SmallString.h"
20 #include "llvm/CodeGen/StackMaps.h"
21 #include "llvm/IR/Type.h"
22 #include "llvm/MC/MCAsmInfo.h"
23 #include "llvm/MC/MCContext.h"
24 #include "llvm/MC/MCExpr.h"
25 #include "llvm/MC/MCInst.h"
26 #include "llvm/MC/MCInstBuilder.h"
27 #include "llvm/MC/MCStreamer.h"
28 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/Target/Mangler.h"
31 using namespace llvm;
32 
namespace {

/// X86MCInstLower - This class is used to lower a MachineInstr into an MCInst.
class X86MCInstLower {
  MCContext &Ctx;
  const MachineFunction &MF;
  const TargetMachine &TM;
  const MCAsmInfo &MAI;
  X86AsmPrinter &AsmPrinter;
public:
  X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);

  void Lower(const MachineInstr *MI, MCInst &OutMI) const;

  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;

private:
  MachineModuleInfoMachO &getMachOMMI() const;
  Mangler *getMang() const {
    return AsmPrinter.Mang;
  }
};

} // end anonymous namespace
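
// Typical usage, mirroring X86AsmPrinter::EmitInstruction at the bottom of
// this file: build an X86MCInstLower for the current function, lower each
// MachineInstr into a fresh MCInst, and hand it to the streamer:
//
//   X86MCInstLower MCInstLowering(*MF, *this);
//   MCInst TmpInst;
//   MCInstLowering.Lower(MI, TmpInst);
//   OutStreamer.EmitInstruction(TmpInst);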

X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
                               X86AsmPrinter &asmprinter)
: Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()),
  MAI(*TM.getMCAsmInfo()), AsmPrinter(asmprinter) {}

MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
  return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
}


/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::
GetSymbolFromOperand(const MachineOperand &MO) const {
  assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&
         "Isn't a symbol reference");

  SmallString<128> Name;

  if (MO.isGlobal()) {
    const GlobalValue *GV = MO.getGlobal();
    bool isImplicitlyPrivate = false;
    if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB ||
        MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
        MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
        MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
      isImplicitlyPrivate = true;

    getMang()->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
  } else if (MO.isSymbol()) {
    Name += MAI.getGlobalPrefix();
    Name += MO.getSymbolName();
  } else if (MO.isMBB()) {
    Name += MO.getMBB()->getSymbol()->getName();
  }

  // If the target flags on the operand change the name of the symbol, do that
  // before we return the symbol.
  switch (MO.getTargetFlags()) {
  default: break;
  case X86II::MO_DLLIMPORT: {
    // Handle dllimport linkage.
    const char *Prefix = "__imp_";
    Name.insert(Name.begin(), Prefix, Prefix+strlen(Prefix));
    break;
  }
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
    Name += "$non_lazy_ptr";
    MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());

    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getGVStubEntry(Sym);
    if (StubSym.getPointer() == 0) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    }
    return Sym;
  }
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: {
    Name += "$non_lazy_ptr";
    MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getHiddenGVStubEntry(Sym);
    if (StubSym.getPointer() == 0) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    }
    return Sym;
  }
  case X86II::MO_DARWIN_STUB: {
    Name += "$stub";
    MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getFnStubEntry(Sym);
    if (StubSym.getPointer())
      return Sym;

    if (MO.isGlobal()) {
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    } else {
      Name.erase(Name.end()-5, Name.end());
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(Ctx.GetOrCreateSymbol(Name.str()), false);
    }
    return Sym;
  }
  }

  return Ctx.GetOrCreateSymbol(Name.str());
}
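
// For example, on Darwin (where the Mangler prepends '_') a reference to
// global @foo carrying MO_DARWIN_NONLAZY lowers to the symbol
// "_foo$non_lazy_ptr" and records a non-lazy-pointer stub entry pointing back
// at "_foo", while MO_DLLIMPORT simply prepends "__imp_" to the mangled name.
// (The names are illustrative.)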

MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
                                             MCSymbol *Sym) const {
  // FIXME: We would like an efficient form for this, so we don't have to do a
  // lot of extra uniquing.
  const MCExpr *Expr = 0;
  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;

  switch (MO.getTargetFlags()) {
  default: llvm_unreachable("Unknown target flag on GV operand");
  case X86II::MO_NO_FLAG:    // No flag.
    // These affect the name of the symbol, not any suffix.
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DLLIMPORT:
  case X86II::MO_DARWIN_STUB:
    break;

  case X86II::MO_TLVP:      RefKind = MCSymbolRefExpr::VK_TLVP; break;
  case X86II::MO_TLVP_PIC_BASE:
    Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::CreateSub(Expr,
                                   MCSymbolRefExpr::Create(MF.getPICBaseSymbol(),
                                                           Ctx),
                                   Ctx);
    break;
  case X86II::MO_SECREL:    RefKind = MCSymbolRefExpr::VK_SECREL; break;
  case X86II::MO_TLSGD:     RefKind = MCSymbolRefExpr::VK_TLSGD; break;
  case X86II::MO_TLSLD:     RefKind = MCSymbolRefExpr::VK_TLSLD; break;
  case X86II::MO_TLSLDM:    RefKind = MCSymbolRefExpr::VK_TLSLDM; break;
  case X86II::MO_GOTTPOFF:  RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
  case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
  case X86II::MO_TPOFF:     RefKind = MCSymbolRefExpr::VK_TPOFF; break;
  case X86II::MO_DTPOFF:    RefKind = MCSymbolRefExpr::VK_DTPOFF; break;
  case X86II::MO_NTPOFF:    RefKind = MCSymbolRefExpr::VK_NTPOFF; break;
  case X86II::MO_GOTNTPOFF: RefKind = MCSymbolRefExpr::VK_GOTNTPOFF; break;
  case X86II::MO_GOTPCREL:  RefKind = MCSymbolRefExpr::VK_GOTPCREL; break;
  case X86II::MO_GOT:       RefKind = MCSymbolRefExpr::VK_GOT; break;
  case X86II::MO_GOTOFF:    RefKind = MCSymbolRefExpr::VK_GOTOFF; break;
  case X86II::MO_PLT:       RefKind = MCSymbolRefExpr::VK_PLT; break;
  case X86II::MO_PIC_BASE_OFFSET:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
    Expr = MCSymbolRefExpr::Create(Sym, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::CreateSub(Expr,
                            MCSymbolRefExpr::Create(MF.getPICBaseSymbol(), Ctx),
                                   Ctx);
    if (MO.isJTI() && MAI.hasSetDirective()) {
      // If .set directive is supported, use it to reduce the number of
      // relocations the assembler will generate for differences between
      // local labels. This is only safe when the symbols are in the same
      // section so we are restricting it to jumptable references.
      MCSymbol *Label = Ctx.CreateTempSymbol();
      AsmPrinter.OutStreamer.EmitAssignment(Label, Expr);
      Expr = MCSymbolRefExpr::Create(Label, Ctx);
    }
    break;
  }

  if (Expr == 0)
    Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);

  if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
    Expr = MCBinaryExpr::CreateAdd(Expr,
                                   MCConstantExpr::Create(MO.getOffset(), Ctx),
                                   Ctx);
  return MCOperand::CreateExpr(Expr);
}
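
// Example: a jump-table entry lowered with MO_PIC_BASE_OFFSET becomes the
// expression (LJTI0_0 - "L0$pb"); when the target supports .set, that
// difference is first bound to a temporary label so the assembler does not
// need extra relocations. (Symbol names are illustrative.)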


/// LowerUnaryToTwoAddr - R = setb   -> R = sbb R, R
static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) {
  OutMI.setOpcode(NewOpc);
  OutMI.addOperand(OutMI.getOperand(0));
  OutMI.addOperand(OutMI.getOperand(0));
}
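
// For example, the MOV32r0 pseudo below is lowered through this helper with
// NewOpc = XOR32rr: the single register operand is duplicated, so lowering a
// MOV32r0 of EAX yields "xorl %eax, %eax".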

/// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions
/// with a short fixed-register form.
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
  unsigned ImmOp = Inst.getNumOperands() - 1;
  assert(Inst.getOperand(0).isReg() &&
         (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&
         ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
           Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
          Inst.getNumOperands() == 2) && "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(0).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(ImmOp);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
}
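
// Example: "addl $42, %eax" selected as ADD32ri (operands EAX, EAX, 42) is
// rewritten to ADD32i32 carrying only the immediate, i.e. the shorter
// eax-specific encoding; any destination other than AL/AX/EAX/RAX is left
// untouched.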

/// \brief If a movsx instruction has a shorter encoding for the used register
/// simplify the instruction to use it instead.
static void SimplifyMOVSX(MCInst &Inst) {
  unsigned NewOpcode = 0;
  unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg();
  switch (Inst.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction!");
  case X86::MOVSX16rr8:  // movsbw %al, %ax   --> cbtw
    if (Op0 == X86::AX && Op1 == X86::AL)
      NewOpcode = X86::CBW;
    break;
  case X86::MOVSX32rr16: // movswl %ax, %eax  --> cwtl
    if (Op0 == X86::EAX && Op1 == X86::AX)
      NewOpcode = X86::CWDE;
    break;
  case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq
    if (Op0 == X86::RAX && Op1 == X86::EAX)
      NewOpcode = X86::CDQE;
    break;
  }

  if (NewOpcode != 0) {
    Inst = MCInst();
    Inst.setOpcode(NewOpcode);
  }
}
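
// CBW/CWDE/CDQE operate implicitly on the accumulator, so the rewritten
// MCInst deliberately carries no operands; the shrink only fires when both
// registers are the matching accumulator sub-registers.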

/// \brief Simplify things like MOV32rm to MOV32o32a.
static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
                                  unsigned Opcode) {
  // Don't make these simplifications in 64-bit mode; other assemblers don't
  // perform them because they make the code larger.
  if (Printer.getSubtarget().is64Bit())
    return;

  bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg();
  unsigned AddrBase = IsStore;
  unsigned RegOp = IsStore ? 0 : 5;
  unsigned AddrOp = AddrBase + 3;
  assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
         Inst.getOperand(AddrBase + 0).isReg() && // base
         Inst.getOperand(AddrBase + 1).isImm() && // scale
         Inst.getOperand(AddrBase + 2).isReg() && // index register
         (Inst.getOperand(AddrOp).isExpr() ||     // address
          Inst.getOperand(AddrOp).isImm()) &&
         Inst.getOperand(AddrBase + 4).isReg() && // segment
         "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(RegOp).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // Check whether this is an absolute address.
  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
  // to do this here.
  bool Absolute = true;
  if (Inst.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
        Absolute = false;
  }

  if (Absolute &&
      (Inst.getOperand(AddrBase + 0).getReg() != 0 ||
       Inst.getOperand(AddrBase + 2).getReg() != 0 ||
       Inst.getOperand(AddrBase + 4).getReg() != 0 ||
       Inst.getOperand(AddrBase + 1).getImm() != 1))
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(AddrOp);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
}
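
// Example (32-bit only): a MOV32rm of a bare absolute address into EAX (base,
// index and segment registers all zero, scale 1) collapses to MOV32o32a
// carrying just the address operand, i.e. the short moffs-to-eax form.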

void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);

    MCOperand MCOp;
    switch (MO.getType()) {
    default:
      MI->dump();
      llvm_unreachable("unknown operand type");
    case MachineOperand::MO_Register:
      // Ignore all implicit register operands.
      if (MO.isImplicit()) continue;
      MCOp = MCOperand::CreateReg(MO.getReg());
      break;
    case MachineOperand::MO_Immediate:
      MCOp = MCOperand::CreateImm(MO.getImm());
      break;
    case MachineOperand::MO_MachineBasicBlock:
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
      break;
    case MachineOperand::MO_JumpTableIndex:
      MCOp = LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
      break;
    case MachineOperand::MO_ConstantPoolIndex:
      MCOp = LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
      break;
    case MachineOperand::MO_BlockAddress:
      MCOp = LowerSymbolOperand(MO,
                     AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
      break;
    case MachineOperand::MO_RegisterMask:
      // Ignore call clobbers.
      continue;
    }

    OutMI.addOperand(MCOp);
  }

  // Handle a few special cases to eliminate operand modifiers.
ReSimplify:
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;
  case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;

  case X86::MOV32ri64:
    OutMI.setOpcode(X86::MOV32ri);
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of VEX.B
  // if one of the registers is extended, but the other isn't.
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVAPDrr:  NewOpc = X86::VMOVAPDrr_REV;  break;
      case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr:  NewOpc = X86::VMOVAPSrr_REV;  break;
      case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr:  NewOpc = X86::VMOVDQArr_REV;  break;
      case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr:  NewOpc = X86::VMOVDQUrr_REV;  break;
      case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr:  NewOpc = X86::VMOVUPDrr_REV;  break;
      case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr:  NewOpc = X86::VMOVUPSrr_REV;  break;
      case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
      case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses. As such, truncate
  // off all but the first operand (the callee). FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    OutMI = MCInst();
    OutMI.setOpcode(X86::RET);
    break;
  }

  // TAILJMPd, TAILJMPd64 - Lower to the correct jump instructions.
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64: {
    unsigned Opcode;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::TAILJMPr: Opcode = X86::JMP32r; break;
    case X86::TAILJMPd:
    case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
    }

    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  // These are pseudo-ops for OR to help with the OR->ADD transformation. We do
  // this with an ugly goto in case the resultant OR uses EAX and needs the
  // short form.
  case X86::ADD16rr_DB:   OutMI.setOpcode(X86::OR16rr);   goto ReSimplify;
  case X86::ADD32rr_DB:   OutMI.setOpcode(X86::OR32rr);   goto ReSimplify;
  case X86::ADD64rr_DB:   OutMI.setOpcode(X86::OR64rr);   goto ReSimplify;
  case X86::ADD16ri_DB:   OutMI.setOpcode(X86::OR16ri);   goto ReSimplify;
  case X86::ADD32ri_DB:   OutMI.setOpcode(X86::OR32ri);   goto ReSimplify;
  case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
  case X86::ADD16ri8_DB:  OutMI.setOpcode(X86::OR16ri8);  goto ReSimplify;
  case X86::ADD32ri8_DB:  OutMI.setOpcode(X86::OR32ri8);  goto ReSimplify;
  case X86::ADD64ri8_DB:  OutMI.setOpcode(X86::OR64ri8);  goto ReSimplify;

  // The assembler backend wants to see branches in their small form and relax
  // them to their large form. The JIT can only handle the large form because
  // it does not do relaxation. For now, translate the large form to the
  // small one here.
  case X86::JMP_4: OutMI.setOpcode(X86::JMP_1); break;
  case X86::JO_4:  OutMI.setOpcode(X86::JO_1);  break;
  case X86::JNO_4: OutMI.setOpcode(X86::JNO_1); break;
  case X86::JB_4:  OutMI.setOpcode(X86::JB_1);  break;
  case X86::JAE_4: OutMI.setOpcode(X86::JAE_1); break;
  case X86::JE_4:  OutMI.setOpcode(X86::JE_1);  break;
  case X86::JNE_4: OutMI.setOpcode(X86::JNE_1); break;
  case X86::JBE_4: OutMI.setOpcode(X86::JBE_1); break;
  case X86::JA_4:  OutMI.setOpcode(X86::JA_1);  break;
  case X86::JS_4:  OutMI.setOpcode(X86::JS_1);  break;
  case X86::JNS_4: OutMI.setOpcode(X86::JNS_1); break;
  case X86::JP_4:  OutMI.setOpcode(X86::JP_1);  break;
  case X86::JNP_4: OutMI.setOpcode(X86::JNP_1); break;
  case X86::JL_4:  OutMI.setOpcode(X86::JL_1);  break;
  case X86::JGE_4: OutMI.setOpcode(X86::JGE_1); break;
  case X86::JLE_4: OutMI.setOpcode(X86::JLE_1); break;
  case X86::JG_4:  OutMI.setOpcode(X86::JG_1);  break;

  // Atomic load and store require a separate pseudo-inst because Acquire
  // implies mayStore and Release implies mayLoad; fix these to regular MOV
  // instructions here.
  case X86::ACQUIRE_MOV8rm:  OutMI.setOpcode(X86::MOV8rm);  goto ReSimplify;
  case X86::ACQUIRE_MOV16rm: OutMI.setOpcode(X86::MOV16rm); goto ReSimplify;
  case X86::ACQUIRE_MOV32rm: OutMI.setOpcode(X86::MOV32rm); goto ReSimplify;
  case X86::ACQUIRE_MOV64rm: OutMI.setOpcode(X86::MOV64rm); goto ReSimplify;
  case X86::RELEASE_MOV8mr:  OutMI.setOpcode(X86::MOV8mr);  goto ReSimplify;
  case X86::RELEASE_MOV16mr: OutMI.setOpcode(X86::MOV16mr); goto ReSimplify;
  case X86::RELEASE_MOV32mr: OutMI.setOpcode(X86::MOV32mr); goto ReSimplify;
  case X86::RELEASE_MOV64mr: OutMI.setOpcode(X86::MOV64mr); goto ReSimplify;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao8); break;
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o8a); break;
  case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao16); break;
  case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o16a); break;
  case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;
  case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;

  case X86::ADC8ri:     SimplifyShortImmForm(OutMI, X86::ADC8i8);    break;
  case X86::ADC16ri:    SimplifyShortImmForm(OutMI, X86::ADC16i16);  break;
  case X86::ADC32ri:    SimplifyShortImmForm(OutMI, X86::ADC32i32);  break;
  case X86::ADC64ri32:  SimplifyShortImmForm(OutMI, X86::ADC64i32);  break;
  case X86::ADD8ri:     SimplifyShortImmForm(OutMI, X86::ADD8i8);    break;
  case X86::ADD16ri:    SimplifyShortImmForm(OutMI, X86::ADD16i16);  break;
  case X86::ADD32ri:    SimplifyShortImmForm(OutMI, X86::ADD32i32);  break;
  case X86::ADD64ri32:  SimplifyShortImmForm(OutMI, X86::ADD64i32);  break;
  case X86::AND8ri:     SimplifyShortImmForm(OutMI, X86::AND8i8);    break;
  case X86::AND16ri:    SimplifyShortImmForm(OutMI, X86::AND16i16);  break;
  case X86::AND32ri:    SimplifyShortImmForm(OutMI, X86::AND32i32);  break;
  case X86::AND64ri32:  SimplifyShortImmForm(OutMI, X86::AND64i32);  break;
  case X86::CMP8ri:     SimplifyShortImmForm(OutMI, X86::CMP8i8);    break;
  case X86::CMP16ri:    SimplifyShortImmForm(OutMI, X86::CMP16i16);  break;
  case X86::CMP32ri:    SimplifyShortImmForm(OutMI, X86::CMP32i32);  break;
  case X86::CMP64ri32:  SimplifyShortImmForm(OutMI, X86::CMP64i32);  break;
  case X86::OR8ri:      SimplifyShortImmForm(OutMI, X86::OR8i8);     break;
  case X86::OR16ri:     SimplifyShortImmForm(OutMI, X86::OR16i16);   break;
  case X86::OR32ri:     SimplifyShortImmForm(OutMI, X86::OR32i32);   break;
  case X86::OR64ri32:   SimplifyShortImmForm(OutMI, X86::OR64i32);   break;
  case X86::SBB8ri:     SimplifyShortImmForm(OutMI, X86::SBB8i8);    break;
  case X86::SBB16ri:    SimplifyShortImmForm(OutMI, X86::SBB16i16);  break;
  case X86::SBB32ri:    SimplifyShortImmForm(OutMI, X86::SBB32i32);  break;
  case X86::SBB64ri32:  SimplifyShortImmForm(OutMI, X86::SBB64i32);  break;
  case X86::SUB8ri:     SimplifyShortImmForm(OutMI, X86::SUB8i8);    break;
  case X86::SUB16ri:    SimplifyShortImmForm(OutMI, X86::SUB16i16);  break;
  case X86::SUB32ri:    SimplifyShortImmForm(OutMI, X86::SUB32i32);  break;
  case X86::SUB64ri32:  SimplifyShortImmForm(OutMI, X86::SUB64i32);  break;
  case X86::TEST8ri:    SimplifyShortImmForm(OutMI, X86::TEST8i8);   break;
  case X86::TEST16ri:   SimplifyShortImmForm(OutMI, X86::TEST16i16); break;
  case X86::TEST32ri:   SimplifyShortImmForm(OutMI, X86::TEST32i32); break;
  case X86::TEST64ri32: SimplifyShortImmForm(OutMI, X86::TEST64i32); break;
  case X86::XOR8ri:     SimplifyShortImmForm(OutMI, X86::XOR8i8);    break;
  case X86::XOR16ri:    SimplifyShortImmForm(OutMI, X86::XOR16i16);  break;
  case X86::XOR32ri:    SimplifyShortImmForm(OutMI, X86::XOR32i32);  break;
  case X86::XOR64ri32:  SimplifyShortImmForm(OutMI, X86::XOR64i32);  break;

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;
  }
}

static void LowerTlsAddr(MCStreamer &OutStreamer,
                         X86MCInstLower &MCInstLowering,
                         const MachineInstr &MI) {

  bool is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
                  MI.getOpcode() == X86::TLS_base_addr64;

  bool needsPadding = MI.getOpcode() == X86::TLS_addr64;

  MCContext &context = OutStreamer.getContext();

  if (needsPadding)
    OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX));

  MCSymbolRefExpr::VariantKind SRVK;
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86::TLS_base_addr32:
    SRVK = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86::TLS_base_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSLD;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3));
  const MCSymbolRefExpr *symRef = MCSymbolRefExpr::Create(sym, SRVK, context);

  MCInst LEA;
  if (is64Bits) {
    LEA.setOpcode(X86::LEA64r);
    LEA.addOperand(MCOperand::CreateReg(X86::RDI)); // dest
    LEA.addOperand(MCOperand::CreateReg(X86::RIP)); // base
    LEA.addOperand(MCOperand::CreateImm(1));        // scale
    LEA.addOperand(MCOperand::CreateReg(0));        // index
    LEA.addOperand(MCOperand::CreateExpr(symRef));  // disp
    LEA.addOperand(MCOperand::CreateReg(0));        // seg
  } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::CreateReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::CreateReg(X86::EBX)); // base
    LEA.addOperand(MCOperand::CreateImm(1));        // scale
    LEA.addOperand(MCOperand::CreateReg(0));        // index
    LEA.addOperand(MCOperand::CreateExpr(symRef));  // disp
    LEA.addOperand(MCOperand::CreateReg(0));        // seg
  } else {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::CreateReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::CreateReg(0));        // base
    LEA.addOperand(MCOperand::CreateImm(1));        // scale
    LEA.addOperand(MCOperand::CreateReg(X86::EBX)); // index
    LEA.addOperand(MCOperand::CreateExpr(symRef));  // disp
    LEA.addOperand(MCOperand::CreateReg(0));        // seg
  }
  OutStreamer.EmitInstruction(LEA);

  if (needsPadding) {
    OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    OutStreamer.EmitInstruction(MCInstBuilder(X86::REX64_PREFIX));
  }

  StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr";
  MCSymbol *tlsGetAddr = context.GetOrCreateSymbol(name);
  const MCSymbolRefExpr *tlsRef =
    MCSymbolRefExpr::Create(tlsGetAddr,
                            MCSymbolRefExpr::VK_PLT,
                            context);

  OutStreamer.EmitInstruction(MCInstBuilder(is64Bits ? X86::CALL64pcrel32
                                                     : X86::CALLpcrel32)
    .addExpr(tlsRef));
}
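
// For TLS_addr64 this emits the canonical general-dynamic sequence, roughly:
//   data16 leaq x@TLSGD(%rip), %rdi
//   data16 data16 rex64 callq __tls_get_addr@PLT
// The prefix padding keeps the sequence at the fixed size linkers expect when
// relaxing the TLS access model.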

static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
parseMemoryOperand(StackMaps::Location::LocationType LocTy, unsigned Size,
                   MachineInstr::const_mop_iterator MOI,
                   MachineInstr::const_mop_iterator MOE) {

  typedef StackMaps::Location Location;

  assert(std::distance(MOI, MOE) >= 5 && "Too few operands to encode mem op.");

  const MachineOperand &Base = *MOI;
  const MachineOperand &Scale = *(++MOI);
  const MachineOperand &Index = *(++MOI);
  const MachineOperand &Disp = *(++MOI);
  const MachineOperand &ZeroReg = *(++MOI);

  // Sanity check for supported operand format.
  assert(Base.isReg() &&
         Scale.isImm() && Scale.getImm() == 1 &&
         Index.isReg() && Index.getReg() == 0 &&
         Disp.isImm() && ZeroReg.isReg() && (ZeroReg.getReg() == 0) &&
         "Unsupported x86 memory operand sequence.");
  (void)Scale;
  (void)Index;
  (void)ZeroReg;

  return std::make_pair(
    Location(LocTy, Size, Base.getReg(), Disp.getImm()), ++MOI);
}
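
// The five operands consumed here are the standard x86 memory reference
// (base, scale, index, displacement, segment), so a frame slot such as
// -8(%rbp) is recorded as Location(LocTy, Size, /*Reg=*/RBP, /*Offset=*/-8).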

std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
X86AsmPrinter::stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
                                     MachineInstr::const_mop_iterator MOE,
                                     const TargetMachine &TM) {

  typedef StackMaps::Location Location;

  const MachineOperand &MOP = *MOI;
  assert(!MOP.isRegMask() && (!MOP.isReg() || !MOP.isImplicit()) &&
         "Register mask and implicit operands should not be processed.");

  if (MOP.isImm()) {
    // Verify anyregcc
    // [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...

    switch (MOP.getImm()) {
    default: llvm_unreachable("Unrecognized operand type.");
    case StackMaps::DirectMemRefOp: {
      unsigned Size = TM.getDataLayout()->getPointerSizeInBits();
      assert((Size % 8) == 0 && "Need pointer size in bytes.");
      Size /= 8;
      return parseMemoryOperand(StackMaps::Location::Direct, Size,
                                llvm::next(MOI), MOE);
    }
    case StackMaps::IndirectMemRefOp: {
      ++MOI;
      int64_t Size = MOI->getImm();
      assert(Size > 0 && "Need a valid size for indirect memory locations.");
      return parseMemoryOperand(StackMaps::Location::Indirect, Size,
                                llvm::next(MOI), MOE);
    }
    case StackMaps::ConstantOp: {
      ++MOI;
      assert(MOI->isImm() && "Expected constant operand.");
      int64_t Imm = MOI->getImm();
      return std::make_pair(
        Location(Location::Constant, sizeof(int64_t), 0, Imm), ++MOI);
    }
    }
  }

  // Otherwise this is a reg operand. The physical register number will
  // ultimately be encoded as a DWARF regno. The stack map also records the
  // size of a spill slot that can hold the register content. (The runtime can
  // track the actual size of the data type if it needs to.)
  assert(MOP.isReg() && "Expected register operand here.");
  assert(TargetRegisterInfo::isPhysicalRegister(MOP.getReg()) &&
         "Virtreg operands should have been rewritten before now.");
  const TargetRegisterClass *RC =
    TM.getRegisterInfo()->getMinimalPhysRegClass(MOP.getReg());
  assert(!MOP.getSubReg() && "Physical subreg still around.");
  return std::make_pair(
    Location(Location::Register, RC->getSize(), MOP.getReg(), 0), ++MOI);
}
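
// Example: the operand pair (StackMaps::ConstantOp, 42) in a stackmap's
// variable-argument list becomes Location(Constant, sizeof(int64_t), 0, 42),
// while a live physical register such as RBX is recorded as a Register
// location sized by its minimal register class.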

// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
static void LowerSTACKMAP(MCStreamer &OutStreamer,
                          StackMaps &SM,
                          const MachineInstr &MI) {
  unsigned NumNOPBytes = MI.getOperand(1).getImm();
  SM.recordStackMap(MI);
  // Emit padding.
  // FIXME: These nops ensure that the stackmap's shadow is covered by
  // instructions from the same basic block, but the nops should not be
  // necessary if instructions from the same block follow the stackmap.
  for (unsigned i = 0; i < NumNOPBytes; ++i)
    OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
}

// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
static void LowerPATCHPOINT(MCStreamer &OutStreamer,
                            StackMaps &SM,
                            const MachineInstr &MI) {
  SM.recordPatchPoint(MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  unsigned EncodedBytes = 0;
  int64_t CallTarget = opers.getMetaOper(PatchPointOpers::TargetPos).getImm();
  if (CallTarget) {
    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used.
    // We conservatively assume that it is 12 bytes and emit in worst case one
    // extra NOP byte.
    EncodedBytes = 12;
    OutStreamer.EmitInstruction(MCInstBuilder(X86::MOV64ri)
                                .addReg(MI.getOperand(ScratchIdx).getReg())
                                .addImm(CallTarget));
    OutStreamer.EmitInstruction(MCInstBuilder(X86::CALL64r)
                                .addReg(MI.getOperand(ScratchIdx).getReg()));
  }
  // Emit padding.
  unsigned NumBytes = opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");

  for (unsigned i = EncodedBytes; i < NumBytes; ++i)
    OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
}
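
// The 12-13 byte figure above is: movabsq $target, %scratch is 10 bytes and
// callq *%scratch is 2 bytes (3 with a REX prefix when the scratch register
// is r8-r15), so assuming 12 over-emits at most one trailing NOP.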

void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  X86MCInstLower MCInstLowering(*MF, *this);
  switch (MI->getOpcode()) {
  case TargetOpcode::DBG_VALUE:
    llvm_unreachable("Should be handled target independently");

  // Emit nothing here but a comment if we can.
  case X86::Int_MemBarrier:
    if (OutStreamer.hasRawTextSupport())
      OutStreamer.EmitRawText(StringRef("\t#MEMBARRIER"));
    return;


  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // Lower these as normal, but add some comments.
    unsigned Reg = MI->getOperand(0).getReg();
    OutStreamer.AddComment(StringRef("eh_return, addr: %") +
                           X86ATTInstPrinter::getRegisterName(Reg));
    break;
  }
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64:
    // Lower these as normal, but add some comments.
    OutStreamer.AddComment("TAILCALL");
    break;

  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
    return LowerTlsAddr(OutStreamer, MCInstLowering, *MI);

  case X86::MOVPC32r: {
    // This is a pseudo op for a two instruction sequence with a label, which
    // looks like:
    //     call "L1$pb"
    // "L1$pb":
    //     popl %esi

    // Emit the call.
    MCSymbol *PICBase = MF->getPICBaseSymbol();
    // FIXME: We would like an efficient form for this, so we don't have to do a
    // lot of extra uniquing.
    OutStreamer.EmitInstruction(MCInstBuilder(X86::CALLpcrel32)
      .addExpr(MCSymbolRefExpr::Create(PICBase, OutContext)));

    // Emit the label.
    OutStreamer.EmitLabel(PICBase);

    // popl $reg
    OutStreamer.EmitInstruction(MCInstBuilder(X86::POP32r)
      .addReg(MI->getOperand(0).getReg()));
    return;
  }

  case X86::ADD32ri: {
    // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
    if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
      break;

    // Okay, we have something like:
    //  EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)

    // For this, we want to print something like:
    //   MYGLOBAL + (. - PICBASE)
    // However, we can't generate a ".", so just emit a new label here and refer
    // to it.
    MCSymbol *DotSym = OutContext.CreateTempSymbol();
    OutStreamer.EmitLabel(DotSym);

    // Now that we have emitted the label, lower the complex operand expression.
    MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));

    const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext);
    const MCExpr *PICBase =
      MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), OutContext);
    DotExpr = MCBinaryExpr::CreateSub(DotExpr, PICBase, OutContext);

    DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym, OutContext),
                                      DotExpr, OutContext);

    OutStreamer.EmitInstruction(MCInstBuilder(X86::ADD32ri)
      .addReg(MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg())
      .addExpr(DotExpr));
    return;
  }

  case TargetOpcode::STACKMAP:
    return LowerSTACKMAP(OutStreamer, SM, *MI);

  case TargetOpcode::PATCHPOINT:
    return LowerPATCHPOINT(OutStreamer, SM, *MI);

  case X86::MORESTACK_RET:
    OutStreamer.EmitInstruction(MCInstBuilder(X86::RET));
    return;

  case X86::MORESTACK_RET_RESTORE_R10:
    // Return, then restore R10.
    OutStreamer.EmitInstruction(MCInstBuilder(X86::RET));
    OutStreamer.EmitInstruction(MCInstBuilder(X86::MOV64rr)
      .addReg(X86::R10)
      .addReg(X86::RAX));
    return;
  }

  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);
  OutStreamer.EmitInstruction(TmpInst);
}