LLVM API Documentation

 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
AArch64MCCodeEmitter.cpp
Go to the documentation of this file.
1 //=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the AArch64MCCodeEmitter class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #define DEBUG_TYPE "mccodeemitter"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/MC/MCCodeEmitter.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCInstrInfo.h"
23 #include "llvm/MC/MCRegisterInfo.h"
27 
28 using namespace llvm;
29 
30 namespace {
// AArch64MCCodeEmitter - Concrete MCCodeEmitter that lowers AArch64 MCInsts
// to their 32-bit binary encodings, recording MCFixups for any symbolic
// operands that cannot be resolved at encode time.
//
// NOTE(review): this text was extracted from a Doxygen page; several
// declaration lines (parameter lists) appear to have been dropped by the
// extraction (e.g. the Fixups parameter lines of getAddSubImmOpValue and of
// the 4-argument getOffsetUImm12OpValue) -- verify against the original file.
31 class AArch64MCCodeEmitter : public MCCodeEmitter {
  // Copying is disallowed: the emitter holds a reference to an MCContext.
32  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
33  void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  // Context used to look up register info and to build MCExprs.
34  MCContext &Ctx;
35 
36 public:
37  AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}
38 
39  ~AArch64MCCodeEmitter() {}
40 
  // Encode the 12-bit immediate of an ADD/SUB, attaching :lo12:-style
  // fixups for symbolic operands.
41  unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
43 
  // Encode the 21-bit page-offset label operand of an ADRP.
44  unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
45  SmallVectorImpl<MCFixup> &Fixups) const;
46 
  // TableGen-facing wrapper: forwards to the runtime-MemSize overload below.
47  template<int MemSize>
48  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
49  SmallVectorImpl<MCFixup> &Fixups) const {
50  return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
51  }
52 
  // Encode the scaled unsigned 12-bit load/store offset; MemSize (in bytes)
  // selects which :lo12: fixup family applies.
53  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
55  int MemSize) const;
56 
  // Encode the immr/imms fields for the "LSL #imm" alias of 32/64-bit UBFM.
57  unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
58  SmallVectorImpl<MCFixup> &Fixups) const;
59  unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
60  SmallVectorImpl<MCFixup> &Fixups) const;
61 
  // Vector shift-right immediates are stored as (element-width - shift).
62  unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
63  SmallVectorImpl<MCFixup> &Fixups) const;
64  unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
65  SmallVectorImpl<MCFixup> &Fixups) const;
66  unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
67  SmallVectorImpl<MCFixup> &Fixups) const;
68  unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
69  SmallVectorImpl<MCFixup> &Fixups) const;
70 
  // Vector shift-left immediates are stored as (shift - element-width).
71  unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op,
72  SmallVectorImpl<MCFixup> &Fixups) const;
73  unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op,
74  SmallVectorImpl<MCFixup> &Fixups) const;
75  unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op,
76  SmallVectorImpl<MCFixup> &Fixups) const;
77  unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op,
78  SmallVectorImpl<MCFixup> &Fixups) const;
79 
80  // Labels are handled mostly the same way: a symbol is needed, and
81  // just gets some fixup attached.
82  template<AArch64::Fixups fixupDesired>
83  unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
84  SmallVectorImpl<MCFixup> &Fixups) const;
85 
  // Encode the PC-relative label of a load-literal instruction.
86  unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
87  SmallVectorImpl<MCFixup> &Fixups) const;
88 
89 
  // Encode the imm16 + shift pair of a MOVZ/MOVN/MOVK.
90  unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
91  SmallVectorImpl<MCFixup> &Fixups) const;
92 
93 
  // Common helper: return an immediate directly, or record a fixup of the
  // given kind against a symbolic operand and return 0.
94  unsigned getAddressWithFixup(const MCOperand &MO,
95  unsigned FixupKind,
96  SmallVectorImpl<MCFixup> &Fixups) const;
97 
98 
99  // getBinaryCodeForInstr - TableGen'erated function for getting the
100  // binary encoding for an instruction.
101  uint64_t getBinaryCodeForInstr(const MCInst &MI,
102  SmallVectorImpl<MCFixup> &Fixups) const;
103 
104  /// getMachineOpValue - Return binary encoding of operand. If the machine
105  /// operand requires relocation, record the relocation and return zero.
106  unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
107  SmallVectorImpl<MCFixup> &Fixups) const;
108 
109 
  // Write a single raw byte to the output stream.
110  void EmitByte(unsigned char C, raw_ostream &OS) const {
111  OS << (char)C;
112  }
113 
  // Write a 32-bit instruction word to the stream, least-significant
  // byte first (AArch64 instructions are little-endian).
114  void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
115  // Output the constant in little endian byte order.
116  for (unsigned i = 0; i != 4; ++i) {
117  EmitByte(Val & 0xff, OS);
118  Val >>= 8;
119  }
120  }
121 
122 
  // MCCodeEmitter interface: emit MI's encoding (plus fixups) to OS.
123  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
124  SmallVectorImpl<MCFixup> &Fixups) const;
125 
  // Post-encoder hooks that patch bits TableGen cannot express directly.
126  template<int hasRs, int hasRt2> unsigned
127  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;
128 
129  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;
130 
131  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;
132 
133 
134 };
135 
136 } // end anonymous namespace
137 
// getAddressWithFixup - Shared encoder path for address-like operands.
// Immediates are already resolved and returned as-is; symbolic expressions
// get a fixup of the requested kind recorded and encode as 0 for now.
//
// NOTE(review): the extraction dropped the line declaring the Fixups
// parameter and closing the signature (original line 140) -- the body below
// clearly uses `Fixups`; confirm against the original file.
138 unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
139  unsigned FixupKind,
141  if (!MO.isExpr()) {
142  // This can occur for manually decoded or constructed MCInsts, but neither
143  // the assembly-parser nor instruction selection will currently produce an
144  // MCInst that's not a symbol reference.
145  assert(MO.isImm() && "Unexpected address requested");
146  return MO.getImm();
147  }
148 
  // Symbolic operand: leave the field zero and let the assembler backend
  // resolve it via the recorded fixup.
149  const MCExpr *Expr = MO.getExpr();
150  MCFixupKind Kind = MCFixupKind(FixupKind);
151  Fixups.push_back(MCFixup::Create(0, Expr, Kind));
152 
153  return 0;
154 }
155 
// getOffsetUImm12OpValue - Encode the scaled, unsigned 12-bit offset of a
// load/store. A plain immediate is returned directly; a symbolic operand
// selects a fixup kind from a table indexed by log2(MemSize), where MemSize
// is the access size in bytes.
//
// NOTE(review): the Doxygen extraction dropped every `case` label of this
// switch and the contents of the FixupsBySize tables (only the first entry,
// fixup_a64_ldst8_lo12, survived). The visible asserts show each arm bounds
// MemSize (== 8, <= 8, or <= 16); restore the dropped lines from the
// original file before compiling.
156 unsigned AArch64MCCodeEmitter::
157 getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
158  SmallVectorImpl<MCFixup> &Fixups,
159  int MemSize) const {
160  const MCOperand &ImmOp = MI.getOperand(OpIdx);
161  if (ImmOp.isImm())
162  return ImmOp.getImm();
163 
164  assert(ImmOp.isExpr() && "Unexpected operand type");
165  const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
166  unsigned FixupKind;
167 
168 
  // Dispatch on the :modifier: attached to the expression to pick the
  // matching relocation/fixup family for this access size.
169  switch (Expr->getKind()) {
170  default: llvm_unreachable("Unexpected operand modifier");
172  static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
177  assert(MemSize <= 16 && "Invalid fixup for operation");
178  FixupKind = FixupsBySize[Log2_32(MemSize)];
179  break;
180  }
182  assert(MemSize == 8 && "Invalid fixup for operation");
184  break;
186  static const unsigned FixupsBySize[] = {
191  };
192  assert(MemSize <= 8 && "Invalid fixup for operation");
193  FixupKind = FixupsBySize[Log2_32(MemSize)];
194  break;
195  }
197  static const unsigned FixupsBySize[] = {
202  };
203  assert(MemSize <= 8 && "Invalid fixup for operation");
204  FixupKind = FixupsBySize[Log2_32(MemSize)];
205  break;
206  }
208  assert(MemSize == 8 && "Invalid fixup for operation");
210  break;
212  static const unsigned FixupsBySize[] = {
217  };
218  assert(MemSize <= 8 && "Invalid fixup for operation");
219  FixupKind = FixupsBySize[Log2_32(MemSize)];
220  break;
221  }
223  static const unsigned FixupsBySize[] = {
228  };
229  assert(MemSize <= 8 && "Invalid fixup for operation");
230  FixupKind = FixupsBySize[Log2_32(MemSize)];
231  break;
232  }
234  assert(MemSize == 8 && "Invalid fixup for operation");
236  break;
237  }
238 
239  return getAddressWithFixup(ImmOp, FixupKind, Fixups);
240 }
241 
// getAddSubImmOpValue - Encode the 12-bit immediate of an ADD/SUB
// instruction. Resolved immediates pass through; symbolic operands map
// their AArch64MCExpr modifier (:lo12:, :dtprel_hi12:, :tprel_lo12:, ...)
// to the corresponding add-immediate fixup kind.
//
// NOTE(review): the `case VK_AARCH64_...:` labels of this switch were
// dropped by the extraction; only the FixupKind assignments survived.
// Restore from the original file before compiling.
242 unsigned
243 AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
244  SmallVectorImpl<MCFixup> &Fixups) const {
245  const MCOperand &MO = MI.getOperand(OpIdx);
246  if (MO.isImm())
247  return static_cast<unsigned>(MO.getImm());
248 
249  assert(MO.isExpr());
250 
251  unsigned FixupKind = 0;
252  switch(cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
253  default: llvm_unreachable("Invalid expression modifier");
255  FixupKind = AArch64::fixup_a64_add_lo12; break;
257  FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
259  FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
261  FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
263  FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
265  FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
267  FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
269  FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
270  }
271 
  // Attach the chosen fixup (or return the immediate) via the common path.
272  return getAddressWithFixup(MO, FixupKind, Fixups);
273 }
274 
// getAdrpLabelOpValue - Encode the page-relative label operand of an ADRP.
// A plain immediate passes through; otherwise the expression's variant kind
// (or VK_AARCH64_None for a bare symbol) selects the ADR_PREL_PG-style
// fixup to record.
//
// NOTE(review): the extraction dropped both the `case` labels and the
// `FixupKind = ...` assignment lines of this switch (only the `break`s
// remain). Restore from the original file before compiling.
275 unsigned
276 AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
277  SmallVectorImpl<MCFixup> &Fixups) const {
278 
279  const MCOperand &MO = MI.getOperand(OpIdx);
280  if (MO.isImm())
281  return static_cast<unsigned>(MO.getImm());
282 
283  assert(MO.isExpr());
284 
  // A bare MCExpr (no AArch64 modifier) is treated as VK_AARCH64_None.
285  unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
286  if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
287  Modifier = Expr->getKind();
288 
289  unsigned FixupKind = 0;
290  switch(Modifier) {
293  break;
296  break;
299  break;
302  break;
303  default:
304  llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
305  }
306 
307  return getAddressWithFixup(MO, FixupKind, Fixups);
308 }
309 
310 unsigned
311 AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
312  SmallVectorImpl<MCFixup> &Fixups) const {
313 
314  const MCOperand &MO = MI.getOperand(OpIdx);
315  assert(MO.isImm() && "Only immediate expected for shift");
316 
317  return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
318 }
319 
320 unsigned
321 AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
322  SmallVectorImpl<MCFixup> &Fixups) const {
323 
324  const MCOperand &MO = MI.getOperand(OpIdx);
325  assert(MO.isImm() && "Only immediate expected for shift");
326 
327  return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
328 }
329 
330 unsigned AArch64MCCodeEmitter::getShiftRightImm8(
331  const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
332  return 8 - MI.getOperand(Op).getImm();
333 }
334 
335 unsigned AArch64MCCodeEmitter::getShiftRightImm16(
336  const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
337  return 16 - MI.getOperand(Op).getImm();
338 }
339 
340 unsigned AArch64MCCodeEmitter::getShiftRightImm32(
341  const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
342  return 32 - MI.getOperand(Op).getImm();
343 }
344 
345 unsigned AArch64MCCodeEmitter::getShiftRightImm64(
346  const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
347  return 64 - MI.getOperand(Op).getImm();
348 }
349 
350 unsigned AArch64MCCodeEmitter::getShiftLeftImm8(
351  const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
352  return MI.getOperand(Op).getImm() - 8;
353 }
354 
355 unsigned AArch64MCCodeEmitter::getShiftLeftImm16(
356  const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
357  return MI.getOperand(Op).getImm() - 16;
358 }
359 
360 unsigned AArch64MCCodeEmitter::getShiftLeftImm32(
361  const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
362  return MI.getOperand(Op).getImm() - 32;
363 }
364 
365 unsigned AArch64MCCodeEmitter::getShiftLeftImm64(
366  const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
367  return MI.getOperand(Op).getImm() - 64;
368 }
369 
370 template<AArch64::Fixups fixupDesired> unsigned
371 AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
372  unsigned OpIdx,
373  SmallVectorImpl<MCFixup> &Fixups) const {
374  const MCOperand &MO = MI.getOperand(OpIdx);
375 
376  if (MO.isExpr())
377  return getAddressWithFixup(MO, fixupDesired, Fixups);
378 
379  assert(MO.isImm());
380  return MO.getImm();
381 }
382 
// getLoadLitLabelOpValue - Encode the PC-relative label of a load-literal
// instruction. An AArch64MCExpr operand's modifier is validated by the
// assert; a plain MCExpr gets the generic fixup_a64_ld_prel fixup.
//
// NOTE(review): the extraction dropped the expected-kind operand of the
// assert (original line 397) and the FixupKind assignment for the
// AArch64MCExpr branch (original line 399), so FixupKind is only visibly
// assigned in the else-branch here. Restore from the original file.
383 unsigned
384 AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
385  unsigned OpIdx,
386  SmallVectorImpl<MCFixup> &Fixups) const {
387  const MCOperand &MO = MI.getOperand(OpIdx);
388 
389  if (MO.isImm())
390  return MO.getImm();
391 
392  assert(MO.isExpr());
393 
394  unsigned FixupKind;
395  if (isa<AArch64MCExpr>(MO.getExpr())) {
396  assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind()
398  && "Invalid symbol modifier for literal load");
400  } else {
401  FixupKind = AArch64::fixup_a64_ld_prel;
402  }
403 
404  return getAddressWithFixup(MO, FixupKind, Fixups);
405 }
406 
407 
408 unsigned
409 AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
410  const MCOperand &MO,
411  SmallVectorImpl<MCFixup> &Fixups) const {
412  if (MO.isReg()) {
413  return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
414  } else if (MO.isImm()) {
415  return static_cast<unsigned>(MO.getImm());
416  }
417 
418  llvm_unreachable("Unable to encode MCOperand!");
419  return 0;
420 }
421 
// getMoveWideImmOpValue - Encode the imm16 + hw(shift) fields of a
// MOVZ/MOVN/MOVK. The shift operand (OpIdx + 1) lands in bits [17:16] and
// above; a resolved imm16 is OR'd in directly, while a symbolic imm16 maps
// its AArch64MCExpr modifier (:abs_gN:, :dtprel_gN:, :tprel_gN:, ...) to
// the matching movw fixup kind.
//
// NOTE(review): all `case VK_AARCH64_...:` labels of this switch were
// dropped by the extraction; only the requestedFixup assignments remain.
// Restore from the original file before compiling.
422 unsigned
423 AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
424  SmallVectorImpl<MCFixup> &Fixups) const {
425  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
426  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);
427 
  // Shift ("hw") field sits immediately above the 16-bit immediate.
428  unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;
429 
430  if (UImm16MO.isImm()) {
431  Result |= UImm16MO.getImm();
432  return Result;
433  }
434 
435  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
436  AArch64::Fixups requestedFixup;
437  switch (A64E->getKind()) {
438  default: llvm_unreachable("unexpected expression modifier");
440  requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
442  requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
444  requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
446  requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
448  requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
450  requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
452  requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
454  requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
456  requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
458  requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
460  requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
462  requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
464  requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
466  requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
468  requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
470  requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
472  requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
474  requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
476  requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
478  requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
480  requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
482  requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
483  }
484 
  // Record the fixup; the imm16 field stays zero until relaxation.
485  return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
486 }
487 
488 template<int hasRs, int hasRt2> unsigned
489 AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
490  unsigned EncodedValue) const {
491  if (!hasRs) EncodedValue |= 0x001F0000;
492  if (!hasRt2) EncodedValue |= 0x00007C00;
493 
494  return EncodedValue;
495 }
496 
// fixMOVZ - Post-encoder hook for MOVZ. When the imm16 carries one of the
// signed movw fixup kinds, clear bit 30 so the relaxed result may become a
// MOVN; unsigned fixups and plain immediates are left untouched.
//
// NOTE(review): the `case VK_AARCH64_...:` labels listing which (signed)
// modifiers fall through to the bit-30 clearing return were dropped by the
// extraction (original lines 511-520). Restore from the original file.
497 unsigned
498 AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
499  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
500  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
501  // job to ensure that any bits possibly affected by this are 0. This means we
502  // must zero out bit 30 (essentially emitting a MOVN).
503  MCOperand UImm16MO = MI.getOperand(1);
504 
505  // Nothing to do if there's no fixup.
506  if (UImm16MO.isImm())
507  return EncodedValue;
508 
509  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
510  switch (A64E->getKind()) {
521  return EncodedValue & ~(1u << 30);
522  default:
523  // Nothing to do for an unsigned fixup.
524  return EncodedValue;
525  }
526 
527  llvm_unreachable("Should have returned by now");
528 }
529 
530 unsigned
531 AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
532  unsigned EncodedValue) const {
533  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
534  // (i.e. all bits 1) but is ignored by the processor.
535  EncodedValue |= 0x1f << 10;
536  return EncodedValue;
537 }
538 
// createAArch64MCCodeEmitter - Factory entry point registered with the MC
// layer; only the MCContext is actually used by the emitter.
//
// NOTE(review): the first line of this definition (return type, function
// name and leading parameters, original line 539) was dropped by the
// extraction; the cross-reference section below shows the full signature:
//   MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
//       const MCRegisterInfo &MRI, const MCSubtargetInfo &STI,
//       MCContext &Ctx)
540  const MCRegisterInfo &MRI,
541  const MCSubtargetInfo &STI,
542  MCContext &Ctx) {
543  return new AArch64MCCodeEmitter(Ctx);
544 }
545 
// EncodeInstruction - Main MCCodeEmitter entry point: emit the 4-byte
// little-endian encoding of MI to OS, appending any fixups for symbolic
// operands. TLSDESCCALL is special-cased: it emits no bytes and only
// records a TLSDESC fixup against the following BLR.
//
// NOTE(review): the declaration of `Fixup` (original line 553, presumably
// `MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);` or
// similar) was dropped by the extraction -- it is used on line 556 below.
// Restore from the original file before compiling.
546 void AArch64MCCodeEmitter::
547 EncodeInstruction(const MCInst &MI, raw_ostream &OS,
548  SmallVectorImpl<MCFixup> &Fixups) const {
549  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
550  // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
551  // following (BLR) instruction. It doesn't emit any code itself so it
552  // doesn't go through the normal TableGenerated channels.
554  const MCExpr *Expr;
555  Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
556  Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
557  return;
558  }
559 
  // Normal path: TableGen'erated encoder, then write out little-endian.
560  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
561 
562  EmitInstruction(Binary, OS);
563 }
564 
565 
566 #include "AArch64GenMCCodeEmitter.inc"
MCCodeEmitter * createAArch64MCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI, const MCSubtargetInfo &STI, MCContext &Ctx)
bool isReg() const
Definition: MCInst.h:56
static const AArch64MCExpr * CreateTLSDesc(const MCExpr *Expr, MCContext &Ctx)
#define llvm_unreachable(msg)
VariantKind getKind() const
getKind - Get the kind of this expression.
unsigned getReg() const
getReg - Returns the register number.
Definition: MCInst.h:63
bool isImm() const
Definition: MCInst.h:57
const MCExpr * getExpr() const
Definition: MCInst.h:93
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:22
MCFixupKind
MCFixupKind - Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:22
bool isExpr() const
Definition: MCInst.h:59
#define LLVM_DELETED_FUNCTION
Definition: Compiler.h:137
unsigned Log2_32(uint32_t Value)
Definition: MathExtras.h:443
unsigned getOpcode() const
Definition: MCInst.h:158
int64_t getImm() const
Definition: MCInst.h:74
static MCFixup Create(uint32_t Offset, const MCExpr *Value, MCFixupKind Kind, SMLoc Loc=SMLoc())
Definition: MCFixup.h:77
const MCRegisterInfo & MRI
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:163