ARMAsmParser.cpp

1 //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "ARMBuildAttrs.h"
11 #include "ARMFPUName.h"
12 #include "ARMFeatures.h"
16 #include "MCTargetDesc/ARMMCExpr.h"
17 #include "llvm/ADT/BitVector.h"
18 #include "llvm/ADT/OwningPtr.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringSwitch.h"
22 #include "llvm/ADT/Twine.h"
23 #include "llvm/MC/MCAsmInfo.h"
24 #include "llvm/MC/MCAssembler.h"
25 #include "llvm/MC/MCContext.h"
26 #include "llvm/MC/MCELFStreamer.h"
27 #include "llvm/MC/MCExpr.h"
28 #include "llvm/MC/MCInst.h"
29 #include "llvm/MC/MCInstrDesc.h"
30 #include "llvm/MC/MCInstrInfo.h"
34 #include "llvm/MC/MCRegisterInfo.h"
35 #include "llvm/MC/MCStreamer.h"
37 #include "llvm/Support/ELF.h"
39 #include "llvm/Support/SourceMgr.h"
42 
43 using namespace llvm;
44 
45 namespace {
46 
47 class ARMOperand;
48 
49 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
50 
51 class ARMAsmParser : public MCTargetAsmParser {
52  MCSubtargetInfo &STI;
53  MCAsmParser &Parser;
54  const MCInstrInfo &MII;
55  const MCRegisterInfo *MRI;
56 
57  ARMTargetStreamer &getTargetStreamer() {
58  MCTargetStreamer &TS = getParser().getStreamer().getTargetStreamer();
59  return static_cast<ARMTargetStreamer &>(TS);
60  }
61 
62  // Unwind directives state
63  SMLoc FnStartLoc;
64  SMLoc CantUnwindLoc;
65  SMLoc PersonalityLoc;
66  SMLoc HandlerDataLoc;
67  int FPReg;
68  void resetUnwindDirectiveParserState() {
69  FnStartLoc = SMLoc();
70  CantUnwindLoc = SMLoc();
71  PersonalityLoc = SMLoc();
72  HandlerDataLoc = SMLoc();
73  FPReg = -1;
74  }
75 
76  // Map of register aliases registered via the .req directive.
77  StringMap<unsigned> RegisterReqs;
78 
79  bool NextSymbolIsThumb;
80 
81  struct {
82  ARMCC::CondCodes Cond; // Condition for IT block.
83  unsigned Mask:4; // Condition mask for instructions.
84  // Starting at first 1 (from lsb).
85  // '1' condition as indicated in IT.
86  // '0' inverse of condition (else).
87  // Count of instructions in IT block is
88  // 4 - trailingzeroes(mask)
89 
90  bool FirstCond; // Explicit flag for when we're parsing the
91  // first instruction in the IT block. It's
92  // implied in the mask, so needs special
93  // handling.
94 
95  unsigned CurPosition; // Current position in parsing of IT
96  // block. In range [0,3]. Initialized
97  // according to count of instructions in block.
98  // ~0U if no active IT block.
99  } ITState;
100  bool inITBlock() { return ITState.CurPosition != ~0U;}
101  void forwardITPosition() {
102  if (!inITBlock()) return;
103  // Move to the next instruction in the IT block, if there is one. If not,
104  // mark the block as done.
105  unsigned TZ = countTrailingZeros(ITState.Mask);
106  if (++ITState.CurPosition == 5 - TZ)
107  ITState.CurPosition = ~0U; // Done with the IT block after this.
108  }
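 // Worked example (illustrative): a three-instruction block has Mask = 0bxx10,
 // so countTrailingZeros(Mask) == 1, the count is 4 - 1 == 3, and
 // forwardITPosition() marks the block done once ++CurPosition reaches
 // 5 - 1 == 4.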
109 
110 
111  MCAsmParser &getParser() const { return Parser; }
112  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
113 
114  bool Warning(SMLoc L, const Twine &Msg,
115  ArrayRef<SMRange> Ranges = None) {
116  return Parser.Warning(L, Msg, Ranges);
117  }
118  bool Error(SMLoc L, const Twine &Msg,
119  ArrayRef<SMRange> Ranges = None) {
120  return Parser.Error(L, Msg, Ranges);
121  }
122 
123  int tryParseRegister();
124  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
125  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
126  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
127  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
128  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
129  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
130  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
131  unsigned &ShiftAmount);
132  bool parseDirectiveWord(unsigned Size, SMLoc L);
133  bool parseDirectiveThumb(SMLoc L);
134  bool parseDirectiveARM(SMLoc L);
135  bool parseDirectiveThumbFunc(SMLoc L);
136  bool parseDirectiveCode(SMLoc L);
137  bool parseDirectiveSyntax(SMLoc L);
138  bool parseDirectiveReq(StringRef Name, SMLoc L);
139  bool parseDirectiveUnreq(SMLoc L);
140  bool parseDirectiveArch(SMLoc L);
141  bool parseDirectiveEabiAttr(SMLoc L);
142  bool parseDirectiveCPU(SMLoc L);
143  bool parseDirectiveFPU(SMLoc L);
144  bool parseDirectiveFnStart(SMLoc L);
145  bool parseDirectiveFnEnd(SMLoc L);
146  bool parseDirectiveCantUnwind(SMLoc L);
147  bool parseDirectivePersonality(SMLoc L);
148  bool parseDirectiveHandlerData(SMLoc L);
149  bool parseDirectiveSetFP(SMLoc L);
150  bool parseDirectivePad(SMLoc L);
151  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
152 
153  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
154  bool &CarrySetting, unsigned &ProcessorIMod,
155  StringRef &ITMask);
156  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
157  bool &CanAcceptCarrySet,
158  bool &CanAcceptPredicationCode);
159 
160  bool isThumb() const {
161  // FIXME: Can tablegen auto-generate this?
162  return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
163  }
164  bool isThumbOne() const {
165  return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
166  }
167  bool isThumbTwo() const {
168  return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
169  }
170  bool hasThumb() const {
171  return STI.getFeatureBits() & ARM::HasV4TOps;
172  }
173  bool hasV6Ops() const {
174  return STI.getFeatureBits() & ARM::HasV6Ops;
175  }
176  bool hasV6MOps() const {
177  return STI.getFeatureBits() & ARM::HasV6MOps;
178  }
179  bool hasV7Ops() const {
180  return STI.getFeatureBits() & ARM::HasV7Ops;
181  }
182  bool hasV8Ops() const {
183  return STI.getFeatureBits() & ARM::HasV8Ops;
184  }
185  bool hasARM() const {
186  return !(STI.getFeatureBits() & ARM::FeatureNoARM);
187  }
188 
189  void SwitchMode() {
190  unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
191  setAvailableFeatures(FB);
192  }
193  bool isMClass() const {
194  return STI.getFeatureBits() & ARM::FeatureMClass;
195  }
196 
197  /// @name Auto-generated Match Functions
198  /// {
199 
200 #define GET_ASSEMBLER_HEADER
201 #include "ARMGenAsmMatcher.inc"
202 
203  /// }
204 
205  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
206  OperandMatchResultTy parseCoprocNumOperand(
207  SmallVectorImpl<MCParsedAsmOperand*>&);
208  OperandMatchResultTy parseCoprocRegOperand(
209  SmallVectorImpl<MCParsedAsmOperand*>&);
210  OperandMatchResultTy parseCoprocOptionOperand(
211  SmallVectorImpl<MCParsedAsmOperand*>&);
212  OperandMatchResultTy parseMemBarrierOptOperand(
213  SmallVectorImpl<MCParsedAsmOperand*>&);
214  OperandMatchResultTy parseInstSyncBarrierOptOperand(
215  SmallVectorImpl<MCParsedAsmOperand*>&);
216  OperandMatchResultTy parseProcIFlagsOperand(
217  SmallVectorImpl<MCParsedAsmOperand*>&);
218  OperandMatchResultTy parseMSRMaskOperand(
219  SmallVectorImpl<MCParsedAsmOperand*>&);
220  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
221  StringRef Op, int Low, int High);
222  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
223  return parsePKHImm(O, "lsl", 0, 31);
224  }
225  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
226  return parsePKHImm(O, "asr", 1, 32);
227  }
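 // Note (illustrative): these ranges mirror the PKH syntax, e.g.
 // "pkhbt r0, r1, r2, lsl #8" (lsl amount 0-31) and
 // "pkhtb r0, r1, r2, asr #16" (asr amount 1-32); parsePKHImm is assumed to
 // validate the shift keyword and range before building the operand.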
228  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
229  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
230  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
231  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
232  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
233  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
234  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
235  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
236  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
237  SMLoc &EndLoc);
238 
239  // Asm Match Converter Methods
240  void cvtThumbMultiply(MCInst &Inst,
241  const SmallVectorImpl<MCParsedAsmOperand*> &);
242  void cvtThumbBranches(MCInst &Inst,
243  const SmallVectorImpl<MCParsedAsmOperand*> &);
244 
245  bool validateInstruction(MCInst &Inst,
246  const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
247  bool processInstruction(MCInst &Inst,
248  const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
249  bool shouldOmitCCOutOperand(StringRef Mnemonic,
250  SmallVectorImpl<MCParsedAsmOperand*> &Operands);
251  bool shouldOmitPredicateOperand(StringRef Mnemonic,
252  SmallVectorImpl<MCParsedAsmOperand*> &Operands);
253 public:
254  enum ARMMatchResultTy {
255  Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
256  Match_RequiresNotITBlock,
257  Match_RequiresV6,
258  Match_RequiresThumb2,
259 #define GET_OPERAND_DIAGNOSTIC_TYPES
260 #include "ARMGenAsmMatcher.inc"
261 
262  };
263 
264  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
265  const MCInstrInfo &MII)
266  : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(MII), FPReg(-1) {
268 
269  // Cache the MCRegisterInfo.
270  MRI = getContext().getRegisterInfo();
271 
272  // Initialize the set of available features.
273  setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
274 
275  // Not in an ITBlock to start with.
276  ITState.CurPosition = ~0U;
277 
278  NextSymbolIsThumb = false;
279  }
280 
281  // Implementation of the MCTargetAsmParser interface:
282  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
283  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
284  SMLoc NameLoc,
285  SmallVectorImpl<MCParsedAsmOperand*> &Operands);
286  bool ParseDirective(AsmToken DirectiveID);
287 
288  unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
289  unsigned checkTargetMatchPredicate(MCInst &Inst);
290 
291  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
292  SmallVectorImpl<MCParsedAsmOperand*> &Operands,
293  MCStreamer &Out, unsigned &ErrorInfo,
294  bool MatchingInlineAsm);
295  void onLabelParsed(MCSymbol *Symbol);
296 
297 };
298 } // end anonymous namespace
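// A minimal sketch of how a parser like this is typically hooked up to the MC
// layer (assuming the usual TargetRegistry entry points and the TheARMTarget /
// TheThumbTarget handles; not a verbatim copy of this file's registration):
//
//   extern "C" void LLVMInitializeARMAsmParser() {
//     RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
//     RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
//   }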
299 
300 namespace {
301 
302 /// ARMOperand - Instances of this class represent a parsed ARM machine
303 /// operand.
304 class ARMOperand : public MCParsedAsmOperand {
305  enum KindTy {
306  k_CondCode,
307  k_CCOut,
308  k_ITCondMask,
309  k_CoprocNum,
310  k_CoprocReg,
311  k_CoprocOption,
312  k_Immediate,
313  k_MemBarrierOpt,
314  k_InstSyncBarrierOpt,
315  k_Memory,
316  k_PostIndexRegister,
317  k_MSRMask,
318  k_ProcIFlags,
319  k_VectorIndex,
320  k_Register,
321  k_RegisterList,
322  k_DPRRegisterList,
323  k_SPRRegisterList,
324  k_VectorList,
325  k_VectorListAllLanes,
326  k_VectorListIndexed,
327  k_ShiftedRegister,
328  k_ShiftedImmediate,
329  k_ShifterImmediate,
330  k_RotateImmediate,
331  k_BitfieldDescriptor,
332  k_Token
333  } Kind;
334 
335  SMLoc StartLoc, EndLoc;
336  SmallVector<unsigned, 8> Registers;
337 
338  struct CCOp {
339  ARMCC::CondCodes Val;
340  };
341 
342  struct CopOp {
343  unsigned Val;
344  };
345 
346  struct CoprocOptionOp {
347  unsigned Val;
348  };
349 
350  struct ITMaskOp {
351  unsigned Mask:4;
352  };
353 
354  struct MBOptOp {
355  ARM_MB::MemBOpt Val;
356  };
357 
358  struct ISBOptOp {
359  ARM_ISB::InstSyncBOpt Val;
360  };
361 
362  struct IFlagsOp {
363  ARM_PROC::IFlags Val;
364  };
365 
366  struct MMaskOp {
367  unsigned Val;
368  };
369 
370  struct TokOp {
371  const char *Data;
372  unsigned Length;
373  };
374 
375  struct RegOp {
376  unsigned RegNum;
377  };
378 
379  // A vector register list is a sequential list of 1 to 4 registers.
380  struct VectorListOp {
381  unsigned RegNum;
382  unsigned Count;
383  unsigned LaneIndex;
384  bool isDoubleSpaced;
385  };
386 
387  struct VectorIndexOp {
388  unsigned Val;
389  };
390 
391  struct ImmOp {
392  const MCExpr *Val;
393  };
394 
395  /// Combined record for all forms of ARM address expressions.
396  struct MemoryOp {
397  unsigned BaseRegNum;
398  // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
399  // was specified.
400  const MCConstantExpr *OffsetImm; // Offset immediate value
401  unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
402  ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
403  unsigned ShiftImm; // shift for OffsetReg.
404  unsigned Alignment; // 0 = no alignment specified
405  // n = alignment in bytes (2, 4, 8, 16, or 32)
406  unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
407  };
408 
409  struct PostIdxRegOp {
410  unsigned RegNum;
411  bool isAdd;
412  ARM_AM::ShiftOpc ShiftTy;
413  unsigned ShiftImm;
414  };
415 
416  struct ShifterImmOp {
417  bool isASR;
418  unsigned Imm;
419  };
420 
421  struct RegShiftedRegOp {
422  ARM_AM::ShiftOpc ShiftTy;
423  unsigned SrcReg;
424  unsigned ShiftReg;
425  unsigned ShiftImm;
426  };
427 
428  struct RegShiftedImmOp {
429  ARM_AM::ShiftOpc ShiftTy;
430  unsigned SrcReg;
431  unsigned ShiftImm;
432  };
433 
434  struct RotImmOp {
435  unsigned Imm;
436  };
437 
438  struct BitfieldOp {
439  unsigned LSB;
440  unsigned Width;
441  };
442 
443  union {
444  struct CCOp CC;
445  struct CopOp Cop;
446  struct CoprocOptionOp CoprocOption;
447  struct MBOptOp MBOpt;
448  struct ISBOptOp ISBOpt;
449  struct ITMaskOp ITMask;
450  struct IFlagsOp IFlags;
451  struct MMaskOp MMask;
452  struct TokOp Tok;
453  struct RegOp Reg;
454  struct VectorListOp VectorList;
455  struct VectorIndexOp VectorIndex;
456  struct ImmOp Imm;
457  struct MemoryOp Memory;
458  struct PostIdxRegOp PostIdxReg;
459  struct ShifterImmOp ShifterImm;
460  struct RegShiftedRegOp RegShiftedReg;
461  struct RegShiftedImmOp RegShiftedImm;
462  struct RotImmOp RotImm;
463  struct BitfieldOp Bitfield;
464  };
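 // Illustrative sketch (hypothetical helper, mirroring what a factory method
 // inside this class would do): an ARMOperand is built by picking a Kind and
 // filling exactly the matching union member, e.g.
 //
 //   ARMOperand *Op = new ARMOperand(k_CondCode); // private ctor: class scope
 //   Op->CC.Val = ARMCC::EQ;                      // active union member
 //   Op->StartLoc = S;                            // token range for diagnostics
 //   Op->EndLoc = E;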
465 
466  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
467 public:
468  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
469  Kind = o.Kind;
470  StartLoc = o.StartLoc;
471  EndLoc = o.EndLoc;
472  switch (Kind) {
473  case k_CondCode:
474  CC = o.CC;
475  break;
476  case k_ITCondMask:
477  ITMask = o.ITMask;
478  break;
479  case k_Token:
480  Tok = o.Tok;
481  break;
482  case k_CCOut:
483  case k_Register:
484  Reg = o.Reg;
485  break;
486  case k_RegisterList:
487  case k_DPRRegisterList:
488  case k_SPRRegisterList:
489  Registers = o.Registers;
490  break;
491  case k_VectorList:
492  case k_VectorListAllLanes:
493  case k_VectorListIndexed:
494  VectorList = o.VectorList;
495  break;
496  case k_CoprocNum:
497  case k_CoprocReg:
498  Cop = o.Cop;
499  break;
500  case k_CoprocOption:
501  CoprocOption = o.CoprocOption;
502  break;
503  case k_Immediate:
504  Imm = o.Imm;
505  break;
506  case k_MemBarrierOpt:
507  MBOpt = o.MBOpt;
508  break;
509  case k_InstSyncBarrierOpt:
510  ISBOpt = o.ISBOpt;
  break;
511  case k_Memory:
512  Memory = o.Memory;
513  break;
514  case k_PostIndexRegister:
515  PostIdxReg = o.PostIdxReg;
516  break;
517  case k_MSRMask:
518  MMask = o.MMask;
519  break;
520  case k_ProcIFlags:
521  IFlags = o.IFlags;
522  break;
523  case k_ShifterImmediate:
524  ShifterImm = o.ShifterImm;
525  break;
526  case k_ShiftedRegister:
527  RegShiftedReg = o.RegShiftedReg;
528  break;
529  case k_ShiftedImmediate:
530  RegShiftedImm = o.RegShiftedImm;
531  break;
532  case k_RotateImmediate:
533  RotImm = o.RotImm;
534  break;
535  case k_BitfieldDescriptor:
536  Bitfield = o.Bitfield;
537  break;
538  case k_VectorIndex:
539  VectorIndex = o.VectorIndex;
540  break;
541  }
542  }
543 
544  /// getStartLoc - Get the location of the first token of this operand.
545  SMLoc getStartLoc() const { return StartLoc; }
546  /// getEndLoc - Get the location of the last token of this operand.
547  SMLoc getEndLoc() const { return EndLoc; }
548  /// getLocRange - Get the range between the first and last token of this
549  /// operand.
550  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
551 
552  ARMCC::CondCodes getCondCode() const {
553  assert(Kind == k_CondCode && "Invalid access!");
554  return CC.Val;
555  }
556 
557  unsigned getCoproc() const {
558  assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
559  return Cop.Val;
560  }
561 
562  StringRef getToken() const {
563  assert(Kind == k_Token && "Invalid access!");
564  return StringRef(Tok.Data, Tok.Length);
565  }
566 
567  unsigned getReg() const {
568  assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
569  return Reg.RegNum;
570  }
571 
572  const SmallVectorImpl<unsigned> &getRegList() const {
573  assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
574  Kind == k_SPRRegisterList) && "Invalid access!");
575  return Registers;
576  }
577 
578  const MCExpr *getImm() const {
579  assert(isImm() && "Invalid access!");
580  return Imm.Val;
581  }
582 
583  unsigned getVectorIndex() const {
584  assert(Kind == k_VectorIndex && "Invalid access!");
585  return VectorIndex.Val;
586  }
587 
588  ARM_MB::MemBOpt getMemBarrierOpt() const {
589  assert(Kind == k_MemBarrierOpt && "Invalid access!");
590  return MBOpt.Val;
591  }
592 
593  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
594  assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
595  return ISBOpt.Val;
596  }
597 
598  ARM_PROC::IFlags getProcIFlags() const {
599  assert(Kind == k_ProcIFlags && "Invalid access!");
600  return IFlags.Val;
601  }
602 
603  unsigned getMSRMask() const {
604  assert(Kind == k_MSRMask && "Invalid access!");
605  return MMask.Val;
606  }
607 
608  bool isCoprocNum() const { return Kind == k_CoprocNum; }
609  bool isCoprocReg() const { return Kind == k_CoprocReg; }
610  bool isCoprocOption() const { return Kind == k_CoprocOption; }
611  bool isCondCode() const { return Kind == k_CondCode; }
612  bool isCCOut() const { return Kind == k_CCOut; }
613  bool isITMask() const { return Kind == k_ITCondMask; }
614  bool isITCondCode() const { return Kind == k_CondCode; }
615  bool isImm() const { return Kind == k_Immediate; }
616  // checks whether this operand is an unsigned offset which fits in a field
617  // of the specified width, scaled by a specific number of bits
618  template<unsigned width, unsigned scale>
619  bool isUnsignedOffset() const {
620  if (!isImm()) return false;
621  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
622  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
623  int64_t Val = CE->getValue();
624  int64_t Align = 1LL << scale;
625  int64_t Max = Align * ((1LL << width) - 1);
626  return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
627  }
628  return false;
629  }
630  // checks whether this operand is a signed offset which fits in a field
631  // of the specified width, scaled by a specific number of bits
632  template<unsigned width, unsigned scale>
633  bool isSignedOffset() const {
634  if (!isImm()) return false;
635  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
636  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
637  int64_t Val = CE->getValue();
638  int64_t Align = 1LL << scale;
639  int64_t Max = Align * ((1LL << (width-1)) - 1);
640  int64_t Min = -Align * (1LL << (width-1));
641  return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
642  }
643  return false;
644  }
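 // Worked example (illustrative): with width = 8 and scale = 2, Align is 4, so
 // isUnsignedOffset accepts multiples of 4 in [0, 4 * 255] = [0, 1020] and
 // isSignedOffset accepts multiples of 4 in [-4 * 128, 4 * 127] = [-512, 508];
 // non-constant (symbolic) immediates are accepted and resolved by a fixup.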
645 
646  // checks whether this operand is a memory operand computed as an offset
647  // applied to PC. The offset may have 8 bits of magnitude and is represented
648  // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
649  // relocatable expression.
650  bool isThumbMemPC() const {
651  int64_t Val = 0;
652  if (isImm()) {
653  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
654  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
655  if (!CE) return false;
656  Val = CE->getValue();
657  }
658  else if (isMem()) {
659  if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
660  if(Memory.BaseRegNum != ARM::PC) return false;
661  Val = Memory.OffsetImm->getValue();
662  }
663  else return false;
664  return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
665  }
666  bool isFPImm() const {
667  if (!isImm()) return false;
668  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
669  if (!CE) return false;
670  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
671  return Val != -1;
672  }
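 // Note (illustrative): ARM_AM::getFP32Imm returns -1 unless the constant is
 // one of the small set of values expressible in the 8-bit VFP immediate
 // format, so e.g. 1.0 and 0.5 are accepted here while 0.1 is rejected.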
673  bool isFBits16() const {
674  if (!isImm()) return false;
675  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
676  if (!CE) return false;
677  int64_t Value = CE->getValue();
678  return Value >= 0 && Value <= 16;
679  }
680  bool isFBits32() const {
681  if (!isImm()) return false;
682  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
683  if (!CE) return false;
684  int64_t Value = CE->getValue();
685  return Value >= 1 && Value <= 32;
686  }
687  bool isImm8s4() const {
688  if (!isImm()) return false;
689  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690  if (!CE) return false;
691  int64_t Value = CE->getValue();
692  return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
693  }
694  bool isImm0_1020s4() const {
695  if (!isImm()) return false;
696  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
697  if (!CE) return false;
698  int64_t Value = CE->getValue();
699  return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
700  }
701  bool isImm0_508s4() const {
702  if (!isImm()) return false;
703  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
704  if (!CE) return false;
705  int64_t Value = CE->getValue();
706  return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
707  }
708  bool isImm0_508s4Neg() const {
709  if (!isImm()) return false;
710  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
711  if (!CE) return false;
712  int64_t Value = -CE->getValue();
713  // Explicitly exclude zero; we want that to use the normal 0_508 version.
714  return ((Value & 3) == 0) && Value > 0 && Value <= 508;
715  }
716  bool isImm0_239() const {
717  if (!isImm()) return false;
718  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
719  if (!CE) return false;
720  int64_t Value = CE->getValue();
721  return Value >= 0 && Value < 240;
722  }
723  bool isImm0_255() const {
724  if (!isImm()) return false;
725  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
726  if (!CE) return false;
727  int64_t Value = CE->getValue();
728  return Value >= 0 && Value < 256;
729  }
730  bool isImm0_4095() const {
731  if (!isImm()) return false;
732  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
733  if (!CE) return false;
734  int64_t Value = CE->getValue();
735  return Value >= 0 && Value < 4096;
736  }
737  bool isImm0_4095Neg() const {
738  if (!isImm()) return false;
739  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
740  if (!CE) return false;
741  int64_t Value = -CE->getValue();
742  return Value > 0 && Value < 4096;
743  }
744  bool isImm0_1() const {
745  if (!isImm()) return false;
746  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
747  if (!CE) return false;
748  int64_t Value = CE->getValue();
749  return Value >= 0 && Value < 2;
750  }
751  bool isImm0_3() const {
752  if (!isImm()) return false;
753  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
754  if (!CE) return false;
755  int64_t Value = CE->getValue();
756  return Value >= 0 && Value < 4;
757  }
758  bool isImm0_7() const {
759  if (!isImm()) return false;
760  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
761  if (!CE) return false;
762  int64_t Value = CE->getValue();
763  return Value >= 0 && Value < 8;
764  }
765  bool isImm0_15() const {
766  if (!isImm()) return false;
767  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
768  if (!CE) return false;
769  int64_t Value = CE->getValue();
770  return Value >= 0 && Value < 16;
771  }
772  bool isImm0_31() const {
773  if (!isImm()) return false;
774  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
775  if (!CE) return false;
776  int64_t Value = CE->getValue();
777  return Value >= 0 && Value < 32;
778  }
779  bool isImm0_63() const {
780  if (!isImm()) return false;
781  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
782  if (!CE) return false;
783  int64_t Value = CE->getValue();
784  return Value >= 0 && Value < 64;
785  }
786  bool isImm8() const {
787  if (!isImm()) return false;
788  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
789  if (!CE) return false;
790  int64_t Value = CE->getValue();
791  return Value == 8;
792  }
793  bool isImm16() const {
794  if (!isImm()) return false;
795  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
796  if (!CE) return false;
797  int64_t Value = CE->getValue();
798  return Value == 16;
799  }
800  bool isImm32() const {
801  if (!isImm()) return false;
802  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
803  if (!CE) return false;
804  int64_t Value = CE->getValue();
805  return Value == 32;
806  }
807  bool isShrImm8() const {
808  if (!isImm()) return false;
809  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
810  if (!CE) return false;
811  int64_t Value = CE->getValue();
812  return Value > 0 && Value <= 8;
813  }
814  bool isShrImm16() const {
815  if (!isImm()) return false;
816  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
817  if (!CE) return false;
818  int64_t Value = CE->getValue();
819  return Value > 0 && Value <= 16;
820  }
821  bool isShrImm32() const {
822  if (!isImm()) return false;
823  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
824  if (!CE) return false;
825  int64_t Value = CE->getValue();
826  return Value > 0 && Value <= 32;
827  }
828  bool isShrImm64() const {
829  if (!isImm()) return false;
830  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
831  if (!CE) return false;
832  int64_t Value = CE->getValue();
833  return Value > 0 && Value <= 64;
834  }
835  bool isImm1_7() const {
836  if (!isImm()) return false;
837  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
838  if (!CE) return false;
839  int64_t Value = CE->getValue();
840  return Value > 0 && Value < 8;
841  }
842  bool isImm1_15() const {
843  if (!isImm()) return false;
844  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
845  if (!CE) return false;
846  int64_t Value = CE->getValue();
847  return Value > 0 && Value < 16;
848  }
849  bool isImm1_31() const {
850  if (!isImm()) return false;
851  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
852  if (!CE) return false;
853  int64_t Value = CE->getValue();
854  return Value > 0 && Value < 32;
855  }
856  bool isImm1_16() const {
857  if (!isImm()) return false;
858  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
859  if (!CE) return false;
860  int64_t Value = CE->getValue();
861  return Value > 0 && Value < 17;
862  }
863  bool isImm1_32() const {
864  if (!isImm()) return false;
865  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
866  if (!CE) return false;
867  int64_t Value = CE->getValue();
868  return Value > 0 && Value < 33;
869  }
870  bool isImm0_32() const {
871  if (!isImm()) return false;
872  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
873  if (!CE) return false;
874  int64_t Value = CE->getValue();
875  return Value >= 0 && Value < 33;
876  }
877  bool isImm0_65535() const {
878  if (!isImm()) return false;
879  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
880  if (!CE) return false;
881  int64_t Value = CE->getValue();
882  return Value >= 0 && Value < 65536;
883  }
884  bool isImm256_65535Expr() const {
885  if (!isImm()) return false;
886  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
887  // If it's not a constant expression, it'll generate a fixup and be
888  // handled later.
889  if (!CE) return true;
890  int64_t Value = CE->getValue();
891  return Value >= 256 && Value < 65536;
892  }
893  bool isImm0_65535Expr() const {
894  if (!isImm()) return false;
895  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
896  // If it's not a constant expression, it'll generate a fixup and be
897  // handled later.
898  if (!CE) return true;
899  int64_t Value = CE->getValue();
900  return Value >= 0 && Value < 65536;
901  }
902  bool isImm24bit() const {
903  if (!isImm()) return false;
904  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
905  if (!CE) return false;
906  int64_t Value = CE->getValue();
907  return Value >= 0 && Value <= 0xffffff;
908  }
909  bool isImmThumbSR() const {
910  if (!isImm()) return false;
911  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
912  if (!CE) return false;
913  int64_t Value = CE->getValue();
914  return Value > 0 && Value < 33;
915  }
916  bool isPKHLSLImm() const {
917  if (!isImm()) return false;
918  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
919  if (!CE) return false;
920  int64_t Value = CE->getValue();
921  return Value >= 0 && Value < 32;
922  }
923  bool isPKHASRImm() const {
924  if (!isImm()) return false;
925  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
926  if (!CE) return false;
927  int64_t Value = CE->getValue();
928  return Value > 0 && Value <= 32;
929  }
930  bool isAdrLabel() const {
931  // If we have an immediate that's not a constant, treat it as a label
932  // reference needing a fixup. If it is a constant, but it can't fit
933  // into shift immediate encoding, we reject it.
934  if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
935  else return (isARMSOImm() || isARMSOImmNeg());
936  }
937  bool isARMSOImm() const {
938  if (!isImm()) return false;
939  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
940  if (!CE) return false;
941  int64_t Value = CE->getValue();
942  return ARM_AM::getSOImmVal(Value) != -1;
943  }
944  bool isARMSOImmNot() const {
945  if (!isImm()) return false;
946  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
947  if (!CE) return false;
948  int64_t Value = CE->getValue();
949  return ARM_AM::getSOImmVal(~Value) != -1;
950  }
951  bool isARMSOImmNeg() const {
952  if (!isImm()) return false;
953  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
954  if (!CE) return false;
955  int64_t Value = CE->getValue();
956  // Only use this when not representable as a plain so_imm.
957  return ARM_AM::getSOImmVal(Value) == -1 &&
958  ARM_AM::getSOImmVal(-Value) != -1;
959  }
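 // Worked examples (illustrative): an ARM so_imm is an 8-bit value rotated
 // right by an even amount, so 0xFF and 0x3FC (0xFF ror 30) are encodable while
 // 0x101 is not; 0xFFFFFF00 matches isARMSOImmNot (since ~0xFFFFFF00 == 0xFF),
 // and a value like -2 matches isARMSOImmNeg (2 is encodable, -2 is not).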
960  bool isT2SOImm() const {
961  if (!isImm()) return false;
962  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
963  if (!CE) return false;
964  int64_t Value = CE->getValue();
965  return ARM_AM::getT2SOImmVal(Value) != -1;
966  }
967  bool isT2SOImmNot() const {
968  if (!isImm()) return false;
969  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
970  if (!CE) return false;
971  int64_t Value = CE->getValue();
972  return ARM_AM::getT2SOImmVal(Value) == -1 &&
973  ARM_AM::getT2SOImmVal(~Value) != -1;
974  }
975  bool isT2SOImmNeg() const {
976  if (!isImm()) return false;
977  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
978  if (!CE) return false;
979  int64_t Value = CE->getValue();
980  // Only use this when not representable as a plain so_imm.
981  return ARM_AM::getT2SOImmVal(Value) == -1 &&
982  ARM_AM::getT2SOImmVal(-Value) != -1;
983  }
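 // Note (illustrative): Thumb-2 modified immediates (getT2SOImmVal) also allow
 // the replicated byte patterns 0x00XY00XY, 0xXY00XY00 and 0xXYXYXYXY, so e.g.
 // 0x00AB00AB is a valid t2_so_imm even though it is not a valid ARM so_imm.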
984  bool isSetEndImm() const {
985  if (!isImm()) return false;
986  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
987  if (!CE) return false;
988  int64_t Value = CE->getValue();
989  return Value == 1 || Value == 0;
990  }
991  bool isReg() const { return Kind == k_Register; }
992  bool isRegList() const { return Kind == k_RegisterList; }
993  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
994  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
995  bool isToken() const { return Kind == k_Token; }
996  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
997  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
998  bool isMem() const { return Kind == k_Memory; }
999  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1000  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
1001  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
1002  bool isRotImm() const { return Kind == k_RotateImmediate; }
1003  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1004  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
1005  bool isPostIdxReg() const {
1006  return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1007  }
1008  bool isMemNoOffset(bool alignOK = false) const {
1009  if (!isMem())
1010  return false;
1011  // No offset of any kind.
1012  return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
1013  (alignOK || Memory.Alignment == 0);
1014  }
1015  bool isMemPCRelImm12() const {
1016  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1017  return false;
1018  // Base register must be PC.
1019  if (Memory.BaseRegNum != ARM::PC)
1020  return false;
1021  // Immediate offset in range [-4095, 4095].
1022  if (!Memory.OffsetImm) return true;
1023  int64_t Val = Memory.OffsetImm->getValue();
1024  return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1025  }
1026  bool isAlignedMemory() const {
1027  return isMemNoOffset(true);
1028  }
1029  bool isAddrMode2() const {
1030  if (!isMem() || Memory.Alignment != 0) return false;
1031  // Check for register offset.
1032  if (Memory.OffsetRegNum) return true;
1033  // Immediate offset in range [-4095, 4095].
1034  if (!Memory.OffsetImm) return true;
1035  int64_t Val = Memory.OffsetImm->getValue();
1036  return Val > -4096 && Val < 4096;
1037  }
1038  bool isAM2OffsetImm() const {
1039  if (!isImm()) return false;
1040  // Immediate offset in range [-4095, 4095].
1041  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1042  if (!CE) return false;
1043  int64_t Val = CE->getValue();
1044  return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
1045  }
1046  bool isAddrMode3() const {
1047  // If we have an immediate that's not a constant, treat it as a label
1048  // reference needing a fixup. If it is a constant, it's something else
1049  // and we reject it.
1050  if (isImm() && !isa<MCConstantExpr>(getImm()))
1051  return true;
1052  if (!isMem() || Memory.Alignment != 0) return false;
1053  // No shifts are legal for AM3.
1054  if (Memory.ShiftType != ARM_AM::no_shift) return false;
1055  // Check for register offset.
1056  if (Memory.OffsetRegNum) return true;
1057  // Immediate offset in range [-255, 255].
1058  if (!Memory.OffsetImm) return true;
1059  int64_t Val = Memory.OffsetImm->getValue();
1060  // The #-0 offset is encoded as INT32_MIN, and we have to check
1061  // for this too.
1062  return (Val > -256 && Val < 256) || Val == INT32_MIN;
1063  }
1064  bool isAM3Offset() const {
1065  if (Kind != k_Immediate && Kind != k_PostIndexRegister)
1066  return false;
1067  if (Kind == k_PostIndexRegister)
1068  return PostIdxReg.ShiftTy == ARM_AM::no_shift;
1069  // Immediate offset in range [-255, 255].
1070  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1071  if (!CE) return false;
1072  int64_t Val = CE->getValue();
1073  // Special case, #-0 is INT32_MIN.
1074  return (Val > -256 && Val < 256) || Val == INT32_MIN;
1075  }
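 // Note (illustrative): throughout these address-mode predicates a "#-0"
 // offset is carried as the sentinel INT32_MIN, so that something like
 // "ldrh r0, [r1, #-0]" can still clear the U (add/subtract) bit even though
 // the numeric offset is zero.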
1076  bool isAddrMode5() const {
1077  // If we have an immediate that's not a constant, treat it as a label
1078  // reference needing a fixup. If it is a constant, it's something else
1079  // and we reject it.
1080  if (isImm() && !isa<MCConstantExpr>(getImm()))
1081  return true;
1082  if (!isMem() || Memory.Alignment != 0) return false;
1083  // Check for register offset.
1084  if (Memory.OffsetRegNum) return false;
1085  // Immediate offset in range [-1020, 1020] and a multiple of 4.
1086  if (!Memory.OffsetImm) return true;
1087  int64_t Val = Memory.OffsetImm->getValue();
1088  return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1089  Val == INT32_MIN;
1090  }
1091  bool isMemTBB() const {
1092  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1093  Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1094  return false;
1095  return true;
1096  }
1097  bool isMemTBH() const {
1098  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1099  Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1100  Memory.Alignment != 0 )
1101  return false;
1102  return true;
1103  }
1104  bool isMemRegOffset() const {
1105  if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1106  return false;
1107  return true;
1108  }
1109  bool isT2MemRegOffset() const {
1110  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1111  Memory.Alignment != 0)
1112  return false;
1113  // Only lsl #{0, 1, 2, 3} allowed.
1114  if (Memory.ShiftType == ARM_AM::no_shift)
1115  return true;
1116  if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1117  return false;
1118  return true;
1119  }
1120  bool isMemThumbRR() const {
1121  // Thumb reg+reg addressing is simple. Just two registers, a base and
1122  // an offset. No shifts, negations or any other complicating factors.
1123  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1124  Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1125  return false;
1126  return isARMLowRegister(Memory.BaseRegNum) &&
1127  (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1128  }
1129  bool isMemThumbRIs4() const {
1130  if (!isMem() || Memory.OffsetRegNum != 0 ||
1131  !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1132  return false;
1133  // Immediate offset, multiple of 4 in range [0, 124].
1134  if (!Memory.OffsetImm) return true;
1135  int64_t Val = Memory.OffsetImm->getValue();
1136  return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1137  }
1138  bool isMemThumbRIs2() const {
1139  if (!isMem() || Memory.OffsetRegNum != 0 ||
1140  !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1141  return false;
1142  // Immediate offset, multiple of 2 in range [0, 62].
1143  if (!Memory.OffsetImm) return true;
1144  int64_t Val = Memory.OffsetImm->getValue();
1145  return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1146  }
1147  bool isMemThumbRIs1() const {
1148  if (!isMem() || Memory.OffsetRegNum != 0 ||
1149  !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1150  return false;
1151  // Immediate offset in range [0, 31].
1152  if (!Memory.OffsetImm) return true;
1153  int64_t Val = Memory.OffsetImm->getValue();
1154  return Val >= 0 && Val <= 31;
1155  }
1156  bool isMemThumbSPI() const {
1157  if (!isMem() || Memory.OffsetRegNum != 0 ||
1158  Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1159  return false;
1160  // Immediate offset, multiple of 4 in range [0, 1020].
1161  if (!Memory.OffsetImm) return true;
1162  int64_t Val = Memory.OffsetImm->getValue();
1163  return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1164  }
1165  bool isMemImm8s4Offset() const {
1166  // If we have an immediate that's not a constant, treat it as a label
1167  // reference needing a fixup. If it is a constant, it's something else
1168  // and we reject it.
1169  if (isImm() && !isa<MCConstantExpr>(getImm()))
1170  return true;
1171  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1172  return false;
1173  // Immediate offset a multiple of 4 in range [-1020, 1020].
1174  if (!Memory.OffsetImm) return true;
1175  int64_t Val = Memory.OffsetImm->getValue();
1176  // Special case, #-0 is INT32_MIN.
1177  return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1178  }
1179  bool isMemImm0_1020s4Offset() const {
1180  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1181  return false;
1182  // Immediate offset a multiple of 4 in range [0, 1020].
1183  if (!Memory.OffsetImm) return true;
1184  int64_t Val = Memory.OffsetImm->getValue();
1185  return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1186  }
1187  bool isMemImm8Offset() const {
1188  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1189  return false;
1190  // Base reg of PC isn't allowed for these encodings.
1191  if (Memory.BaseRegNum == ARM::PC) return false;
1192  // Immediate offset in range [-255, 255].
1193  if (!Memory.OffsetImm) return true;
1194  int64_t Val = Memory.OffsetImm->getValue();
1195  return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1196  }
1197  bool isMemPosImm8Offset() const {
1198  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1199  return false;
1200  // Immediate offset in range [0, 255].
1201  if (!Memory.OffsetImm) return true;
1202  int64_t Val = Memory.OffsetImm->getValue();
1203  return Val >= 0 && Val < 256;
1204  }
1205  bool isMemNegImm8Offset() const {
1206  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1207  return false;
1208  // Base reg of PC isn't allowed for these encodings.
1209  if (Memory.BaseRegNum == ARM::PC) return false;
1210  // Immediate offset in range [-255, -1].
1211  if (!Memory.OffsetImm) return false;
1212  int64_t Val = Memory.OffsetImm->getValue();
1213  return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1214  }
1215  bool isMemUImm12Offset() const {
1216  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1217  return false;
1218  // Immediate offset in range [0, 4095].
1219  if (!Memory.OffsetImm) return true;
1220  int64_t Val = Memory.OffsetImm->getValue();
1221  return (Val >= 0 && Val < 4096);
1222  }
1223  bool isMemImm12Offset() const {
1224  // If we have an immediate that's not a constant, treat it as a label
1225  // reference needing a fixup. If it is a constant, it's something else
1226  // and we reject it.
1227  if (isImm() && !isa<MCConstantExpr>(getImm()))
1228  return true;
1229 
1230  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1231  return false;
1232  // Immediate offset in range [-4095, 4095].
1233  if (!Memory.OffsetImm) return true;
1234  int64_t Val = Memory.OffsetImm->getValue();
1235  return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1236  }
1237  bool isPostIdxImm8() const {
1238  if (!isImm()) return false;
1239  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1240  if (!CE) return false;
1241  int64_t Val = CE->getValue();
1242  return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1243  }
1244  bool isPostIdxImm8s4() const {
1245  if (!isImm()) return false;
1246  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1247  if (!CE) return false;
1248  int64_t Val = CE->getValue();
1249  return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1250  (Val == INT32_MIN);
1251  }
1252 
1253  bool isMSRMask() const { return Kind == k_MSRMask; }
1254  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1255 
1256  // NEON operands.
1257  bool isSingleSpacedVectorList() const {
1258  return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1259  }
1260  bool isDoubleSpacedVectorList() const {
1261  return Kind == k_VectorList && VectorList.isDoubleSpaced;
1262  }
1263  bool isVecListOneD() const {
1264  if (!isSingleSpacedVectorList()) return false;
1265  return VectorList.Count == 1;
1266  }
1267 
1268  bool isVecListDPair() const {
1269  if (!isSingleSpacedVectorList()) return false;
1270  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1271  .contains(VectorList.RegNum));
1272  }
1273 
1274  bool isVecListThreeD() const {
1275  if (!isSingleSpacedVectorList()) return false;
1276  return VectorList.Count == 3;
1277  }
1278 
1279  bool isVecListFourD() const {
1280  if (!isSingleSpacedVectorList()) return false;
1281  return VectorList.Count == 4;
1282  }
1283 
1284  bool isVecListDPairSpaced() const {
1285  if (isSingleSpacedVectorList()) return false;
1286  return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1287  .contains(VectorList.RegNum));
1288  }
1289 
1290  bool isVecListThreeQ() const {
1291  if (!isDoubleSpacedVectorList()) return false;
1292  return VectorList.Count == 3;
1293  }
1294 
1295  bool isVecListFourQ() const {
1296  if (!isDoubleSpacedVectorList()) return false;
1297  return VectorList.Count == 4;
1298  }
1299 
1300  bool isSingleSpacedVectorAllLanes() const {
1301  return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1302  }
1303  bool isDoubleSpacedVectorAllLanes() const {
1304  return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1305  }
1306  bool isVecListOneDAllLanes() const {
1307  if (!isSingleSpacedVectorAllLanes()) return false;
1308  return VectorList.Count == 1;
1309  }
1310 
1311  bool isVecListDPairAllLanes() const {
1312  if (!isSingleSpacedVectorAllLanes()) return false;
1313  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1314  .contains(VectorList.RegNum));
1315  }
1316 
1317  bool isVecListDPairSpacedAllLanes() const {
1318  if (!isDoubleSpacedVectorAllLanes()) return false;
1319  return VectorList.Count == 2;
1320  }
1321 
1322  bool isVecListThreeDAllLanes() const {
1323  if (!isSingleSpacedVectorAllLanes()) return false;
1324  return VectorList.Count == 3;
1325  }
1326 
1327  bool isVecListThreeQAllLanes() const {
1328  if (!isDoubleSpacedVectorAllLanes()) return false;
1329  return VectorList.Count == 3;
1330  }
1331 
1332  bool isVecListFourDAllLanes() const {
1333  if (!isSingleSpacedVectorAllLanes()) return false;
1334  return VectorList.Count == 4;
1335  }
1336 
1337  bool isVecListFourQAllLanes() const {
1338  if (!isDoubleSpacedVectorAllLanes()) return false;
1339  return VectorList.Count == 4;
1340  }
1341 
1342  bool isSingleSpacedVectorIndexed() const {
1343  return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1344  }
1345  bool isDoubleSpacedVectorIndexed() const {
1346  return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1347  }
1348  bool isVecListOneDByteIndexed() const {
1349  if (!isSingleSpacedVectorIndexed()) return false;
1350  return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1351  }
1352 
1353  bool isVecListOneDHWordIndexed() const {
1354  if (!isSingleSpacedVectorIndexed()) return false;
1355  return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1356  }
1357 
1358  bool isVecListOneDWordIndexed() const {
1359  if (!isSingleSpacedVectorIndexed()) return false;
1360  return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1361  }
1362 
1363  bool isVecListTwoDByteIndexed() const {
1364  if (!isSingleSpacedVectorIndexed()) return false;
1365  return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1366  }
1367 
1368  bool isVecListTwoDHWordIndexed() const {
1369  if (!isSingleSpacedVectorIndexed()) return false;
1370  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1371  }
1372 
1373  bool isVecListTwoQWordIndexed() const {
1374  if (!isDoubleSpacedVectorIndexed()) return false;
1375  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1376  }
1377 
1378  bool isVecListTwoQHWordIndexed() const {
1379  if (!isDoubleSpacedVectorIndexed()) return false;
1380  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1381  }
1382 
1383  bool isVecListTwoDWordIndexed() const {
1384  if (!isSingleSpacedVectorIndexed()) return false;
1385  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1386  }
1387 
1388  bool isVecListThreeDByteIndexed() const {
1389  if (!isSingleSpacedVectorIndexed()) return false;
1390  return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1391  }
1392 
1393  bool isVecListThreeDHWordIndexed() const {
1394  if (!isSingleSpacedVectorIndexed()) return false;
1395  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1396  }
1397 
1398  bool isVecListThreeQWordIndexed() const {
1399  if (!isDoubleSpacedVectorIndexed()) return false;
1400  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1401  }
1402 
1403  bool isVecListThreeQHWordIndexed() const {
1404  if (!isDoubleSpacedVectorIndexed()) return false;
1405  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1406  }
1407 
1408  bool isVecListThreeDWordIndexed() const {
1409  if (!isSingleSpacedVectorIndexed()) return false;
1410  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1411  }
1412 
1413  bool isVecListFourDByteIndexed() const {
1414  if (!isSingleSpacedVectorIndexed()) return false;
1415  return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1416  }
1417 
1418  bool isVecListFourDHWordIndexed() const {
1419  if (!isSingleSpacedVectorIndexed()) return false;
1420  return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1421  }
1422 
1423  bool isVecListFourQWordIndexed() const {
1424  if (!isDoubleSpacedVectorIndexed()) return false;
1425  return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1426  }
1427 
1428  bool isVecListFourQHWordIndexed() const {
1429  if (!isDoubleSpacedVectorIndexed()) return false;
1430  return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1431  }
1432 
1433  bool isVecListFourDWordIndexed() const {
1434  if (!isSingleSpacedVectorIndexed()) return false;
1435  return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1436  }
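 // Note (illustrative): these predicates correspond to NEON register-list
 // syntax such as "{d0, d1, d2}" (k_VectorList), "{d0[], d1[]}"
 // (k_VectorListAllLanes) and "{d0[2], d2[2]}" (k_VectorListIndexed, double
 // spaced), with Count, LaneIndex and isDoubleSpaced filled in by
 // parseVectorList.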
1437 
1438  bool isVectorIndex8() const {
1439  if (Kind != k_VectorIndex) return false;
1440  return VectorIndex.Val < 8;
1441  }
1442  bool isVectorIndex16() const {
1443  if (Kind != k_VectorIndex) return false;
1444  return VectorIndex.Val < 4;
1445  }
1446  bool isVectorIndex32() const {
1447  if (Kind != k_VectorIndex) return false;
1448  return VectorIndex.Val < 2;
1449  }
1450 
1451  bool isNEONi8splat() const {
1452  if (!isImm()) return false;
1453  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1454  // Must be a constant.
1455  if (!CE) return false;
1456  int64_t Value = CE->getValue();
1457  // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1458  // value.
1459  return Value >= 0 && Value < 256;
1460  }
1461 
1462  bool isNEONi16splat() const {
1463  if (!isImm()) return false;
1464  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1465  // Must be a constant.
1466  if (!CE) return false;
1467  int64_t Value = CE->getValue();
1468  // i16 value in the range [0,255] or [0x0100, 0xff00]
1469  return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1470  }
1471 
1472  bool isNEONi32splat() const {
1473  if (!isImm()) return false;
1474  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1475  // Must be a constant.
1476  if (!CE) return false;
1477  int64_t Value = CE->getValue();
1478  // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1479  return (Value >= 0 && Value < 256) ||
1480  (Value >= 0x0100 && Value <= 0xff00) ||
1481  (Value >= 0x010000 && Value <= 0xff0000) ||
1482  (Value >= 0x01000000 && Value <= 0xff000000);
1483  }
1484 
1485  bool isNEONi32vmov() const {
1486  if (!isImm()) return false;
1487  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1488  // Must be a constant.
1489  if (!CE) return false;
1490  int64_t Value = CE->getValue();
1491  // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1492  // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1493  return (Value >= 0 && Value < 256) ||
1494  (Value >= 0x0100 && Value <= 0xff00) ||
1495  (Value >= 0x010000 && Value <= 0xff0000) ||
1496  (Value >= 0x01000000 && Value <= 0xff000000) ||
1497  (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1498  (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1499  }
1500  bool isNEONi32vmovNeg() const {
1501  if (!isImm()) return false;
1502  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1503  // Must be a constant.
1504  if (!CE) return false;
1505  int64_t Value = ~CE->getValue();
1506  // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1507  // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1508  return (Value >= 0 && Value < 256) ||
1509  (Value >= 0x0100 && Value <= 0xff00) ||
1510  (Value >= 0x010000 && Value <= 0xff0000) ||
1511  (Value >= 0x01000000 && Value <= 0xff000000) ||
1512  (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1513  (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1514  }
1515 
1516  bool isNEONi64splat() const {
1517  if (!isImm()) return false;
1518  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1519  // Must be a constant.
1520  if (!CE) return false;
1521  uint64_t Value = CE->getValue();
1522  // i64 value with each byte being either 0 or 0xff.
1523  for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1524  if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1525  return true;
1526  }
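 // Worked examples (illustrative): 0x00AB0000 satisfies isNEONi32splat (set
 // bits confined to a single byte), 0x0000ABFF satisfies isNEONi32vmov via the
 // "00Xf" form, and 0x00FF00FF00FF00FF satisfies isNEONi64splat because every
 // byte is either 0x00 or 0xFF.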
1527 
1528  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1529  // Add as immediates when possible. Null MCExpr = 0.
1530  if (Expr == 0)
1531  Inst.addOperand(MCOperand::CreateImm(0));
1532  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1533  Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1534  else
1535  Inst.addOperand(MCOperand::CreateExpr(Expr));
1536  }
1537 
1538  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1539  assert(N == 2 && "Invalid number of operands!");
1540  Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1541  unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1542  Inst.addOperand(MCOperand::CreateReg(RegNum));
1543  }
1544 
1545  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1546  assert(N == 1 && "Invalid number of operands!");
1547  Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1548  }
1549 
1550  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1551  assert(N == 1 && "Invalid number of operands!");
1552  Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1553  }
1554 
1555  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1556  assert(N == 1 && "Invalid number of operands!");
1557  Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1558  }
1559 
1560  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1561  assert(N == 1 && "Invalid number of operands!");
1562  Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1563  }
1564 
1565  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1566  assert(N == 1 && "Invalid number of operands!");
1567  Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1568  }
1569 
1570  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1571  assert(N == 1 && "Invalid number of operands!");
1572  Inst.addOperand(MCOperand::CreateReg(getReg()));
1573  }
1574 
1575  void addRegOperands(MCInst &Inst, unsigned N) const {
1576  assert(N == 1 && "Invalid number of operands!");
1577  Inst.addOperand(MCOperand::CreateReg(getReg()));
1578  }
1579 
1580  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1581  assert(N == 3 && "Invalid number of operands!");
1582  assert(isRegShiftedReg() &&
1583  "addRegShiftedRegOperands() on non RegShiftedReg!");
1584  Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1585  Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1586  Inst.addOperand(MCOperand::CreateImm(
1587  ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1588  }
1589 
1590  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1591  assert(N == 2 && "Invalid number of operands!");
1592  assert(isRegShiftedImm() &&
1593  "addRegShiftedImmOperands() on non RegShiftedImm!");
1594  Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1595  // Shift of #32 is encoded as 0 where permitted
1596  unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1597  Inst.addOperand(MCOperand::CreateImm(
1598  ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1599  }
1600 
1601  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1602  assert(N == 1 && "Invalid number of operands!");
1603  Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1604  ShifterImm.Imm));
1605  }
1606 
1607  void addRegListOperands(MCInst &Inst, unsigned N) const {
1608  assert(N == 1 && "Invalid number of operands!");
1609  const SmallVectorImpl<unsigned> &RegList = getRegList();
1610  for (SmallVectorImpl<unsigned>::const_iterator
1611  I = RegList.begin(), E = RegList.end(); I != E; ++I)
1612  Inst.addOperand(MCOperand::CreateReg(*I));
1613  }
1614 
1615  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1616  addRegListOperands(Inst, N);
1617  }
1618 
1619  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1620  addRegListOperands(Inst, N);
1621  }
1622 
1623  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1624  assert(N == 1 && "Invalid number of operands!");
1625  // Encoded as val>>3. The printer handles display as 8, 16, 24.
1626  Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1627  }
1628 
1629  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1630  assert(N == 1 && "Invalid number of operands!");
1631  // Munge the lsb/width into a bitfield mask.
1632  unsigned lsb = Bitfield.LSB;
1633  unsigned width = Bitfield.Width;
1634  // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1635  uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1636  (32 - (lsb + width)));
1637  Inst.addOperand(MCOperand::CreateImm(Mask));
1638  }
1639 
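  // Editorial sketch -- illustration only, not part of the original file: the
  // expression in addBitfieldOperands() above builds a mask with bits
  // [LSB, LSB+Width) clear and everything else set, e.g. LSB=8, Width=8 gives
  // 0xffff00ff. A hypothetical standalone equivalent:
  static uint32_t bfcMaskSketch(unsigned lsb, unsigned width) {
    return ~((0xffffffffU >> lsb) << (32 - width) >> (32 - (lsb + width)));
  }
  // bfcMaskSketch(8, 8) == 0xffff00ffU, bfcMaskSketch(0, 32) == 0x00000000U.
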
1640  void addImmOperands(MCInst &Inst, unsigned N) const {
1641  assert(N == 1 && "Invalid number of operands!");
1642  addExpr(Inst, getImm());
1643  }
1644 
1645  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1646  assert(N == 1 && "Invalid number of operands!");
1647  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1648  Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1649  }
1650 
1651  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1652  assert(N == 1 && "Invalid number of operands!");
1653  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1654  Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1655  }
1656 
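  // Editorial note -- not part of the original file: both fixed-point helpers
  // above store the complement of the written fraction-bit count, i.e. the
  // operand added to the MCInst is (16 - fbits) or (32 - fbits). A
  // hypothetical standalone restatement:
  static int64_t encodeFBitsSketch(int64_t Width, int64_t FBits) {
    return Width - FBits; // e.g. the 32-bit form with "#7" stores 25
  }
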
1657  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1658  assert(N == 1 && "Invalid number of operands!");
1659  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1660  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1661  Inst.addOperand(MCOperand::CreateImm(Val));
1662  }
1663 
1664  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1665  assert(N == 1 && "Invalid number of operands!");
1666  // FIXME: We really want to scale the value here, but the LDRD/STRD
1667  // instructions don't encode operands that way yet.
1668  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1669  Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1670  }
1671 
1672  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1673  assert(N == 1 && "Invalid number of operands!");
1674  // The immediate is scaled by four in the encoding and is stored
1675  // in the MCInst as such. Lop off the low two bits here.
1676  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1677  Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1678  }
1679 
1680  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1681  assert(N == 1 && "Invalid number of operands!");
1682  // The immediate is scaled by four in the encoding and is stored
1683  // in the MCInst as such. Lop off the low two bits here.
1684  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1685  Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1686  }
1687 
1688  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1689  assert(N == 1 && "Invalid number of operands!");
1690  // The immediate is scaled by four in the encoding and is stored
1691  // in the MCInst as such. Lop off the low two bits here.
1692  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1693  Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1694  }
1695 
1696  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1697  assert(N == 1 && "Invalid number of operands!");
1698  // The constant encodes as the immediate-1, and we store in the instruction
1699  // the bits as encoded, so subtract off one here.
1700  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1701  Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1702  }
1703 
1704  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1705  assert(N == 1 && "Invalid number of operands!");
1706  // The constant encodes as the immediate-1, and we store in the instruction
1707  // the bits as encoded, so subtract off one here.
1708  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1709  Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1710  }
1711 
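  // Editorial note -- not part of the original file: the two helpers above
  // store "value minus one", so the written ranges 1..16 and 1..32 become the
  // encoded ranges 0..15 and 0..31. A hypothetical standalone restatement:
  static int64_t encodeImmMinusOneSketch(int64_t Value) {
    return Value - 1; // "#1" -> 0, "#16" -> 15, "#32" -> 31
  }
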
1712  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1713  assert(N == 1 && "Invalid number of operands!");
1714  // The constant encodes as the immediate, except for 32, which encodes as
1715  // zero.
1716  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1717  unsigned Imm = CE->getValue();
1718  Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1719  }
1720 
1721  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1722  assert(N == 1 && "Invalid number of operands!");
1723  // An ASR value of 32 encodes as 0, so that's how we want to add it to
1724  // the instruction as well.
1725  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1726  int Val = CE->getValue();
1727  Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1728  }
1729 
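  // Editorial note -- not part of the original file: in the two helpers above
  // a written shift of "#32" is stored as 0, since the underlying fields use
  // 0 to stand for a 32-bit shift. A hypothetical standalone restatement:
  static int64_t encodeShift32AsZeroSketch(int64_t Val) {
    return Val == 32 ? 0 : Val; // "#32" -> 0, "#5" -> 5
  }
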
1730  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1731  assert(N == 1 && "Invalid number of operands!");
1732  // The operand is actually a t2_so_imm, but we have its bitwise
1733  // negation in the assembly source, so twiddle it here.
1734  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1735  Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1736  }
1737 
1738  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1739  assert(N == 1 && "Invalid number of operands!");
1740  // The operand is actually a t2_so_imm, but we have its
1741  // negation in the assembly source, so twiddle it here.
1742  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1743  Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1744  }
1745 
1746  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1747  assert(N == 1 && "Invalid number of operands!");
1748  // The operand is actually an imm0_4095, but we have its
1749  // negation in the assembly source, so twiddle it here.
1750  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1751  Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1752  }
1753 
1754  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
1755  if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
1756  Inst.addOperand(MCOperand::CreateImm(CE->getValue() >> 2));
1757  return;
1758  }
1759 
1760  const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
1761  assert(SR && "Unknown value type!");
1762  Inst.addOperand(MCOperand::CreateExpr(SR));
1763  }
1764 
1765  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
1766  assert(N == 1 && "Invalid number of operands!");
1767  if (isImm()) {
1768  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1769  if (CE) {
1770  Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1771  return;
1772  }
1773 
1774  const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
1775  assert(SR && "Unknown value type!");
1776  Inst.addOperand(MCOperand::CreateExpr(SR));
1777  return;
1778  }
1779 
1780  assert(isMem() && "Unknown value type!");
1781  assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
1782  Inst.addOperand(MCOperand::CreateImm(Memory.OffsetImm->getValue()));
1783  }
1784 
1785  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1786  assert(N == 1 && "Invalid number of operands!");
1787  // The operand is actually a so_imm, but we have its bitwise
1788  // negation in the assembly source, so twiddle it here.
1789  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1790  Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1791  }
1792 
1793  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1794  assert(N == 1 && "Invalid number of operands!");
1795  // The operand is actually a so_imm, but we have its
1796  // negation in the assembly source, so twiddle it here.
1797  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1798  Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1799  }
1800 
1801  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1802  assert(N == 1 && "Invalid number of operands!");
1803  Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1804  }
1805 
1806  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
1807  assert(N == 1 && "Invalid number of operands!");
1808  Inst.addOperand(MCOperand::CreateImm(unsigned(getInstSyncBarrierOpt())));
1809  }
1810 
1811  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1812  assert(N == 1 && "Invalid number of operands!");
1813  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1814  }
1815 
1816  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1817  assert(N == 1 && "Invalid number of operands!");
1818  int32_t Imm = Memory.OffsetImm->getValue();
1819  Inst.addOperand(MCOperand::CreateImm(Imm));
1820  }
1821 
1822  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1823  assert(N == 1 && "Invalid number of operands!");
1824  assert(isImm() && "Not an immediate!");
1825 
1826  // If we have an immediate that's not a constant, treat it as a label
1827  // reference needing a fixup.
1828  if (!isa<MCConstantExpr>(getImm())) {
1829  Inst.addOperand(MCOperand::CreateExpr(getImm()));
1830  return;
1831  }
1832 
1833  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1834  int Val = CE->getValue();
1835  Inst.addOperand(MCOperand::CreateImm(Val));
1836  }
1837 
1838  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1839  assert(N == 2 && "Invalid number of operands!");
1840  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1841  Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1842  }
1843 
1844  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1845  assert(N == 3 && "Invalid number of operands!");
1846  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1847  if (!Memory.OffsetRegNum) {
1848  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1849  // Special case for #-0
1850  if (Val == INT32_MIN) Val = 0;
1851  if (Val < 0) Val = -Val;
1852  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1853  } else {
1854  // For register offset, we encode the shift type and negation flag
1855  // here.
1856  Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1857  Memory.ShiftImm, Memory.ShiftType);
1858  }
1859  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1860  Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1861  Inst.addOperand(MCOperand::CreateImm(Val));
1862  }
1863 
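  // Editorial sketch -- illustration only, not part of the original file: the
  // immediate path above splits the signed offset into an add/sub flag plus a
  // magnitude before packing it with ARM_AM::getAM2Opc(). INT32_MIN appears to
  // be the parser's sentinel for a written "#-0", which must still encode as a
  // subtraction of zero. A standalone sketch of just that normalization:
  static void splitAM2OffsetSketch(int32_t Val, bool &IsSub, uint32_t &Magnitude) {
    IsSub = Val < 0;               // "#-0" (the INT32_MIN sentinel) counts as sub
    if (Val == INT32_MIN) Val = 0; // ...but its magnitude is zero
    Magnitude = Val < 0 ? -Val : Val;
  }
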
1864  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1865  assert(N == 2 && "Invalid number of operands!");
1866  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1867  assert(CE && "non-constant AM2OffsetImm operand!");
1868  int32_t Val = CE->getValue();
1869  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1870  // Special case for #-0
1871  if (Val == INT32_MIN) Val = 0;
1872  if (Val < 0) Val = -Val;
1873  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1874  Inst.addOperand(MCOperand::CreateReg(0));
1875  Inst.addOperand(MCOperand::CreateImm(Val));
1876  }
1877 
1878  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1879  assert(N == 3 && "Invalid number of operands!");
1880  // If we have an immediate that's not a constant, treat it as a label
1881  // reference needing a fixup. If it is a constant, it's something else
1882  // and we reject it.
1883  if (isImm()) {
1884  Inst.addOperand(MCOperand::CreateExpr(getImm()));
1885  Inst.addOperand(MCOperand::CreateReg(0));
1886  Inst.addOperand(MCOperand::CreateImm(0));
1887  return;
1888  }
1889 
1890  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1891  if (!Memory.OffsetRegNum) {
1892  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1893  // Special case for #-0
1894  if (Val == INT32_MIN) Val = 0;
1895  if (Val < 0) Val = -Val;
1896  Val = ARM_AM::getAM3Opc(AddSub, Val);
1897  } else {
1898  // For register offset, we encode the shift type and negation flag
1899  // here.
1900  Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1901  }
1902  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1903  Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1904  Inst.addOperand(MCOperand::CreateImm(Val));
1905  }
1906 
1907  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1908  assert(N == 2 && "Invalid number of operands!");
1909  if (Kind == k_PostIndexRegister) {
1910  int32_t Val =
1911  ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1912  Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1913  Inst.addOperand(MCOperand::CreateImm(Val));
1914  return;
1915  }
1916 
1917  // Constant offset.
1918  const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1919  int32_t Val = CE->getValue();
1920  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1921  // Special case for #-0
1922  if (Val == INT32_MIN) Val = 0;
1923  if (Val < 0) Val = -Val;
1924  Val = ARM_AM::getAM3Opc(AddSub, Val);
1925  Inst.addOperand(MCOperand::CreateReg(0));
1926  Inst.addOperand(MCOperand::CreateImm(Val));
1927  }
1928 
1929  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1930  assert(N == 2 && "Invalid number of operands!");
1931  // If we have an immediate that's not a constant, treat it as a label
1932  // reference needing a fixup. If it is a constant, it's something else
1933  // and we reject it.
1934  if (isImm()) {
1935  Inst.addOperand(MCOperand::CreateExpr(getImm()));
1936  Inst.addOperand(MCOperand::CreateImm(0));
1937  return;
1938  }
1939 
1940  // The lower two bits are always zero and as such are not encoded.
1941  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1942  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1943  // Special case for #-0
1944  if (Val == INT32_MIN) Val = 0;
1945  if (Val < 0) Val = -Val;
1946  Val = ARM_AM::getAM5Opc(AddSub, Val);
1947  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1948  Inst.addOperand(MCOperand::CreateImm(Val));
1949  }
1950 
1951  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1952  assert(N == 2 && "Invalid number of operands!");
1953  // If we have an immediate that's not a constant, treat it as a label
1954  // reference needing a fixup. If it is a constant, it's something else
1955  // and we reject it.
1956  if (isImm()) {
1957  Inst.addOperand(MCOperand::CreateExpr(getImm()));
1958  Inst.addOperand(MCOperand::CreateImm(0));
1959  return;
1960  }
1961 
1962  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1963  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1964  Inst.addOperand(MCOperand::CreateImm(Val));
1965  }
1966 
1967  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1968  assert(N == 2 && "Invalid number of operands!");
1969  // The lower two bits are always zero and as such are not encoded.
1970  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1971  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1972  Inst.addOperand(MCOperand::CreateImm(Val));
1973  }
1974 
1975  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1976  assert(N == 2 && "Invalid number of operands!");
1977  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1978  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1979  Inst.addOperand(MCOperand::CreateImm(Val));
1980  }
1981 
1982  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1983  addMemImm8OffsetOperands(Inst, N);
1984  }
1985 
1986  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1987  addMemImm8OffsetOperands(Inst, N);
1988  }
1989 
1990  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1991  assert(N == 2 && "Invalid number of operands!");
1992  // If this is an immediate, it's a label reference.
1993  if (isImm()) {
1994  addExpr(Inst, getImm());
1995  Inst.addOperand(MCOperand::CreateImm(0));
1996  return;
1997  }
1998 
1999  // Otherwise, it's a normal memory reg+offset.
2000  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2001  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2002  Inst.addOperand(MCOperand::CreateImm(Val));
2003  }
2004 
2005  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2006  assert(N == 2 && "Invalid number of operands!");
2007  // If this is an immediate, it's a label reference.
2008  if (isImm()) {
2009  addExpr(Inst, getImm());
2010  Inst.addOperand(MCOperand::CreateImm(0));
2011  return;
2012  }
2013 
2014  // Otherwise, it's a normal memory reg+offset.
2015  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2016  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2017  Inst.addOperand(MCOperand::CreateImm(Val));
2018  }
2019 
2020  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2021  assert(N == 2 && "Invalid number of operands!");
2022  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2023  Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2024  }
2025 
2026  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2027  assert(N == 2 && "Invalid number of operands!");
2028  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2029  Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2030  }
2031 
2032  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2033  assert(N == 3 && "Invalid number of operands!");
2034  unsigned Val =
2035  ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2036  Memory.ShiftImm, Memory.ShiftType);
2037  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2038  Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2039  Inst.addOperand(MCOperand::CreateImm(Val));
2040  }
2041 
2042  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2043  assert(N == 3 && "Invalid number of operands!");
2044  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2045  Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2046  Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
2047  }
2048 
2049  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2050  assert(N == 2 && "Invalid number of operands!");
2051  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2052  Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2053  }
2054 
2055  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2056  assert(N == 2 && "Invalid number of operands!");
2057  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2058  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2059  Inst.addOperand(MCOperand::CreateImm(Val));
2060  }
2061 
2062  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2063  assert(N == 2 && "Invalid number of operands!");
2064  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2065  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2066  Inst.addOperand(MCOperand::CreateImm(Val));
2067  }
2068 
2069  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2070  assert(N == 2 && "Invalid number of operands!");
2071  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2072  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2073  Inst.addOperand(MCOperand::CreateImm(Val));
2074  }
2075 
2076  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2077  assert(N == 2 && "Invalid number of operands!");
2078  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2079  Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2080  Inst.addOperand(MCOperand::CreateImm(Val));
2081  }
2082 
2083  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2084  assert(N == 1 && "Invalid number of operands!");
2085  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2086  assert(CE && "non-constant post-idx-imm8 operand!");
2087  int Imm = CE->getValue();
2088  bool isAdd = Imm >= 0;
2089  if (Imm == INT32_MIN) Imm = 0;
2090  Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2091  Inst.addOperand(MCOperand::CreateImm(Imm));
2092  }
2093 
2094  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2095  assert(N == 1 && "Invalid number of operands!");
2096  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2097  assert(CE && "non-constant post-idx-imm8s4 operand!");
2098  int Imm = CE->getValue();
2099  bool isAdd = Imm >= 0;
2100  if (Imm == INT32_MIN) Imm = 0;
2101  // Immediate is scaled by 4.
2102  Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2103  Inst.addOperand(MCOperand::CreateImm(Imm));
2104  }
2105 
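  // Editorial sketch -- illustration only, not part of the original file: both
  // post-index forms above fold the direction into bit 8 of the operand
  // (1 means add); the low bits hold the magnitude, divided by 4 in the s4
  // variant. A hypothetical standalone restatement:
  static int encodePostIdxImm8Sketch(int Imm, int Scale /* 1 or 4 */) {
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0; // written "#-0"
    return ((Imm < 0 ? -Imm : Imm) / Scale) | (int(isAdd) << 8);
  }
  // e.g. "#4" -> 0x104, "#-4" -> 0x004, and "#8" with Scale=4 -> 0x102.
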
2106  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2107  assert(N == 2 && "Invalid number of operands!");
2108  Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2109  Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
2110  }
2111 
2112  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2113  assert(N == 2 && "Invalid number of operands!");
2114  Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2115  // The sign, shift type, and shift amount are encoded in a single operand
2116  // using the AM2 encoding helpers.
2117  ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2118  unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2119  PostIdxReg.ShiftTy);
2120  Inst.addOperand(MCOperand::CreateImm(Imm));
2121  }
2122 
2123  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2124  assert(N == 1 && "Invalid number of operands!");
2125  Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
2126  }
2127 
2128  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2129  assert(N == 1 && "Invalid number of operands!");
2130  Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
2131  }
2132 
2133  void addVecListOperands(MCInst &Inst, unsigned N) const {
2134  assert(N == 1 && "Invalid number of operands!");
2135  Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2136  }
2137 
2138  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2139  assert(N == 2 && "Invalid number of operands!");
2140  Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2141  Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
2142  }
2143 
2144  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2145  assert(N == 1 && "Invalid number of operands!");
2146  Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2147  }
2148 
2149  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2150  assert(N == 1 && "Invalid number of operands!");
2151  Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2152  }
2153 
2154  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2155  assert(N == 1 && "Invalid number of operands!");
2156  Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2157  }
2158 
2159  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2160  assert(N == 1 && "Invalid number of operands!");
2161  // The immediate encodes the type of constant as well as the value.
2162  // Mask in that this is an i8 splat.
2163  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2164  Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
2165  }
2166 
2167  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2168  assert(N == 1 && "Invalid number of operands!");
2169  // The immediate encodes the type of constant as well as the value.
2170  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2171  unsigned Value = CE->getValue();
2172  if (Value >= 256)
2173  Value = (Value >> 8) | 0xa00;
2174  else
2175  Value |= 0x800;
2176  Inst.addOperand(MCOperand::CreateImm(Value));
2177  }
2178 
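  // Editorial sketch -- illustration only, not part of the original file: for
  // the i16 splat above, a value already in the low byte keeps the 0x800 tag,
  // while a value in the high byte is shifted down and tagged 0xa00, e.g.
  // 0x12 -> 0x812 and 0x1200 -> 0xa12. A hypothetical standalone restatement:
  static unsigned encodeNEONi16SplatSketch(unsigned Value) {
    return Value >= 256 ? ((Value >> 8) | 0xa00) : (Value | 0x800);
  }
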
2179  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2180  assert(N == 1 && "Invalid number of operands!");
2181  // The immediate encodes the type of constant as well as the value.
2182  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2183  unsigned Value = CE->getValue();
2184  if (Value >= 256 && Value <= 0xff00)
2185  Value = (Value >> 8) | 0x200;
2186  else if (Value > 0xffff && Value <= 0xff0000)
2187  Value = (Value >> 16) | 0x400;
2188  else if (Value > 0xffffff)
2189  Value = (Value >> 24) | 0x600;
2190  Inst.addOperand(MCOperand::CreateImm(Value));
2191  }
2192 
2193  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2194  assert(N == 1 && "Invalid number of operands!");
2195  // The immediate encodes the type of constant as well as the value.
2196  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2197  unsigned Value = CE->getValue();
2198  if (Value >= 256 && Value <= 0xffff)
2199  Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2200  else if (Value > 0xffff && Value <= 0xffffff)
2201  Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2202  else if (Value > 0xffffff)
2203  Value = (Value >> 24) | 0x600;
2204  Inst.addOperand(MCOperand::CreateImm(Value));
2205  }
2206 
2207  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2208  assert(N == 1 && "Invalid number of operands!");
2209  // The immediate encodes the type of constant as well as the value.
2210  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2211  unsigned Value = ~CE->getValue();
2212  if (Value >= 256 && Value <= 0xffff)
2213  Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2214  else if (Value > 0xffff && Value <= 0xffffff)
2215  Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2216  else if (Value > 0xffffff)
2217  Value = (Value >> 24) | 0x600;
2218  Inst.addOperand(MCOperand::CreateImm(Value));
2219  }
2220 
2221  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2222  assert(N == 1 && "Invalid number of operands!");
2223  // The immediate encodes the type of constant as well as the value.
2224  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2225  uint64_t Value = CE->getValue();
2226  unsigned Imm = 0;
2227  for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2228  Imm |= (Value & 1) << i;
2229  }
2230  Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2231  }
2232 
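  // Editorial sketch -- illustration only, not part of the original file: the
  // i64 form above appears to rely on every byte of the value being 0x00 or
  // 0xff and condenses it to one bit per byte before tagging it with 0x1e00,
  // e.g. 0x00ff00ff00ff00ffULL -> byte mask 0x55 -> operand 0x1e55. A
  // hypothetical standalone restatement:
  static unsigned encodeNEONi64SplatSketch(uint64_t Value) {
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
      Imm |= (Value & 1) << i; // the low bit of each byte stands for the byte
    return Imm | 0x1e00;
  }
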
2233  virtual void print(raw_ostream &OS) const;
2234 
2235  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2236  ARMOperand *Op = new ARMOperand(k_ITCondMask);
2237  Op->ITMask.Mask = Mask;
2238  Op->StartLoc = S;
2239  Op->EndLoc = S;
2240  return Op;
2241  }
2242 
2243  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2244  ARMOperand *Op = new ARMOperand(k_CondCode);
2245  Op->CC.Val = CC;
2246  Op->StartLoc = S;
2247  Op->EndLoc = S;
2248  return Op;
2249  }
2250 
2251  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2252  ARMOperand *Op = new ARMOperand(k_CoprocNum);
2253  Op->Cop.Val = CopVal;
2254  Op->StartLoc = S;
2255  Op->EndLoc = S;
2256  return Op;
2257  }
2258 
2259  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2260  ARMOperand *Op = new ARMOperand(k_CoprocReg);
2261  Op->Cop.Val = CopVal;
2262  Op->StartLoc = S;
2263  Op->EndLoc = S;
2264  return Op;
2265  }
2266 
2267  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2268  ARMOperand *Op = new ARMOperand(k_CoprocOption);
2269  Op->Cop.Val = Val;
2270  Op->StartLoc = S;
2271  Op->EndLoc = E;
2272  return Op;
2273  }
2274 
2275  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2276  ARMOperand *Op = new ARMOperand(k_CCOut);
2277  Op->Reg.RegNum = RegNum;
2278  Op->StartLoc = S;
2279  Op->EndLoc = S;
2280  return Op;
2281  }
2282 
2283  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2284  ARMOperand *Op = new ARMOperand(k_Token);
2285  Op->Tok.Data = Str.data();
2286  Op->Tok.Length = Str.size();
2287  Op->StartLoc = S;
2288  Op->EndLoc = S;
2289  return Op;
2290  }
2291 
2292  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2293  ARMOperand *Op = new ARMOperand(k_Register);
2294  Op->Reg.RegNum = RegNum;
2295  Op->StartLoc = S;
2296  Op->EndLoc = E;
2297  return Op;
2298  }
2299 
2300  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2301  unsigned SrcReg,
2302  unsigned ShiftReg,
2303  unsigned ShiftImm,
2304  SMLoc S, SMLoc E) {
2305  ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2306  Op->RegShiftedReg.ShiftTy = ShTy;
2307  Op->RegShiftedReg.SrcReg = SrcReg;
2308  Op->RegShiftedReg.ShiftReg = ShiftReg;
2309  Op->RegShiftedReg.ShiftImm = ShiftImm;
2310  Op->StartLoc = S;
2311  Op->EndLoc = E;
2312  return Op;
2313  }
2314 
2315  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2316  unsigned SrcReg,
2317  unsigned ShiftImm,
2318  SMLoc S, SMLoc E) {
2319  ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2320  Op->RegShiftedImm.ShiftTy = ShTy;
2321  Op->RegShiftedImm.SrcReg = SrcReg;
2322  Op->RegShiftedImm.ShiftImm = ShiftImm;
2323  Op->StartLoc = S;
2324  Op->EndLoc = E;
2325  return Op;
2326  }
2327 
2328  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2329  SMLoc S, SMLoc E) {
2330  ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2331  Op->ShifterImm.isASR = isASR;
2332  Op->ShifterImm.Imm = Imm;
2333  Op->StartLoc = S;
2334  Op->EndLoc = E;
2335  return Op;
2336  }
2337 
2338  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2339  ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2340  Op->RotImm.Imm = Imm;
2341  Op->StartLoc = S;
2342  Op->EndLoc = E;
2343  return Op;
2344  }
2345 
2346  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2347  SMLoc S, SMLoc E) {
2348  ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2349  Op->Bitfield.LSB = LSB;
2350  Op->Bitfield.Width = Width;
2351  Op->StartLoc = S;
2352  Op->EndLoc = E;
2353  return Op;
2354  }
2355 
2356  static ARMOperand *
2357  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned> > &Regs,
2358  SMLoc StartLoc, SMLoc EndLoc) {
2359  assert (Regs.size() > 0 && "RegList contains no registers?");
2360  KindTy Kind = k_RegisterList;
2361 
2362  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2363  Kind = k_DPRRegisterList;
2364  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2365  contains(Regs.front().second))
2366  Kind = k_SPRRegisterList;
2367 
2368  // Sort based on the register encoding values.
2369  array_pod_sort(Regs.begin(), Regs.end());
2370 
2371  ARMOperand *Op = new ARMOperand(Kind);
2372  for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
2373  I = Regs.begin(), E = Regs.end(); I != E; ++I)
2374  Op->Registers.push_back(I->second);
2375  Op->StartLoc = StartLoc;
2376  Op->EndLoc = EndLoc;
2377  return Op;
2378  }
2379 
2380  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2381  bool isDoubleSpaced, SMLoc S, SMLoc E) {
2382  ARMOperand *Op = new ARMOperand(k_VectorList);
2383  Op->VectorList.RegNum = RegNum;
2384  Op->VectorList.Count = Count;
2385  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2386  Op->StartLoc = S;
2387  Op->EndLoc = E;
2388  return Op;
2389  }
2390 
2391  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2392  bool isDoubleSpaced,
2393  SMLoc S, SMLoc E) {
2394  ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2395  Op->VectorList.RegNum = RegNum;
2396  Op->VectorList.Count = Count;
2397  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2398  Op->StartLoc = S;
2399  Op->EndLoc = E;
2400  return Op;
2401  }
2402 
2403  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2404  unsigned Index,
2405  bool isDoubleSpaced,
2406  SMLoc S, SMLoc E) {
2407  ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2408  Op->VectorList.RegNum = RegNum;
2409  Op->VectorList.Count = Count;
2410  Op->VectorList.LaneIndex = Index;
2411  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2412  Op->StartLoc = S;
2413  Op->EndLoc = E;
2414  return Op;
2415  }
2416 
2417  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2418  MCContext &Ctx) {
2419  ARMOperand *Op = new ARMOperand(k_VectorIndex);
2420  Op->VectorIndex.Val = Idx;
2421  Op->StartLoc = S;
2422  Op->EndLoc = E;
2423  return Op;
2424  }
2425 
2426  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2427  ARMOperand *Op = new ARMOperand(k_Immediate);
2428  Op->Imm.Val = Val;
2429  Op->StartLoc = S;
2430  Op->EndLoc = E;
2431  return Op;
2432  }
2433 
2434  static ARMOperand *CreateMem(unsigned BaseRegNum,
2435  const MCConstantExpr *OffsetImm,
2436  unsigned OffsetRegNum,
2437  ARM_AM::ShiftOpc ShiftType,
2438  unsigned ShiftImm,
2439  unsigned Alignment,
2440  bool isNegative,
2441  SMLoc S, SMLoc E) {
2442  ARMOperand *Op = new ARMOperand(k_Memory);
2443  Op->Memory.BaseRegNum = BaseRegNum;
2444  Op->Memory.OffsetImm = OffsetImm;
2445  Op->Memory.OffsetRegNum = OffsetRegNum;
2446  Op->Memory.ShiftType = ShiftType;
2447  Op->Memory.ShiftImm = ShiftImm;
2448  Op->Memory.Alignment = Alignment;
2449  Op->Memory.isNegative = isNegative;
2450  Op->StartLoc = S;
2451  Op->EndLoc = E;
2452  return Op;
2453  }
2454 
2455  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2456  ARM_AM::ShiftOpc ShiftTy,
2457  unsigned ShiftImm,
2458  SMLoc S, SMLoc E) {
2459  ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2460  Op->PostIdxReg.RegNum = RegNum;
2461  Op->PostIdxReg.isAdd = isAdd;
2462  Op->PostIdxReg.ShiftTy = ShiftTy;
2463  Op->PostIdxReg.ShiftImm = ShiftImm;
2464  Op->StartLoc = S;
2465  Op->EndLoc = E;
2466  return Op;
2467  }
2468 
2469  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2470  ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2471  Op->MBOpt.Val = Opt;
2472  Op->StartLoc = S;
2473  Op->EndLoc = S;
2474  return Op;
2475  }
2476 
2477  static ARMOperand *CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt,
2478  SMLoc S) {
2479  ARMOperand *Op = new ARMOperand(k_InstSyncBarrierOpt);
2480  Op->ISBOpt.Val = Opt;
2481  Op->StartLoc = S;
2482  Op->EndLoc = S;
2483  return Op;
2484  }
2485 
2486  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2487  ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2488  Op->IFlags.Val = IFlags;
2489  Op->StartLoc = S;
2490  Op->EndLoc = S;
2491  return Op;
2492  }
2493 
2494  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2495  ARMOperand *Op = new ARMOperand(k_MSRMask);
2496  Op->MMask.Val = MMask;
2497  Op->StartLoc = S;
2498  Op->EndLoc = S;
2499  return Op;
2500  }
2501 };
2502 
2503 } // end anonymous namespace.
2504 
2505 void ARMOperand::print(raw_ostream &OS) const {
2506  switch (Kind) {
2507  case k_CondCode:
2508  OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2509  break;
2510  case k_CCOut:
2511  OS << "<ccout " << getReg() << ">";
2512  break;
2513  case k_ITCondMask: {
2514  static const char *const MaskStr[] = {
2515  "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2516  "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2517  };
2518  assert((ITMask.Mask & 0xf) == ITMask.Mask);
2519  OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2520  break;
2521  }
2522  case k_CoprocNum:
2523  OS << "<coprocessor number: " << getCoproc() << ">";
2524  break;
2525  case k_CoprocReg:
2526  OS << "<coprocessor register: " << getCoproc() << ">";
2527  break;
2528  case k_CoprocOption:
2529  OS << "<coprocessor option: " << CoprocOption.Val << ">";
2530  break;
2531  case k_MSRMask:
2532  OS << "<mask: " << getMSRMask() << ">";
2533  break;
2534  case k_Immediate:
2535  getImm()->print(OS);
2536  break;
2537  case k_MemBarrierOpt:
2538  OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
2539  break;
2540  case k_InstSyncBarrierOpt:
2541  OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
2542  break;
2543  case k_Memory:
2544  OS << "<memory "
2545  << " base:" << Memory.BaseRegNum;
2546  OS << ">";
2547  break;
2548  case k_PostIndexRegister:
2549  OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2550  << PostIdxReg.RegNum;
2551  if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2552  OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2553  << PostIdxReg.ShiftImm;
2554  OS << ">";
2555  break;
2556  case k_ProcIFlags: {
2557  OS << "<ARM_PROC::";
2558  unsigned IFlags = getProcIFlags();
2559  for (int i=2; i >= 0; --i)
2560  if (IFlags & (1 << i))
2561  OS << ARM_PROC::IFlagsToString(1 << i);
2562  OS << ">";
2563  break;
2564  }
2565  case k_Register:
2566  OS << "<register " << getReg() << ">";
2567  break;
2568  case k_ShifterImmediate:
2569  OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2570  << " #" << ShifterImm.Imm << ">";
2571  break;
2572  case k_ShiftedRegister:
2573  OS << "<so_reg_reg "
2574  << RegShiftedReg.SrcReg << " "
2575  << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2576  << " " << RegShiftedReg.ShiftReg << ">";
2577  break;
2578  case k_ShiftedImmediate:
2579  OS << "<so_reg_imm "
2580  << RegShiftedImm.SrcReg << " "
2581  << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2582  << " #" << RegShiftedImm.ShiftImm << ">";
2583  break;
2584  case k_RotateImmediate:
2585  OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2586  break;
2587  case k_BitfieldDescriptor:
2588  OS << "<bitfield " << "lsb: " << Bitfield.LSB
2589  << ", width: " << Bitfield.Width << ">";
2590  break;
2591  case k_RegisterList:
2592  case k_DPRRegisterList:
2593  case k_SPRRegisterList: {
2594  OS << "<register_list ";
2595 
2596  const SmallVectorImpl<unsigned> &RegList = getRegList();
2597  for (SmallVectorImpl<unsigned>::const_iterator
2598  I = RegList.begin(), E = RegList.end(); I != E; ) {
2599  OS << *I;
2600  if (++I < E) OS << ", ";
2601  }
2602 
2603  OS << ">";
2604  break;
2605  }
2606  case k_VectorList:
2607  OS << "<vector_list " << VectorList.Count << " * "
2608  << VectorList.RegNum << ">";
2609  break;
2610  case k_VectorListAllLanes:
2611  OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2612  << VectorList.RegNum << ">";
2613  break;
2614  case k_VectorListIndexed:
2615  OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2616  << VectorList.Count << " * " << VectorList.RegNum << ">";
2617  break;
2618  case k_Token:
2619  OS << "'" << getToken() << "'";
2620  break;
2621  case k_VectorIndex:
2622  OS << "<vectorindex " << getVectorIndex() << ">";
2623  break;
2624  }
2625 }
2626 
2627 /// @name Auto-generated Match Functions
2628 /// {
2629 
2630 static unsigned MatchRegisterName(StringRef Name);
2631 
2632 /// }
2633 
2634 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2635  SMLoc &StartLoc, SMLoc &EndLoc) {
2636  StartLoc = Parser.getTok().getLoc();
2637  EndLoc = Parser.getTok().getEndLoc();
2638  RegNo = tryParseRegister();
2639 
2640  return (RegNo == (unsigned)-1);
2641 }
2642 
2643 /// Try to parse a register name. The token must be an Identifier when called,
2644 /// and if it is a register name the token is eaten and the register number is
2645 /// returned. Otherwise return -1.
2646 ///
2647 int ARMAsmParser::tryParseRegister() {
2648  const AsmToken &Tok = Parser.getTok();
2649  if (Tok.isNot(AsmToken::Identifier)) return -1;
2650 
2651  std::string lowerCase = Tok.getString().lower();
2652  unsigned RegNum = MatchRegisterName(lowerCase);
2653  if (!RegNum) {
2654  RegNum = StringSwitch<unsigned>(lowerCase)
2655  .Case("r13", ARM::SP)
2656  .Case("r14", ARM::LR)
2657  .Case("r15", ARM::PC)
2658  .Case("ip", ARM::R12)
2659  // Additional register name aliases for 'gas' compatibility.
2660  .Case("a1", ARM::R0)
2661  .Case("a2", ARM::R1)
2662  .Case("a3", ARM::R2)
2663  .Case("a4", ARM::R3)
2664  .Case("v1", ARM::R4)
2665  .Case("v2", ARM::R5)
2666  .Case("v3", ARM::R6)
2667  .Case("v4", ARM::R7)
2668  .Case("v5", ARM::R8)
2669  .Case("v6", ARM::R9)
2670  .Case("v7", ARM::R10)
2671  .Case("v8", ARM::R11)
2672  .Case("sb", ARM::R9)
2673  .Case("sl", ARM::R10)
2674  .Case("fp", ARM::R11)
2675  .Default(0);
2676  }
2677  if (!RegNum) {
2678  // Check for aliases registered via .req. Canonicalize to lower case.
2679  // That's more consistent since register names are case insensitive, and
2680  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2681  StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2682  // If no match, return failure.
2683  if (Entry == RegisterReqs.end())
2684  return -1;
2685  Parser.Lex(); // Eat identifier token.
2686  return Entry->getValue();
2687  }
2688 
2689  Parser.Lex(); // Eat identifier token.
2690 
2691  return RegNum;
2692 }
2693 
2694 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2695 // If a recoverable error occurs, return 1. If an irrecoverable error
2696 // occurs, return -1. An irrecoverable error is one where tokens have been
2697 // consumed in the process of trying to parse the shifter (i.e., when it is
2698 // indeed a shifter operand, but malformed).
2699 int ARMAsmParser::tryParseShiftRegister(
2700  SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2701  SMLoc S = Parser.getTok().getLoc();
2702  const AsmToken &Tok = Parser.getTok();
2703  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2704 
2705  std::string lowerCase = Tok.getString().lower();
2706  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2707  .Case("asl", ARM_AM::lsl)
2708  .Case("lsl", ARM_AM::lsl)
2709  .Case("lsr", ARM_AM::lsr)
2710  .Case("asr", ARM_AM::asr)
2711  .Case("ror", ARM_AM::ror)
2712  .Case("rrx", ARM_AM::rrx)
2713  .Default(ARM_AM::no_shift);
2714 
2715  if (ShiftTy == ARM_AM::no_shift)
2716  return 1;
2717 
2718  Parser.Lex(); // Eat the operator.
2719 
2720  // The source register for the shift has already been added to the
2721  // operand list, so we need to pop it off and combine it into the shifted
2722  // register operand instead.
2723  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2724  if (!PrevOp->isReg())
2725  return Error(PrevOp->getStartLoc(), "shift must be of a register");
2726  int SrcReg = PrevOp->getReg();
2727 
2728  SMLoc EndLoc;
2729  int64_t Imm = 0;
2730  int ShiftReg = 0;
2731  if (ShiftTy == ARM_AM::rrx) {
2732  // RRX Doesn't have an explicit shift amount. The encoder expects
2733  // the shift register to be the same as the source register. Seems odd,
2734  // but OK.
2735  ShiftReg = SrcReg;
2736  } else {
2737  // Figure out if this is shifted by a constant or a register (for non-RRX).
2738  if (Parser.getTok().is(AsmToken::Hash) ||
2739  Parser.getTok().is(AsmToken::Dollar)) {
2740  Parser.Lex(); // Eat hash.
2741  SMLoc ImmLoc = Parser.getTok().getLoc();
2742  const MCExpr *ShiftExpr = 0;
2743  if (getParser().parseExpression(ShiftExpr, EndLoc)) {
2744  Error(ImmLoc, "invalid immediate shift value");
2745  return -1;
2746  }
2747  // The expression must be evaluatable as an immediate.
2748  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2749  if (!CE) {
2750  Error(ImmLoc, "invalid immediate shift value");
2751  return -1;
2752  }
2753  // Range check the immediate.
2754  // lsl, ror: 0 <= imm <= 31
2755  // lsr, asr: 0 <= imm <= 32
2756  Imm = CE->getValue();
2757  if (Imm < 0 ||
2758  ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2759  ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2760  Error(ImmLoc, "immediate shift value out of range");
2761  return -1;
2762  }
2763  // shift by zero is a nop. Always send it through as lsl.
2764  // ('as' compatibility)
2765  if (Imm == 0)
2766  ShiftTy = ARM_AM::lsl;
2767  } else if (Parser.getTok().is(AsmToken::Identifier)) {
2768  SMLoc L = Parser.getTok().getLoc();
2769  EndLoc = Parser.getTok().getEndLoc();
2770  ShiftReg = tryParseRegister();
2771  if (ShiftReg == -1) {
2772  Error (L, "expected immediate or register in shift operand");
2773  return -1;
2774  }
2775  } else {
2776  Error (Parser.getTok().getLoc(),
2777  "expected immediate or register in shift operand");
2778  return -1;
2779  }
2780  }
2781 
2782  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2783  Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2784  ShiftReg, Imm,
2785  S, EndLoc));
2786  else
2787  Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2788  S, EndLoc));
2789 
2790  return 0;
2791 }
2792 
2793 
2794 /// Try to parse a register name. The token must be an Identifier when called.
2795 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
2796 /// if there is a "writeback". Returns 'true' if it's not a register.
2797 ///
2798 /// TODO this is likely to change to allow different register types and/or to
2799 /// parse for a specific register type.
2800 bool ARMAsmParser::
2801 tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2802  const AsmToken &RegTok = Parser.getTok();
2803  int RegNo = tryParseRegister();
2804  if (RegNo == -1)
2805  return true;
2806 
2807  Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
2808  RegTok.getEndLoc()));
2809 
2810  const AsmToken &ExclaimTok = Parser.getTok();
2811  if (ExclaimTok.is(AsmToken::Exclaim)) {
2812  Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2813  ExclaimTok.getLoc()));
2814  Parser.Lex(); // Eat exclaim token
2815  return false;
2816  }
2817 
2818  // Also check for an index operand. This is only legal for vector registers,
2819  // but that'll get caught OK in operand matching, so we don't need to
2820  // explicitly filter everything else out here.
2821  if (Parser.getTok().is(AsmToken::LBrac)) {
2822  SMLoc SIdx = Parser.getTok().getLoc();
2823  Parser.Lex(); // Eat left bracket token.
2824 
2825  const MCExpr *ImmVal;
2826  if (getParser().parseExpression(ImmVal))
2827  return true;
2828  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2829  if (!MCE)
2830  return TokError("immediate value expected for vector index");
2831 
2832  if (Parser.getTok().isNot(AsmToken::RBrac))
2833  return Error(Parser.getTok().getLoc(), "']' expected");
2834 
2835  SMLoc E = Parser.getTok().getEndLoc();
2836  Parser.Lex(); // Eat right bracket token.
2837 
2838  Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2839  SIdx, E,
2840  getContext()));
2841  }
2842 
2843  return false;
2844 }
2845 
2846 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2847 /// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2848 /// "c5", ...
2849 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2850  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2851  // but efficient.
2852  switch (Name.size()) {
2853  default: return -1;
2854  case 2:
2855  if (Name[0] != CoprocOp)
2856  return -1;
2857  switch (Name[1]) {
2858  default: return -1;
2859  case '0': return 0;
2860  case '1': return 1;
2861  case '2': return 2;
2862  case '3': return 3;
2863  case '4': return 4;
2864  case '5': return 5;
2865  case '6': return 6;
2866  case '7': return 7;
2867  case '8': return 8;
2868  case '9': return 9;
2869  }
2870  case 3:
2871  if (Name[0] != CoprocOp || Name[1] != '1')
2872  return -1;
2873  switch (Name[2]) {
2874  default: return -1;
2875  // p10 and p11 are invalid for coproc instructions (reserved for FP/NEON)
2876  case '0': return CoprocOp == 'p'? -1: 10;
2877  case '1': return CoprocOp == 'p'? -1: 11;
2878  case '2': return 12;
2879  case '3': return 13;
2880  case '4': return 14;
2881  case '5': return 15;
2882  }
2883  }
2884 }
2885 
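// Editorial note -- not part of the original file: illustrative results of the
// matcher above: MatchCoprocessorOperandName("p7", 'p') == 7,
// MatchCoprocessorOperandName("c15", 'c') == 15, and
// MatchCoprocessorOperandName("p10", 'p') == -1 because p10/p11 are reserved
// for FP/NEON, whereas MatchCoprocessorOperandName("c10", 'c') == 10.
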
2886 /// parseITCondCode - Try to parse a condition code for an IT instruction.
2887 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2888 parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2889  SMLoc S = Parser.getTok().getLoc();
2890  const AsmToken &Tok = Parser.getTok();
2891  if (!Tok.is(AsmToken::Identifier))
2892  return MatchOperand_NoMatch;
2893  unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
2894  .Case("eq", ARMCC::EQ)
2895  .Case("ne", ARMCC::NE)
2896  .Case("hs", ARMCC::HS)
2897  .Case("cs", ARMCC::HS)
2898  .Case("lo", ARMCC::LO)
2899  .Case("cc", ARMCC::LO)
2900  .Case("mi", ARMCC::MI)
2901  .Case("pl", ARMCC::PL)
2902  .Case("vs", ARMCC::VS)
2903  .Case("vc", ARMCC::VC)
2904  .Case("hi", ARMCC::HI)
2905  .Case("ls", ARMCC::LS)
2906  .Case("ge", ARMCC::GE)
2907  .Case("lt", ARMCC::LT)
2908  .Case("gt", ARMCC::GT)
2909  .Case("le", ARMCC::LE)
2910  .Case("al", ARMCC::AL)
2911  .Default(~0U);
2912  if (CC == ~0U)
2913  return MatchOperand_NoMatch;
2914  Parser.Lex(); // Eat the token.
2915 
2916  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2917 
2918  return MatchOperand_Success;
2919 }
2920 
2921 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2922 /// token must be an Identifier when called, and if it is a coprocessor
2923 /// number, the token is eaten and the operand is added to the operand list.
2924 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2925 parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2926  SMLoc S = Parser.getTok().getLoc();
2927  const AsmToken &Tok = Parser.getTok();
2928  if (Tok.isNot(AsmToken::Identifier))
2929  return MatchOperand_NoMatch;
2930 
2931  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2932  if (Num == -1)
2933  return MatchOperand_NoMatch;
2934 
2935  Parser.Lex(); // Eat identifier token.
2936  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2937  return MatchOperand_Success;
2938 }
2939 
2940 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2941 /// token must be an Identifier when called, and if it is a coprocessor
2942 /// register, the token is eaten and the operand is added to the operand list.
2943 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2944 parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2945  SMLoc S = Parser.getTok().getLoc();
2946  const AsmToken &Tok = Parser.getTok();
2947  if (Tok.isNot(AsmToken::Identifier))
2948  return MatchOperand_NoMatch;
2949 
2950  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2951  if (Reg == -1)
2952  return MatchOperand_NoMatch;
2953 
2954  Parser.Lex(); // Eat identifier token.
2955  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2956  return MatchOperand_Success;
2957 }
2958 
2959 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2960 /// coproc_option : '{' imm0_255 '}'
2961 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2962 parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2963  SMLoc S = Parser.getTok().getLoc();
2964 
2965  // If this isn't a '{', this isn't a coprocessor immediate operand.
2966  if (Parser.getTok().isNot(AsmToken::LCurly))
2967  return MatchOperand_NoMatch;
2968  Parser.Lex(); // Eat the '{'
2969 
2970  const MCExpr *Expr;
2971  SMLoc Loc = Parser.getTok().getLoc();
2972  if (getParser().parseExpression(Expr)) {
2973  Error(Loc, "illegal expression");
2974  return MatchOperand_ParseFail;
2975  }
2976  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2977  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2978  Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2979  return MatchOperand_ParseFail;
2980  }
2981  int Val = CE->getValue();
2982 
2983  // Check for and consume the closing '}'
2984  if (Parser.getTok().isNot(AsmToken::RCurly))
2985  return MatchOperand_ParseFail;
2986  SMLoc E = Parser.getTok().getEndLoc();
2987  Parser.Lex(); // Eat the '}'
2988 
2989  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2990  return MatchOperand_Success;
2991 }
2992 
2993 // For register list parsing, we need to map from raw GPR register numbering
2994 // to the enumeration values. The enumeration values aren't sorted by
2995 // register number due to our using "sp", "lr" and "pc" as canonical names.
2996 static unsigned getNextRegister(unsigned Reg) {
2997  // If this is a GPR, we need to do it manually, otherwise we can rely
2998  // on the sort ordering of the enumeration since the other reg-classes
2999  // are sane.
3000  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3001  return Reg + 1;
3002  switch(Reg) {
3003  default: llvm_unreachable("Invalid GPR number!");
3004  case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
3005  case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
3006  case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
3007  case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
3008  case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
3009  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3010  case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
3011  case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
3012  }
3013 }
3014 
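// Editorial note -- not part of the original file: for GPRs the successor is
// taken in encoding order rather than enumeration order, so the walk above
// runs ... r11 -> r12 -> sp -> lr -> pc and then wraps to r0. The other
// register classes fall back to Reg + 1 because, per the comment above, their
// enumeration order already matches.
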
3015 // Return the low-subreg of a given Q register.
3016 static unsigned getDRegFromQReg(unsigned QReg) {
3017  switch (QReg) {
3018  default: llvm_unreachable("expected a Q register!");
3019  case ARM::Q0: return ARM::D0;
3020  case ARM::Q1: return ARM::D2;
3021  case ARM::Q2: return ARM::D4;
3022  case ARM::Q3: return ARM::D6;
3023  case ARM::Q4: return ARM::D8;
3024  case ARM::Q5: return ARM::D10;
3025  case ARM::Q6: return ARM::D12;
3026  case ARM::Q7: return ARM::D14;
3027  case ARM::Q8: return ARM::D16;
3028  case ARM::Q9: return ARM::D18;
3029  case ARM::Q10: return ARM::D20;
3030  case ARM::Q11: return ARM::D22;
3031  case ARM::Q12: return ARM::D24;
3032  case ARM::Q13: return ARM::D26;
3033  case ARM::Q14: return ARM::D28;
3034  case ARM::Q15: return ARM::D30;
3035  }
3036 }
3037 
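// Editorial note -- not part of the original file: the table above encodes the
// overlap Qn = {D(2n), D(2n+1)}; for example getDRegFromQReg(ARM::Q3) returns
// ARM::D6, and the register-list parser below then pushes both D6 and D7.
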
3038 /// Parse a register list.
3039 bool ARMAsmParser::
3040 parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3041  assert(Parser.getTok().is(AsmToken::LCurly) &&
3042  "Token is not a Left Curly Brace");
3043  SMLoc S = Parser.getTok().getLoc();
3044  Parser.Lex(); // Eat '{' token.
3045  SMLoc RegLoc = Parser.getTok().getLoc();
3046 
3047  // Check the first register in the list to see what register class
3048  // this is a list of.
3049  int Reg = tryParseRegister();
3050  if (Reg == -1)
3051  return Error(RegLoc, "register expected");
3052 
3053  // The reglist instructions have at most 16 registers, so reserve
3054  // space for that many.
3055  int EReg = 0;
3056  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
3057 
3058  // Allow Q regs and just interpret them as the two D sub-registers.
3059  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3060  Reg = getDRegFromQReg(Reg);
3061  EReg = MRI->getEncodingValue(Reg);
3062  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3063  ++Reg;
3064  }
3065  const MCRegisterClass *RC;
3066  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3067  RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
3068  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
3069  RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
3070  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
3071  RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
3072  else
3073  return Error(RegLoc, "invalid register in register list");
3074 
3075  // Store the register.
3076  EReg = MRI->getEncodingValue(Reg);
3077  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3078 
3079  // This starts immediately after the first register token in the list,
3080  // so we can see either a comma or a minus (range separator) as a legal
3081  // next token.
3082  while (Parser.getTok().is(AsmToken::Comma) ||
3083  Parser.getTok().is(AsmToken::Minus)) {
3084  if (Parser.getTok().is(AsmToken::Minus)) {
3085  Parser.Lex(); // Eat the minus.
3086  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3087  int EndReg = tryParseRegister();
3088  if (EndReg == -1)
3089  return Error(AfterMinusLoc, "register expected");
3090  // Allow Q regs and just interpret them as the two D sub-registers.
3091  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3092  EndReg = getDRegFromQReg(EndReg) + 1;
3093  // If the register is the same as the start reg, there's nothing
3094  // more to do.
3095  if (Reg == EndReg)
3096  continue;
3097  // The register must be in the same register class as the first.
3098  if (!RC->contains(EndReg))
3099  return Error(AfterMinusLoc, "invalid register in register list");
3100  // Ranges must go from low to high.
3101  if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
3102  return Error(AfterMinusLoc, "bad range in register list");
3103 
3104  // Add all the registers in the range to the register list.
3105  while (Reg != EndReg) {
3106  Reg = getNextRegister(Reg);
3107  EReg = MRI->getEncodingValue(Reg);
3108  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3109  }
3110  continue;
3111  }
3112  Parser.Lex(); // Eat the comma.
3113  RegLoc = Parser.getTok().getLoc();
3114  int OldReg = Reg;
3115  const AsmToken RegTok = Parser.getTok();
3116  Reg = tryParseRegister();
3117  if (Reg == -1)
3118  return Error(RegLoc, "register expected");
3119  // Allow Q regs and just interpret them as the two D sub-registers.
3120  bool isQReg = false;
3121  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3122  Reg = getDRegFromQReg(Reg);
3123  isQReg = true;
3124  }
3125  // The register must be in the same register class as the first.
3126  if (!RC->contains(Reg))
3127  return Error(RegLoc, "invalid register in register list");
3128  // List must be monotonically increasing.
3129  if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3130  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3131  Warning(RegLoc, "register list not in ascending order");
3132  else
3133  return Error(RegLoc, "register list not in ascending order");
3134  }
3135  if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3136  Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3137  ") in register list");
3138  continue;
3139  }
3140  // VFP register lists must also be contiguous.
3141  if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3142  Reg != OldReg + 1)
3143  return Error(RegLoc, "non-contiguous register range");
3144  EReg = MRI->getEncodingValue(Reg);
3145  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3146  if (isQReg) {
3147  EReg = MRI->getEncodingValue(++Reg);
3148  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3149  }
3150  }
3151 
3152  if (Parser.getTok().isNot(AsmToken::RCurly))
3153  return Error(Parser.getTok().getLoc(), "'}' expected");
3154  SMLoc E = Parser.getTok().getEndLoc();
3155  Parser.Lex(); // Eat '}' token.
3156 
3157  // Push the register list operand.
3158  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3159 
3160  // The ARM system instruction variants for LDM/STM have a '^' token here.
3161  if (Parser.getTok().is(AsmToken::Caret)) {
3162  Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3163  Parser.Lex(); // Eat '^' token.
3164  }
3165 
3166  return false;
3167 }
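// For illustration, a few register-list operands the code above accepts
// (a non-exhaustive sketch; diagnostics are the ones emitted above):
//   push  {r0, r2-r4, lr}    @ GPR list with a range
//   vpush {d8-d15}           @ DPR list, must stay contiguous
//   vldm  r0, {q0, q1}       @ Q regs allowed, expanded to d0-d3
//   ldm   r0, {r3, r1}       @ GPR list out of order: warning only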
3168 
3169 // Helper function to parse the lane index for vector lists.
3170 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3171 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3172  Index = 0; // Always return a defined index value.
3173  if (Parser.getTok().is(AsmToken::LBrac)) {
3174  Parser.Lex(); // Eat the '['.
3175  if (Parser.getTok().is(AsmToken::RBrac)) {
3176  // "Dn[]" is the 'all lanes' syntax.
3177  LaneKind = AllLanes;
3178  EndLoc = Parser.getTok().getEndLoc();
3179  Parser.Lex(); // Eat the ']'.
3180  return MatchOperand_Success;
3181  }
3182 
3183  // There's an optional '#' token here. Normally there wouldn't be, but
3184  // inline assembly puts one in, and it's friendly to accept that.
3185  if (Parser.getTok().is(AsmToken::Hash))
3186  Parser.Lex(); // Eat the '#'.
3187 
3188  const MCExpr *LaneIndex;
3189  SMLoc Loc = Parser.getTok().getLoc();
3190  if (getParser().parseExpression(LaneIndex)) {
3191  Error(Loc, "illegal expression");
3192  return MatchOperand_ParseFail;
3193  }
3194  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3195  if (!CE) {
3196  Error(Loc, "lane index must be empty or an integer");
3197  return MatchOperand_ParseFail;
3198  }
3199  if (Parser.getTok().isNot(AsmToken::RBrac)) {
3200  Error(Parser.getTok().getLoc(), "']' expected");
3201  return MatchOperand_ParseFail;
3202  }
3203  EndLoc = Parser.getTok().getEndLoc();
3204  Parser.Lex(); // Eat the ']'.
3205  int64_t Val = CE->getValue();
3206 
3207  // FIXME: Make this range check context sensitive for .8, .16, .32.
3208  if (Val < 0 || Val > 7) {
3209  Error(Parser.getTok().getLoc(), "lane index out of range");
3210  return MatchOperand_ParseFail;
3211  }
3212  Index = Val;
3213  LaneKind = IndexedLane;
3214  return MatchOperand_Success;
3215  }
3216  LaneKind = NoLanes;
3217  return MatchOperand_Success;
3218 }
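// The three lane syntaxes the helper above distinguishes, for illustration:
//   d0       @ NoLanes     (no '[' follows the register)
//   d0[]     @ AllLanes    ('all lanes' syntax)
//   d0[1]    @ IndexedLane (Index = 1; '#1' is also tolerated for inline asm)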
3219 
3220 // parse a vector register list
3221 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3222 parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3223  VectorLaneTy LaneKind;
3224  unsigned LaneIndex;
3225  SMLoc S = Parser.getTok().getLoc();
3226  // As an extension (to match gas), support a plain D register or Q register
3227  // (without enclosing curly braces) as a single- or double-entry list,
3228  // respectively.
3229  if (Parser.getTok().is(AsmToken::Identifier)) {
3230  SMLoc E = Parser.getTok().getEndLoc();
3231  int Reg = tryParseRegister();
3232  if (Reg == -1)
3233  return MatchOperand_NoMatch;
3234  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3235  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3236  if (Res != MatchOperand_Success)
3237  return Res;
3238  switch (LaneKind) {
3239  case NoLanes:
3240  Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3241  break;
3242  case AllLanes:
3243  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3244  S, E));
3245  break;
3246  case IndexedLane:
3247  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3248  LaneIndex,
3249  false, S, E));
3250  break;
3251  }
3252  return MatchOperand_Success;
3253  }
3254  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3255  Reg = getDRegFromQReg(Reg);
3256  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3257  if (Res != MatchOperand_Success)
3258  return Res;
3259  switch (LaneKind) {
3260  case NoLanes:
3261  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3262  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3263  Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3264  break;
3265  case AllLanes:
3266  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3267  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3268  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3269  S, E));
3270  break;
3271  case IndexedLane:
3272  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3273  LaneIndex,
3274  false, S, E));
3275  break;
3276  }
3277  return MatchOperand_Success;
3278  }
3279  Error(S, "vector register expected");
3280  return MatchOperand_ParseFail;
3281  }
3282 
3283  if (Parser.getTok().isNot(AsmToken::LCurly))
3284  return MatchOperand_NoMatch;
3285 
3286  Parser.Lex(); // Eat '{' token.
3287  SMLoc RegLoc = Parser.getTok().getLoc();
3288 
3289  int Reg = tryParseRegister();
3290  if (Reg == -1) {
3291  Error(RegLoc, "register expected");
3292  return MatchOperand_ParseFail;
3293  }
3294  unsigned Count = 1;
3295  int Spacing = 0;
3296  unsigned FirstReg = Reg;
3297  // The list is of D registers, but we also allow Q regs and just interpret
3298  // them as the two D sub-registers.
3299  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3300  FirstReg = Reg = getDRegFromQReg(Reg);
3301  Spacing = 1; // double-spacing requires explicit D registers, otherwise
3302  // it's ambiguous with four-register single spaced.
3303  ++Reg;
3304  ++Count;
3305  }
3306 
3307  SMLoc E;
3308  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3309  return MatchOperand_ParseFail;
3310 
3311  while (Parser.getTok().is(AsmToken::Comma) ||
3312  Parser.getTok().is(AsmToken::Minus)) {
3313  if (Parser.getTok().is(AsmToken::Minus)) {
3314  if (!Spacing)
3315  Spacing = 1; // Register range implies a single spaced list.
3316  else if (Spacing == 2) {
3317  Error(Parser.getTok().getLoc(),
3318  "sequential registers in double spaced list");
3319  return MatchOperand_ParseFail;
3320  }
3321  Parser.Lex(); // Eat the minus.
3322  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3323  int EndReg = tryParseRegister();
3324  if (EndReg == -1) {
3325  Error(AfterMinusLoc, "register expected");
3326  return MatchOperand_ParseFail;
3327  }
3328  // Allow Q regs and just interpret them as the two D sub-registers.
3329  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3330  EndReg = getDRegFromQReg(EndReg) + 1;
3331  // If the register is the same as the start reg, there's nothing
3332  // more to do.
3333  if (Reg == EndReg)
3334  continue;
3335  // The register must be in the same register class as the first.
3336  if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3337  Error(AfterMinusLoc, "invalid register in register list");
3338  return MatchOperand_ParseFail;
3339  }
3340  // Ranges must go from low to high.
3341  if (Reg > EndReg) {
3342  Error(AfterMinusLoc, "bad range in register list");
3343  return MatchOperand_ParseFail;
3344  }
3345  // Parse the lane specifier if present.
3346  VectorLaneTy NextLaneKind;
3347  unsigned NextLaneIndex;
3348  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3349  MatchOperand_Success)
3350  return MatchOperand_ParseFail;
3351  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3352  Error(AfterMinusLoc, "mismatched lane index in register list");
3353  return MatchOperand_ParseFail;
3354  }
3355 
3356  // Add all the registers in the range to the register list.
3357  Count += EndReg - Reg;
3358  Reg = EndReg;
3359  continue;
3360  }
3361  Parser.Lex(); // Eat the comma.
3362  RegLoc = Parser.getTok().getLoc();
3363  int OldReg = Reg;
3364  Reg = tryParseRegister();
3365  if (Reg == -1) {
3366  Error(RegLoc, "register expected");
3367  return MatchOperand_ParseFail;
3368  }
3369  // Vector register lists must be contiguous.
3370  // It's OK to use the enumeration values directly here (rather than the
3371  // encoding values), as the VFP register classes have the enum sorted properly.
3372  //
3373  // The list is of D registers, but we also allow Q regs and just interpret
3374  // them as the two D sub-registers.
3375  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3376  if (!Spacing)
3377  Spacing = 1; // Register range implies a single spaced list.
3378  else if (Spacing == 2) {
3379  Error(RegLoc,
3380  "invalid register in double-spaced list (must be 'D' register)");
3381  return MatchOperand_ParseFail;
3382  }
3383  Reg = getDRegFromQReg(Reg);
3384  if (Reg != OldReg + 1) {
3385  Error(RegLoc, "non-contiguous register range");
3386  return MatchOperand_ParseFail;
3387  }
3388  ++Reg;
3389  Count += 2;
3390  // Parse the lane specifier if present.
3391  VectorLaneTy NextLaneKind;
3392  unsigned NextLaneIndex;
3393  SMLoc LaneLoc = Parser.getTok().getLoc();
3394  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3395  MatchOperand_Success)
3396  return MatchOperand_ParseFail;
3397  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3398  Error(LaneLoc, "mismatched lane index in register list");
3399  return MatchOperand_ParseFail;
3400  }
3401  continue;
3402  }
3403  // Normal D register.
3404  // Figure out the register spacing (single or double) of the list if
3405  // we don't know it already.
3406  if (!Spacing)
3407  Spacing = 1 + (Reg == OldReg + 2);
3408 
3409  // Just check that it's contiguous and keep going.
3410  if (Reg != OldReg + Spacing) {
3411  Error(RegLoc, "non-contiguous register range");
3412  return MatchOperand_ParseFail;
3413  }
3414  ++Count;
3415  // Parse the lane specifier if present.
3416  VectorLaneTy NextLaneKind;
3417  unsigned NextLaneIndex;
3418  SMLoc EndLoc = Parser.getTok().getLoc();
3419  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3420  return MatchOperand_ParseFail;
3421  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3422  Error(EndLoc, "mismatched lane index in register list");
3423  return MatchOperand_ParseFail;
3424  }
3425  }
3426 
3427  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3428  Error(Parser.getTok().getLoc(), "'}' expected");
3429  return MatchOperand_ParseFail;
3430  }
3431  E = Parser.getTok().getEndLoc();
3432  Parser.Lex(); // Eat '}' token.
3433 
3434  switch (LaneKind) {
3435  case NoLanes:
3436  // Two-register operands have been converted to the
3437  // composite register classes.
3438  if (Count == 2) {
3439  const MCRegisterClass *RC = (Spacing == 1) ?
3440  &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3441  &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3442  FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3443  }
3444 
3445  Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3446  (Spacing == 2), S, E));
3447  break;
3448  case AllLanes:
3449  // Two-register operands have been converted to the
3450  // composite register classes.
3451  if (Count == 2) {
3452  const MCRegisterClass *RC = (Spacing == 1) ?
3453  &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3454  &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3455  FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3456  }
3457  Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3458  (Spacing == 2),
3459  S, E));
3460  break;
3461  case IndexedLane:
3462  Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3463  LaneIndex,
3464  (Spacing == 2),
3465  S, E));
3466  break;
3467  }
3468  return MatchOperand_Success;
3469 }
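// Illustrative vector-list operands built by the routine above (assuming the
// usual NEON load/store mnemonics around them):
//   vld1.32 {d0, d1}, [r0]       @ single-spaced pair -> DPair class
//   vld2.8  {d0, d2}, [r0]       @ spacing inferred as double-spaced
//   vld1.32 {q0}, [r0]           @ Q reg treated as {d0, d1}
//   vld1.8  {d0[], d1[]}, [r0]   @ all-lanes list
//   vld1.8  {d0[3]}, [r0]        @ indexed-lane list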
3470 
3471 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3472 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3473 parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3474  SMLoc S = Parser.getTok().getLoc();
3475  const AsmToken &Tok = Parser.getTok();
3476  unsigned Opt;
3477 
3478  if (Tok.is(AsmToken::Identifier)) {
3479  StringRef OptStr = Tok.getString();
3480 
3481  Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
3482  .Case("sy", ARM_MB::SY)
3483  .Case("st", ARM_MB::ST)
3484  .Case("ld", ARM_MB::LD)
3485  .Case("sh", ARM_MB::ISH)
3486  .Case("ish", ARM_MB::ISH)
3487  .Case("shst", ARM_MB::ISHST)
3488  .Case("ishst", ARM_MB::ISHST)
3489  .Case("ishld", ARM_MB::ISHLD)
3490  .Case("nsh", ARM_MB::NSH)
3491  .Case("un", ARM_MB::NSH)
3492  .Case("nshst", ARM_MB::NSHST)
3493  .Case("nshld", ARM_MB::NSHLD)
3494  .Case("unst", ARM_MB::NSHST)
3495  .Case("osh", ARM_MB::OSH)
3496  .Case("oshst", ARM_MB::OSHST)
3497  .Case("oshld", ARM_MB::OSHLD)
3498  .Default(~0U);
3499 
3500  // ishld, oshld, nshld and ld are only available from ARMv8.
3501  if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
3502  Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
3503  Opt = ~0U;
3504 
3505  if (Opt == ~0U)
3506  return MatchOperand_NoMatch;
3507 
3508  Parser.Lex(); // Eat identifier token.
3509  } else if (Tok.is(AsmToken::Hash) ||
3510  Tok.is(AsmToken::Dollar) ||
3511  Tok.is(AsmToken::Integer)) {
3512  if (Parser.getTok().isNot(AsmToken::Integer))
3513  Parser.Lex(); // Eat '#' or '$'.
3514  SMLoc Loc = Parser.getTok().getLoc();
3515 
3516  const MCExpr *MemBarrierID;
3517  if (getParser().parseExpression(MemBarrierID)) {
3518  Error(Loc, "illegal expression");
3519  return MatchOperand_ParseFail;
3520  }
3521 
3522  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3523  if (!CE) {
3524  Error(Loc, "constant expression expected");
3525  return MatchOperand_ParseFail;
3526  }
3527 
3528  int Val = CE->getValue();
3529  if (Val & ~0xf) {
3530  Error(Loc, "immediate value out of range");
3531  return MatchOperand_ParseFail;
3532  }
3533 
3534  Opt = ARM_MB::RESERVED_0 + Val;
3535  } else
3536  return MatchOperand_ParseFail;
3537 
3538  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3539  return MatchOperand_Success;
3540 }
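// Barrier options accepted above, for illustration:
//   dmb ish       @ named option, matched case-insensitively
//   dsb st
//   dmb #15       @ raw 4-bit immediate form (0-15)
//   dmb ishld     @ load-only options require ARMv8 (see the hasV8Ops() check)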
3541 
3542 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
3543 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3544 parseInstSyncBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3545  SMLoc S = Parser.getTok().getLoc();
3546  const AsmToken &Tok = Parser.getTok();
3547  unsigned Opt;
3548 
3549  if (Tok.is(AsmToken::Identifier)) {
3550  StringRef OptStr = Tok.getString();
3551 
3552  if (OptStr.equals_lower("sy"))
3553  Opt = ARM_ISB::SY;
3554  else
3555  return MatchOperand_NoMatch;
3556 
3557  Parser.Lex(); // Eat identifier token.
3558  } else if (Tok.is(AsmToken::Hash) ||
3559  Tok.is(AsmToken::Dollar) ||
3560  Tok.is(AsmToken::Integer)) {
3561  if (Parser.getTok().isNot(AsmToken::Integer))
3562  Parser.Lex(); // Eat '#' or '$'.
3563  SMLoc Loc = Parser.getTok().getLoc();
3564 
3565  const MCExpr *ISBarrierID;
3566  if (getParser().parseExpression(ISBarrierID)) {
3567  Error(Loc, "illegal expression");
3568  return MatchOperand_ParseFail;
3569  }
3570 
3571  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
3572  if (!CE) {
3573  Error(Loc, "constant expression expected");
3574  return MatchOperand_ParseFail;
3575  }
3576 
3577  int Val = CE->getValue();
3578  if (Val & ~0xf) {
3579  Error(Loc, "immediate value out of range");
3580  return MatchOperand_ParseFail;
3581  }
3582 
3583  Opt = ARM_ISB::RESERVED_0 + Val;
3584  } else
3585  return MatchOperand_ParseFail;
3586 
3587  Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
3588  (ARM_ISB::InstSyncBOpt)Opt, S));
3589  return MatchOperand_Success;
3590 }
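// Likewise for ISB, only 'sy' is recognized by name; other 4-bit values must
// be written as immediates:
//   isb sy
//   isb #15       @ same encoding as 'sy'
//   isb #4        @ reserved value, still encodable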
3591 
3592 
3593 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
3594 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3595 parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3596  SMLoc S = Parser.getTok().getLoc();
3597  const AsmToken &Tok = Parser.getTok();
3598  if (!Tok.is(AsmToken::Identifier))
3599  return MatchOperand_NoMatch;
3600  StringRef IFlagsStr = Tok.getString();
3601 
3602  // An iflags string of "none" is interpreted to mean that none of the AIF
3603  // bits are set. Not a terribly useful instruction, but a valid encoding.
3604  unsigned IFlags = 0;
3605  if (IFlagsStr != "none") {
3606  for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3607  unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3608  .Case("a", ARM_PROC::A)
3609  .Case("i", ARM_PROC::I)
3610  .Case("f", ARM_PROC::F)
3611  .Default(~0U);
3612 
3613  // If some specific iflag is already set, it means that some letter is
3614  // present more than once, which is not acceptable.
3615  if (Flag == ~0U || (IFlags & Flag))
3616  return MatchOperand_NoMatch;
3617 
3618  IFlags |= Flag;
3619  }
3620  }
3621 
3622  Parser.Lex(); // Eat identifier token.
3623  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3624  return MatchOperand_Success;
3625 }
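// CPS iflags examples (each of 'a', 'i', 'f' may appear at most once):
//   cpsie i       @ enable IRQ
//   cpsid aif     @ disable all three
//   cpsie none    @ no AIF bits set; unusual but a valid encoding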
3626 
3627 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
3628 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3629 parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3630  SMLoc S = Parser.getTok().getLoc();
3631  const AsmToken &Tok = Parser.getTok();
3632  if (!Tok.is(AsmToken::Identifier))
3633  return MatchOperand_NoMatch;
3634  StringRef Mask = Tok.getString();
3635 
3636  if (isMClass()) {
3637  // See ARMv6-M 10.1.1
3638  std::string Name = Mask.lower();
3639  unsigned FlagsVal = StringSwitch<unsigned>(Name)
3640  // Note: in the documentation:
3641  // ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
3642  // for MSR APSR_nzcvq.
3643  // but we do make it an alias here. This is done to get the "mask encoding"
3644  // bits correct on MSR APSR writes.
3645  //
3646  // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
3647  // should really only be allowed when writing a special register. Note
3648  // they get dropped in the MRS instruction reading a special register as
3649  // the SYSm field is only 8 bits.
3650  //
3651  // FIXME: the _g and _nzcvqg versions are only allowed if the processor
3652  // includes the DSP extension but that is not checked.
3653  .Case("apsr", 0x800)
3654  .Case("apsr_nzcvq", 0x800)
3655  .Case("apsr_g", 0x400)
3656  .Case("apsr_nzcvqg", 0xc00)
3657  .Case("iapsr", 0x801)
3658  .Case("iapsr_nzcvq", 0x801)
3659  .Case("iapsr_g", 0x401)
3660  .Case("iapsr_nzcvqg", 0xc01)
3661  .Case("eapsr", 0x802)
3662  .Case("eapsr_nzcvq", 0x802)
3663  .Case("eapsr_g", 0x402)
3664  .Case("eapsr_nzcvqg", 0xc02)
3665  .Case("xpsr", 0x803)
3666  .Case("xpsr_nzcvq", 0x803)
3667  .Case("xpsr_g", 0x403)
3668  .Case("xpsr_nzcvqg", 0xc03)
3669  .Case("ipsr", 0x805)
3670  .Case("epsr", 0x806)
3671  .Case("iepsr", 0x807)
3672  .Case("msp", 0x808)
3673  .Case("psp", 0x809)
3674  .Case("primask", 0x810)
3675  .Case("basepri", 0x811)
3676  .Case("basepri_max", 0x812)
3677  .Case("faultmask", 0x813)
3678  .Case("control", 0x814)
3679  .Default(~0U);
3680 
3681  if (FlagsVal == ~0U)
3682  return MatchOperand_NoMatch;
3683 
3684  if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
3685  // basepri, basepri_max and faultmask only valid for V7m.
3686  return MatchOperand_NoMatch;
3687 
3688  Parser.Lex(); // Eat identifier token.
3689  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3690  return MatchOperand_Success;
3691  }
3692 
3693  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3694  size_t Start = 0, Next = Mask.find('_');
3695  StringRef Flags = "";
3696  std::string SpecReg = Mask.slice(Start, Next).lower();
3697  if (Next != StringRef::npos)
3698  Flags = Mask.slice(Next+1, Mask.size());
3699 
3700  // FlagsVal contains the complete mask:
3701  // 3-0: Mask
3702  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3703  unsigned FlagsVal = 0;
3704 
3705  if (SpecReg == "apsr") {
3706  FlagsVal = StringSwitch<unsigned>(Flags)
3707  .Case("nzcvq", 0x8) // same as CPSR_f
3708  .Case("g", 0x4) // same as CPSR_s
3709  .Case("nzcvqg", 0xc) // same as CPSR_fs
3710  .Default(~0U);
3711 
3712  if (FlagsVal == ~0U) {
3713  if (!Flags.empty())
3714  return MatchOperand_NoMatch;
3715  else
3716  FlagsVal = 8; // No flag
3717  }
3718  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3719  // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3720  if (Flags == "all" || Flags == "")
3721  Flags = "fc";
3722  for (int i = 0, e = Flags.size(); i != e; ++i) {
3723  unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3724  .Case("c", 1)
3725  .Case("x", 2)
3726  .Case("s", 4)
3727  .Case("f", 8)
3728  .Default(~0U);
3729 
3730  // If some specific flag is already set, it means that some letter is
3731  // present more than once, which is not acceptable.
3732  if (FlagsVal == ~0U || (FlagsVal & Flag))
3733  return MatchOperand_NoMatch;
3734  FlagsVal |= Flag;
3735  }
3736  } else // No match for special register.
3737  return MatchOperand_NoMatch;
3738 
3739  // Special register without flags is NOT equivalent to "fc" flags.
3740  // NOTE: This is a divergence from gas' behavior. Uncommenting the following
3741  // two lines would enable gas compatibility at the expense of breaking
3742  // round-tripping.
3743  //
3744  // if (!FlagsVal)
3745  // FlagsVal = 0x9;
3746 
3747  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3748  if (SpecReg == "spsr")
3749  FlagsVal |= 16;
3750 
3751  Parser.Lex(); // Eat identifier token.
3752  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3753  return MatchOperand_Success;
3754 }
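// How the A/R-profile mask bits assemble, as a sketch (M-profile uses the
// SYSm table above instead):
//   msr cpsr_fc, r0     @ 'f'|'c' -> 0x9, CPSR keeps bit 4 clear
//   msr spsr_fsxc, r0   @ all four letters -> 0xf, SPSR adds bit 4 -> 0x1f
//   msr apsr_nzcvq, r0  @ 'nzcvq' -> 0x8, same mask as cpsr_f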
3755 
3756 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3757 parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3758  int Low, int High) {
3759  const AsmToken &Tok = Parser.getTok();
3760  if (Tok.isNot(AsmToken::Identifier)) {
3761  Error(Parser.getTok().getLoc(), Op + " operand expected.");
3762  return MatchOperand_ParseFail;
3763  }
3764  StringRef ShiftName = Tok.getString();
3765  std::string LowerOp = Op.lower();
3766  std::string UpperOp = Op.upper();
3767  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3768  Error(Parser.getTok().getLoc(), Op + " operand expected.");
3769  return MatchOperand_ParseFail;
3770  }
3771  Parser.Lex(); // Eat shift type token.
3772 
3773  // There must be a '#' and a shift amount.
3774  if (Parser.getTok().isNot(AsmToken::Hash) &&
3775  Parser.getTok().isNot(AsmToken::Dollar)) {
3776  Error(Parser.getTok().getLoc(), "'#' expected");
3777  return MatchOperand_ParseFail;
3778  }
3779  Parser.Lex(); // Eat hash token.
3780 
3781  const MCExpr *ShiftAmount;
3782  SMLoc Loc = Parser.getTok().getLoc();
3783  SMLoc EndLoc;
3784  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3785  Error(Loc, "illegal expression");
3786  return MatchOperand_ParseFail;
3787  }
3788  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3789  if (!CE) {
3790  Error(Loc, "constant expression expected");
3791  return MatchOperand_ParseFail;
3792  }
3793  int Val = CE->getValue();
3794  if (Val < Low || Val > High) {
3795  Error(Loc, "immediate value out of range");
3796  return MatchOperand_ParseFail;
3797  }
3798 
3799  Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
3800 
3801  return MatchOperand_Success;
3802 }
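// Illustrative PKH operands; the caller supplies Op ("lsl" or "asr") and the
// [Low, High] range, so the examples below assume the usual PKHBT/PKHTB use:
//   pkhbt r0, r1, r2, lsl #8
//   pkhtb r0, r1, r2, asr #16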
3803 
3804 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3805 parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3806  const AsmToken &Tok = Parser.getTok();
3807  SMLoc S = Tok.getLoc();
3808  if (Tok.isNot(AsmToken::Identifier)) {
3809  Error(S, "'be' or 'le' operand expected");
3810  return MatchOperand_ParseFail;
3811  }
3812  int Val = StringSwitch<int>(Tok.getString().lower())
3813  .Case("be", 1)
3814  .Case("le", 0)
3815  .Default(-1);
3816  Parser.Lex(); // Eat the token.
3817 
3818  if (Val == -1) {
3819  Error(S, "'be' or 'le' operand expected");
3820  return MatchOperand_ParseFail;
3821  }
3822  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3823  getContext()),
3824  S, Tok.getEndLoc()));
3825  return MatchOperand_Success;
3826 }
3827 
3828 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3829 /// instructions. Legal values are:
3830 /// lsl #n 'n' in [0,31]
3831 /// asr #n 'n' in [1,32]
3832 /// n == 32 encoded as n == 0.
3833 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3834 parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3835  const AsmToken &Tok = Parser.getTok();
3836  SMLoc S = Tok.getLoc();
3837  if (Tok.isNot(AsmToken::Identifier)) {
3838  Error(S, "shift operator 'asr' or 'lsl' expected");
3839  return MatchOperand_ParseFail;
3840  }
3841  StringRef ShiftName = Tok.getString();
3842  bool isASR;
3843  if (ShiftName == "lsl" || ShiftName == "LSL")
3844  isASR = false;
3845  else if (ShiftName == "asr" || ShiftName == "ASR")
3846  isASR = true;
3847  else {
3848  Error(S, "shift operator 'asr' or 'lsl' expected");
3849  return MatchOperand_ParseFail;
3850  }
3851  Parser.Lex(); // Eat the operator.
3852 
3853  // A '#' and a shift amount.
3854  if (Parser.getTok().isNot(AsmToken::Hash) &&
3855  Parser.getTok().isNot(AsmToken::Dollar)) {
3856  Error(Parser.getTok().getLoc(), "'#' expected");
3857  return MatchOperand_ParseFail;
3858  }
3859  Parser.Lex(); // Eat hash token.
3860  SMLoc ExLoc = Parser.getTok().getLoc();
3861 
3862  const MCExpr *ShiftAmount;
3863  SMLoc EndLoc;
3864  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3865  Error(ExLoc, "malformed shift expression");
3866  return MatchOperand_ParseFail;
3867  }
3868  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3869  if (!CE) {
3870  Error(ExLoc, "shift amount must be an immediate");
3871  return MatchOperand_ParseFail;
3872  }
3873 
3874  int64_t Val = CE->getValue();
3875  if (isASR) {
3876  // Shift amount must be in [1,32]
3877  if (Val < 1 || Val > 32) {
3878  Error(ExLoc, "'asr' shift amount must be in range [1,32]");
3879  return MatchOperand_ParseFail;
3880  }
3881  // asr #32 encoded as asr #0, but is not allowed in Thumb mode.
3882  if (isThumb() && Val == 32) {
3883  Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
3884  return MatchOperand_ParseFail;
3885  }
3886  if (Val == 32) Val = 0;
3887  } else {
3888  // Shift amount must be in [0,31]
3889  if (Val < 0 || Val > 31) {
3890  Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
3891  return MatchOperand_ParseFail;
3892  }
3893  }
3894 
3895  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
3896 
3897  return MatchOperand_Success;
3898 }
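// SSAT/USAT shifter examples and the encoding applied above, for illustration:
//   ssat r0, #8, r1, lsl #4     @ lsl amount kept as-is (0-31)
//   ssat r0, #8, r1, asr #32    @ ARM mode only; encoded as asr #0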
3899 
3900 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3901 /// of instructions. Legal values are:
3902 /// ror #n 'n' in {0, 8, 16, 24}
3903 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3904 parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3905  const AsmToken &Tok = Parser.getTok();
3906  SMLoc S = Tok.getLoc();
3907  if (Tok.isNot(AsmToken::Identifier))
3908  return MatchOperand_NoMatch;
3909  StringRef ShiftName = Tok.getString();
3910  if (ShiftName != "ror" && ShiftName != "ROR")
3911  return MatchOperand_NoMatch;
3912  Parser.Lex(); // Eat the operator.
3913 
3914  // A '#' and a rotate amount.
3915  if (Parser.getTok().isNot(AsmToken::Hash) &&
3916  Parser.getTok().isNot(AsmToken::Dollar)) {
3917  Error(Parser.getTok().getLoc(), "'#' expected");
3918  return MatchOperand_ParseFail;
3919  }
3920  Parser.Lex(); // Eat hash token.
3921  SMLoc ExLoc = Parser.getTok().getLoc();
3922 
3923  const MCExpr *ShiftAmount;
3924  SMLoc EndLoc;
3925  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3926  Error(ExLoc, "malformed rotate expression");
3927  return MatchOperand_ParseFail;
3928  }
3929  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3930  if (!CE) {
3931  Error(ExLoc, "rotate amount must be an immediate");
3932  return MatchOperand_ParseFail;
3933  }
3934 
3935  int64_t Val = CE->getValue();
3936  // Rotate amount must be in {0, 8, 16, 24}; accepting 0 is an undocumented
3937  // extension, since zero is normally represented in asm by omitting the
3938  // rotate operand entirely.
3939  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3940  Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
3941  return MatchOperand_ParseFail;
3942  }
3943 
3944  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
3945 
3946  return MatchOperand_Success;
3947 }
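// SXTB/UXTB-family rotate examples (a rotate of 0 is normally written by
// omitting the operand entirely):
//   uxtb r0, r1, ror #8
//   sxth r0, r1, ror #24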
3948 
3949 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3950 parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3951  SMLoc S = Parser.getTok().getLoc();
3952  // The bitfield descriptor is really two operands, the LSB and the width.
3953  if (Parser.getTok().isNot(AsmToken::Hash) &&
3954  Parser.getTok().isNot(AsmToken::Dollar)) {
3955  Error(Parser.getTok().getLoc(), "'#' expected");
3956  return MatchOperand_ParseFail;
3957  }
3958  Parser.Lex(); // Eat hash token.
3959 
3960  const MCExpr *LSBExpr;
3961  SMLoc E = Parser.getTok().getLoc();
3962  if (getParser().parseExpression(LSBExpr)) {
3963  Error(E, "malformed immediate expression");
3964  return MatchOperand_ParseFail;
3965  }
3966  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3967  if (!CE) {
3968  Error(E, "'lsb' operand must be an immediate");
3969  return MatchOperand_ParseFail;
3970  }
3971 
3972  int64_t LSB = CE->getValue();
3973  // The LSB must be in the range [0,31]
3974  if (LSB < 0 || LSB > 31) {
3975  Error(E, "'lsb' operand must be in the range [0,31]");
3976  return MatchOperand_ParseFail;
3977  }
3978  E = Parser.getTok().getLoc();
3979 
3980  // Expect another immediate operand.
3981  if (Parser.getTok().isNot(AsmToken::Comma)) {
3982  Error(Parser.getTok().getLoc(), "too few operands");
3983  return MatchOperand_ParseFail;
3984  }
3985  Parser.Lex(); // Eat comma token.
3986  if (Parser.getTok().isNot(AsmToken::Hash) &&
3987  Parser.getTok().isNot(AsmToken::Dollar)) {
3988  Error(Parser.getTok().getLoc(), "'#' expected");
3989  return MatchOperand_ParseFail;
3990  }
3991  Parser.Lex(); // Eat hash token.
3992 
3993  const MCExpr *WidthExpr;
3994  SMLoc EndLoc;
3995  if (getParser().parseExpression(WidthExpr, EndLoc)) {
3996  Error(E, "malformed immediate expression");
3997  return MatchOperand_ParseFail;
3998  }
3999  CE = dyn_cast<MCConstantExpr>(WidthExpr);
4000  if (!CE) {
4001  Error(E, "'width' operand must be an immediate");
4002  return MatchOperand_ParseFail;
4003  }
4004 
4005  int64_t Width = CE->getValue();
4006  // The width must be in the range [1,32-lsb]
4007  if (Width < 1 || Width > 32 - LSB) {
4008  Error(E, "'width' operand must be in the range [1,32-lsb]");
4009  return MatchOperand_ParseFail;
4010  }
4011 
4012  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4013 
4014  return MatchOperand_Success;
4015 }
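// The two immediates are constrained together: lsb in [0,31] and width in
// [1, 32-lsb], so the field never crosses bit 31. For example:
//   bfi r0, r1, #28, #4    @ accepted: 28 + 4 <= 32
//   bfc r0, #28, #5        @ rejected: width exceeds 32 - lsb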
4016 
4017 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4018 parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4019  // Check for a post-index addressing register operand. Specifically:
4020  // postidx_reg := '+' register {, shift}
4021  // | '-' register {, shift}
4022  // | register {, shift}
4023 
4024  // This method must return MatchOperand_NoMatch without consuming any tokens
4025  // in the case where there is no match, as other alternatives take other
4026  // parse methods.
4027  AsmToken Tok = Parser.getTok();
4028  SMLoc S = Tok.getLoc();
4029  bool haveEaten = false;
4030  bool isAdd = true;
4031  if (Tok.is(AsmToken::Plus)) {
4032  Parser.Lex(); // Eat the '+' token.
4033  haveEaten = true;
4034  } else if (Tok.is(AsmToken::Minus)) {
4035  Parser.Lex(); // Eat the '-' token.
4036  isAdd = false;
4037  haveEaten = true;
4038  }
4039 
4040  SMLoc E = Parser.getTok().getEndLoc();
4041  int Reg = tryParseRegister();
4042  if (Reg == -1) {
4043  if (!haveEaten)
4044  return MatchOperand_NoMatch;
4045  Error(Parser.getTok().getLoc(), "register expected");
4046  return MatchOperand_ParseFail;
4047  }
4048 
4049  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4050  unsigned ShiftImm = 0;
4051  if (Parser.getTok().is(AsmToken::Comma)) {
4052  Parser.Lex(); // Eat the ','.
4053  if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4054  return MatchOperand_ParseFail;
4055 
4056  // FIXME: Only approximates end...may include intervening whitespace.
4057  E = Parser.getTok().getLoc();
4058  }
4059 
4060  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4061  ShiftImm, S, E));
4062 
4063  return MatchOperand_Success;
4064 }
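// Post-indexed register offsets handled above, for illustration:
//   ldr r0, [r1], r2            @ plain register, isAdd stays true
//   ldr r0, [r1], -r2           @ '-' eaten here, isAdd = false
//   ldr r0, [r1], r2, lsl #2    @ optional shift via parseMemRegOffsetShift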
4065 
4066 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4067 parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4068  // Check for a post-index addressing register operand. Specifically:
4069  // am3offset := '+' register
4070  // | '-' register
4071  // | register
4072  // | # imm
4073  // | # + imm
4074  // | # - imm
4075 
4076  // This method must return MatchOperand_NoMatch without consuming any tokens
4077  // in the case where there is no match, as other alternatives take other
4078  // parse methods.
4079  AsmToken Tok = Parser.getTok();
4080  SMLoc S = Tok.getLoc();
4081 
4082  // Do immediates first, as we always parse those if we have a '#'.
4083  if (Parser.getTok().is(AsmToken::Hash) ||
4084  Parser.getTok().is(AsmToken::Dollar)) {
4085  Parser.Lex(); // Eat '#' or '$'.
4086  // Explicitly look for a '-', as we need to encode negative zero
4087  // differently.
4088  bool isNegative = Parser.getTok().is(AsmToken::Minus);
4089  const MCExpr *Offset;
4090  SMLoc E;
4091  if (getParser().parseExpression(Offset, E))
4092  return MatchOperand_ParseFail;
4093  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4094  if (!CE) {
4095  Error(S, "constant expression expected");
4096  return MatchOperand_ParseFail;
4097  }
4098  // Negative zero is encoded as the flag value INT32_MIN.
4099  int32_t Val = CE->getValue();
4100  if (isNegative && Val == 0)
4101  Val = INT32_MIN;
4102 
4103  Operands.push_back(
4104  ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
4105 
4106  return MatchOperand_Success;
4107  }
4108 
4109 
4110  bool haveEaten = false;
4111  bool isAdd = true;
4112  if (Tok.is(AsmToken::Plus)) {
4113  Parser.Lex(); // Eat the '+' token.
4114  haveEaten = true;
4115  } else if (Tok.is(AsmToken::Minus)) {
4116  Parser.Lex(); // Eat the '-' token.
4117  isAdd = false;
4118  haveEaten = true;
4119  }
4120 
4121  Tok = Parser.getTok();
4122  int Reg = tryParseRegister();
4123  if (Reg == -1) {
4124  if (!haveEaten)
4125  return MatchOperand_NoMatch;
4126  Error(Tok.getLoc(), "register expected");
4127  return MatchOperand_ParseFail;
4128  }
4129 
4130  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4131  0, S, Tok.getEndLoc()));
4132 
4133  return MatchOperand_Success;
4134 }
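// am3offset examples (LDRD/STRD and similar), for illustration:
//   ldrd r0, r1, [r2], #8       @ immediate offset
//   ldrd r0, r1, [r2], #-0      @ negative zero kept distinct via INT32_MIN
//   ldrd r0, r1, [r2], -r3      @ register offset, no shift permitted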
4135 
4136 /// Convert parsed operands to MCInst. Needed here because this instruction
4137 /// only has two register operands, but multiplication is commutative so
4138 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
4139 void ARMAsmParser::
4140 cvtThumbMultiply(MCInst &Inst,
4141  const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4142  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4143  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4144  // If we have a three-operand form, make sure to set Rn to be the operand
4145  // that isn't the same as Rd.
4146  unsigned RegOp = 4;
4147  if (Operands.size() == 6 &&
4148  ((ARMOperand*)Operands[4])->getReg() ==
4149  ((ARMOperand*)Operands[3])->getReg())
4150  RegOp = 5;
4151  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4152  Inst.addOperand(Inst.getOperand(0));
4153  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4154 }
4155 
4156 void ARMAsmParser::
4157 cvtThumbBranches(MCInst &Inst,
4158  const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4159  int CondOp = -1, ImmOp = -1;
4160  switch(Inst.getOpcode()) {
4161  case ARM::tB:
4162  case ARM::tBcc: CondOp = 1; ImmOp = 2; break;
4163 
4164  case ARM::t2B:
4165  case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
4166 
4167  default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
4168  }
4169  // First decide whether or not the branch should be conditional
4170  // by looking at its location relative to an IT block.
4171  if(inITBlock()) {
4172  // Inside an IT block we cannot have any conditional branches. Any
4173  // such instruction needs to be converted to unconditional form.
4174  switch(Inst.getOpcode()) {
4175  case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
4176  case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
4177  }
4178  } else {
4179  // outside IT blocks we can only have unconditional branches with AL
4180  // condition code or conditional branches with non-AL condition code
4181  unsigned Cond = static_cast<ARMOperand*>(Operands[CondOp])->getCondCode();
4182  switch(Inst.getOpcode()) {
4183  case ARM::tB:
4184  case ARM::tBcc:
4185  Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
4186  break;
4187  case ARM::t2B:
4188  case ARM::t2Bcc:
4189  Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
4190  break;
4191  }
4192  }
4193 
4194  // now decide on encoding size based on branch target range
4195  switch(Inst.getOpcode()) {
4196  // classify tB as either narrow (tB) or wide (t2B) based on range of immediate operand
4197  case ARM::tB: {
4198  ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
4199  if(!op->isSignedOffset<11, 1>() && isThumbTwo())
4200  Inst.setOpcode(ARM::t2B);
4201  break;
4202  }
4203  // classify tBcc as either narrow (tBcc) or wide (t2Bcc) based on range of immediate operand
4204  case ARM::tBcc: {
4205  ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
4206  if(!op->isSignedOffset<8, 1>() && isThumbTwo())
4207  Inst.setOpcode(ARM::t2Bcc);
4208  break;
4209  }
4210  }
4211  ((ARMOperand*)Operands[ImmOp])->addImmOperands(Inst, 1);
4212  ((ARMOperand*)Operands[CondOp])->addCondCodeOperands(Inst, 2);
4213 }
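// Sketch of the rewriting above: inside an IT block the conditional opcode is
// demoted to its unconditional form (the IT instruction supplies the
// predicate); outside one, the opcode is chosen from the condition code. A
// narrow branch whose target does not fit the signed 11-bit (tB) or 8-bit
// (tBcc) range, scaled by 2, is then widened to t2B/t2Bcc when Thumb2 is
// available, e.g. (label name is a placeholder):
//   bne far_away_label    @ tBcc widened to t2Bcc if out of narrow range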
4214 
4215 /// Parse an ARM memory operand expression. Return false on success; otherwise
4216 /// emit an error and return true. The first token must be a '[' when called.
4217 bool ARMAsmParser::
4218 parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4219  SMLoc S, E;
4220  assert(Parser.getTok().is(AsmToken::LBrac) &&
4221  "Token is not a Left Bracket");
4222  S = Parser.getTok().getLoc();
4223  Parser.Lex(); // Eat left bracket token.
4224 
4225  const AsmToken &BaseRegTok = Parser.getTok();
4226  int BaseRegNum = tryParseRegister();
4227  if (BaseRegNum == -1)
4228  return Error(BaseRegTok.getLoc(), "register expected");
4229 
4230  // The next token must either be a comma, a colon or a closing bracket.
4231  const AsmToken &Tok = Parser.getTok();
4232  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4233  !Tok.is(AsmToken::RBrac))
4234  return Error(Tok.getLoc(), "malformed memory operand");
4235 
4236  if (Tok.is(AsmToken::RBrac)) {
4237  E = Tok.getEndLoc();
4238  Parser.Lex(); // Eat right bracket token.
4239 
4240  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4241  0, 0, false, S, E));
4242 
4243  // If there's a pre-indexing writeback marker, '!', just add it as a token
4244  // operand. It's rather odd, but syntactically valid.
4245  if (Parser.getTok().is(AsmToken::Exclaim)) {
4246  Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4247  Parser.Lex(); // Eat the '!'.
4248  }
4249 
4250  return false;
4251  }
4252 
4253  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4254  "Lost colon or comma in memory operand?!");
4255  if (Tok.is(AsmToken::Comma)) {
4256  Parser.Lex(); // Eat the comma.
4257  }
4258 
4259  // If we have a ':', it's an alignment specifier.
4260  if (Parser.getTok().is(AsmToken::Colon)) {
4261  Parser.Lex(); // Eat the ':'.
4262  E = Parser.getTok().getLoc();
4263 
4264  const MCExpr *Expr;
4265  if (getParser().parseExpression(Expr))
4266  return true;
4267 
4268  // The expression has to be a constant. Memory references with relocations
4269  // don't come through here, as they use the <label> forms of the relevant
4270  // instructions.
4271  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4272  if (!CE)
4273  return Error (E, "constant expression expected");
4274 
4275  unsigned Align = 0;
4276  switch (CE->getValue()) {
4277  default:
4278  return Error(E,
4279  "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4280  case 16: Align = 2; break;
4281  case 32: Align = 4; break;
4282  case 64: Align = 8; break;
4283  case 128: Align = 16; break;
4284  case 256: Align = 32; break;
4285  }
4286 
4287  // Now we should have the closing ']'
4288  if (Parser.getTok().isNot(AsmToken::RBrac))
4289  return Error(Parser.getTok().getLoc(), "']' expected");
4290  E = Parser.getTok().getEndLoc();
4291  Parser.Lex(); // Eat right bracket token.
4292 
4293  // Don't worry about range checking the value here. That's handled by
4294  // the is*() predicates.
4295  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4296  ARM_AM::no_shift, 0, Align,
4297  false, S, E));
4298 
4299  // If there's a pre-indexing writeback marker, '!', just add it as a token
4300  // operand.
4301  if (Parser.getTok().is(AsmToken::Exclaim)) {
4302  Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4303  Parser.Lex(); // Eat the '!'.
4304  }
4305 
4306  return false;
4307  }
4308 
4309  // If we have a '#', it's an immediate offset, else assume it's a register
4310  // offset. Be friendly and also accept a plain integer (without a leading
4311  // hash) for gas compatibility.
4312  if (Parser.getTok().is(AsmToken::Hash) ||
4313  Parser.getTok().is(AsmToken::Dollar) ||
4314  Parser.getTok().is(AsmToken::Integer)) {
4315  if (Parser.getTok().isNot(AsmToken::Integer))
4316  Parser.Lex(); // Eat '#' or '$'.
4317  E = Parser.getTok().getLoc();
4318 
4319  bool isNegative = getParser().getTok().is(AsmToken::Minus);
4320  const MCExpr *Offset;
4321  if (getParser().parseExpression(Offset))
4322  return true;
4323 
4324  // The expression has to be a constant. Memory references with relocations
4325  // don't come through here, as they use the <label> forms of the relevant
4326  // instructions.
4327  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4328  if (!CE)
4329  return Error (E, "constant expression expected");
4330 
4331  // If the constant was #-0, represent it as INT32_MIN.
4332  int32_t Val = CE->getValue();
4333  if (isNegative && Val == 0)
4334  CE = MCConstantExpr::Create(INT32_MIN, getContext());
4335 
4336  // Now we should have the closing ']'
4337  if (Parser.getTok().isNot(AsmToken::RBrac))
4338  return Error(Parser.getTok().getLoc(), "']' expected");
4339  E = Parser.getTok().getEndLoc();
4340  Parser.Lex(); // Eat right bracket token.
4341 
4342  // Don't worry about range checking the value here. That's handled by
4343  // the is*() predicates.
4344  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4345  ARM_AM::no_shift, 0, 0,
4346  false, S, E));
4347 
4348  // If there's a pre-indexing writeback marker, '!', just add it as a token
4349  // operand.
4350  if (Parser.getTok().is(AsmToken::Exclaim)) {
4351  Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4352  Parser.Lex(); // Eat the '!'.
4353  }
4354 
4355  return false;
4356  }
4357 
4358  // The register offset is optionally preceded by a '+' or '-'
4359  bool isNegative = false;
4360  if (Parser.getTok().is(AsmToken::Minus)) {
4361  isNegative = true;
4362  Parser.Lex(); // Eat the '-'.
4363  } else if (Parser.getTok().is(AsmToken::Plus)) {
4364  // Nothing to do.
4365  Parser.Lex(); // Eat the '+'.
4366  }
4367 
4368  E = Parser.getTok().getLoc();
4369  int OffsetRegNum = tryParseRegister();
4370  if (OffsetRegNum == -1)
4371  return Error(E, "register expected");
4372 
4373  // If there's a shift operator, handle it.
4374  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4375  unsigned ShiftImm = 0;
4376  if (Parser.getTok().is(AsmToken::Comma)) {
4377  Parser.Lex(); // Eat the ','.
4378  if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4379  return true;
4380  }
4381 
4382  // Now we should have the closing ']'
4383  if (Parser.getTok().isNot(AsmToken::RBrac))
4384  return Error(Parser.getTok().getLoc(), "']' expected");
4385  E = Parser.getTok().getEndLoc();
4386  Parser.Lex(); // Eat right bracket token.
4387 
4388  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4389  ShiftType, ShiftImm, 0, isNegative,
4390  S, E));
4391 
4392  // If there's a pre-indexing writeback marker, '!', just add it as a token
4393  // operand.
4394  if (Parser.getTok().is(AsmToken::Exclaim)) {
4395  Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4396  Parser.Lex(); // Eat the '!'.
4397  }
4398 
4399  return false;
4400 }
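// Memory operand shapes covered by the branches above, for illustration:
//   [r0]                @ bare base register
//   [r0, #4]!           @ immediate offset; '!' added as a token operand
//   [r0, #-0]           @ negative zero preserved as INT32_MIN
//   [r0, r1, lsl #2]    @ register offset with shift
//   [r0:128]            @ alignment specifier in bits (NEON loads/stores)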
4401 
4402 /// parseMemRegOffsetShift - Parse one of these two forms:
4403 ///   ( lsl | lsr | asr | ror ) , # shift_amount
4404 ///   rrx
4405 /// Return false if a shift was successfully parsed, true on error.
4406 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4407  unsigned &Amount) {
4408  SMLoc Loc = Parser.getTok().getLoc();
4409  const AsmToken &Tok = Parser.getTok();
4410  if (Tok.isNot(AsmToken::Identifier))
4411  return true;
4412  StringRef ShiftName = Tok.getString();
4413  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4414  ShiftName == "asl" || ShiftName == "ASL")
4415  St = ARM_AM::lsl;
4416  else if (ShiftName == "lsr" || ShiftName == "LSR")
4417  St = ARM_AM::lsr;
4418  else if (ShiftName == "asr" || ShiftName == "ASR")
4419  St = ARM_AM::asr;
4420  else if (ShiftName == "ror" || ShiftName == "ROR")
4421  St = ARM_AM::ror;
4422  else if (ShiftName == "rrx" || ShiftName == "RRX")
4423  St = ARM_AM::rrx;
4424  else
4425  return Error(Loc, "illegal shift operator");
4426  Parser.Lex(); // Eat shift type token.
4427 
4428  // rrx stands alone.
4429  Amount = 0;
4430  if (St != ARM_AM::rrx) {
4431  Loc = Parser.getTok().getLoc();
4432  // A '#' and a shift amount.
4433  const AsmToken &HashTok = Parser.getTok();
4434  if (HashTok.isNot(AsmToken::Hash) &&
4435  HashTok.isNot(AsmToken::Dollar))
4436  return Error(HashTok.getLoc(), "'#' expected");
4437  Parser.Lex(); // Eat hash token.
4438 
4439  const MCExpr *Expr;
4440  if (getParser().parseExpression(Expr))
4441  return true;
4442  // Range check the immediate.
4443  // lsl, ror: 0 <= imm <= 31
4444  // lsr, asr: 0 <= imm <= 32
4445  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4446  if (!CE)
4447  return Error(Loc, "shift amount must be an immediate");
4448  int64_t Imm = CE->getValue();
4449  if (Imm < 0 ||
4450  ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4451  ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4452  return Error(Loc, "immediate shift value out of range");
4453  // If <ShiftTy> #0, canonicalize it to lsl #0 (i.e. no shift).
4454  if (Imm == 0)
4455  St = ARM_AM::lsl;
4456  // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
4457  if (Imm == 32)
4458  Imm = 0;
4459  Amount = Imm;
4460  }
4461 
4462  return false;
4463 }
4464 
4465 /// parseFPImm - A floating point immediate expression operand.
4466 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4467 parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4468  // Anything that can accept a floating point constant as an operand
4469  // needs to go through here, as the regular parseExpression is
4470  // integer only.
4471  //
4472  // This routine still creates a generic Immediate operand, containing
4473  // a bitcast of the 64-bit floating point value. The various operands
4474  // that accept floats can check whether the value is valid for them
4475  // via the standard is*() predicates.
4476 
4477  SMLoc S = Parser.getTok().getLoc();
4478 
4479  if (Parser.getTok().isNot(AsmToken::Hash) &&
4480  Parser.getTok().isNot(AsmToken::Dollar))
4481  return MatchOperand_NoMatch;
4482 
4483  // Disambiguate the VMOV forms that can accept an FP immediate.
4484  // vmov.f32 <sreg>, #imm
4485  // vmov.f64 <dreg>, #imm
4486  // vmov.f32 <dreg>, #imm @ vector f32x2
4487  // vmov.f32 <qreg>, #imm @ vector f32x4
4488  //
4489  // There are also the NEON VMOV instructions which expect an
4490  // integer constant. Make sure we don't try to parse an FPImm
4491  // for these:
4492  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4493  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4494  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4495  TyOp->getToken() != ".f64"))
4496  return MatchOperand_NoMatch;
4497 
4498  Parser.Lex(); // Eat '#' or '$'.
4499 
4500  // Handle negation, as that still comes through as a separate token.
4501  bool isNegative = false;
4502  if (Parser.getTok().is(AsmToken::Minus)) {
4503  isNegative = true;
4504  Parser.Lex();
4505  }
4506  const AsmToken &Tok = Parser.getTok();
4507  SMLoc Loc = Tok.getLoc();
4508  if (Tok.is(AsmToken::Real)) {
4509  APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4510  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4511  // If we had a '-' in front, toggle the sign bit.
4512  IntVal ^= (uint64_t)isNegative << 31;
4513  Parser.Lex(); // Eat the token.
4514  Operands.push_back(ARMOperand::CreateImm(
4515  MCConstantExpr::Create(IntVal, getContext()),
4516  S, Parser.getTok().getLoc()));
4517  return MatchOperand_Success;
4518  }
4519  // Also handle plain integers. Instructions which allow floating point
4520  // immediates also allow a raw encoded 8-bit value.
4521  if (Tok.is(AsmToken::Integer)) {
4522  int64_t Val = Tok.getIntVal();
4523  Parser.Lex(); // Eat the token.
4524  if (Val > 255 || Val < 0) {
4525  Error(Loc, "encoded floating point value out of range");
4526  return MatchOperand_ParseFail;
4527  }
4528  double RealVal = ARM_AM::getFPImmFloat(Val);
4529  Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4530  Operands.push_back(ARMOperand::CreateImm(
4531  MCConstantExpr::Create(Val, getContext()), S,
4532  Parser.getTok().getLoc()));
4533  return MatchOperand_Success;
4534  }
4535 
4536  Error(Loc, "invalid floating point immediate");
4537  return MatchOperand_ParseFail;
4538 }
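// FP immediate forms accepted above, for illustration:
//   vmov.f32 s0, #1.0     @ real literal, bit-cast; range-checked by is*()
//   vmov.f64 d0, #-2.5
//   vmov.f32 s0, #112     @ raw 8-bit encoded value (0-255)
//   vmov.i32 d0, #17      @ not handled here; integer VMOVs fall through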
4539 
4540 /// Parse an ARM instruction operand. For now this parses the operand regardless
4541 /// of the mnemonic.
4542 bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4543  StringRef Mnemonic) {
4544  SMLoc S, E;
4545 
4546  // Check if the current operand has a custom associated parser, if so, try to
4547  // custom parse the operand, or fallback to the general approach.
4548  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4549  if (ResTy == MatchOperand_Success)
4550  return false;
4551  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4552  // there was a match, but an error occurred, in which case, just return that
4553  // the operand parsing failed.
4554  if (ResTy == MatchOperand_ParseFail)
4555  return true;
4556 
4557  switch (getLexer().getKind()) {
4558  default:
4559  Error(Parser.getTok().getLoc(), "unexpected token in operand");
4560  return true;
4561  case AsmToken::Identifier: {
4562  // If we've seen a branch mnemonic, the next operand must be a label. This
4563  // is true even if the label is a register name. So "br r1" means branch to
4564  // label "r1".
4565  bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
4566  if (!ExpectLabel) {
4567  if (!tryParseRegisterWithWriteBack(Operands))
4568  return false;
4569  int Res = tryParseShiftRegister(Operands);
4570  if (Res == 0) // success
4571  return false;
4572  else if (Res == -1) // irrecoverable error
4573  return true;
4574  // If this is VMRS, check for the apsr_nzcv operand.
4575  if (Mnemonic == "vmrs" &&
4576  Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4577  S = Parser.getTok().getLoc();
4578  Parser.Lex();
4579  Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4580  return false;
4581  }
4582  }
4583 
4584  // Fall through for the Identifier case that is not a register or a
4585  // special name.
4586  }
4587  case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
4588  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4589  case AsmToken::String: // quoted label names.
4590  case AsmToken::Dot: { // . as a branch target
4591  // This was not a register so parse other operands that start with an
4592  // identifier (like labels) as expressions and create them as immediates.
4593  const MCExpr *IdVal;
4594  S = Parser.getTok().getLoc();
4595  if (getParser().parseExpression(IdVal))
4596  return true;
4597  E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4598  Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4599  return false;
4600  }
4601  case AsmToken::LBrac:
4602  return parseMemory(Operands);
4603  case AsmToken::LCurly:
4604  return parseRegisterList(Operands);
4605  case AsmToken::Dollar:
4606  case AsmToken::Hash: {
4607  // #42 -> immediate.
4608  S = Parser.getTok().getLoc();
4609  Parser.Lex();
4610 
4611  if (Parser.getTok().isNot(AsmToken::Colon)) {
4612  bool isNegative = Parser.getTok().is(AsmToken::Minus);
4613  const MCExpr *ImmVal;
4614  if (getParser().parseExpression(ImmVal))
4615  return true;
4616  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4617  if (CE) {
4618  int32_t Val = CE->getValue();
4619  if (isNegative && Val == 0)
4620  ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4621  }
4622  E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4623  Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4624 
4625  // There can be a trailing '!' on operands that we want as a separate
4626  // '!' Token operand. Handle that here. For example, the compatibility
4627  // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
4628  if (Parser.getTok().is(AsmToken::Exclaim)) {
4629  Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
4630  Parser.getTok().getLoc()));
4631  Parser.Lex(); // Eat exclaim token
4632  }
4633  return false;
4634  }
4635  // w/ a ':' after the '#', it's just like a plain ':'.
4636  // FALLTHROUGH
4637  }
4638  case AsmToken::Colon: {
4639  // ":lower16:" and ":upper16:" expression prefixes
4640  // FIXME: Check it's an expression prefix,
4641  // e.g. (FOO - :lower16:BAR) isn't legal.
4642  ARMMCExpr::VariantKind RefKind;
4643  if (parsePrefix(RefKind))
4644  return true;
4645 
4646  const MCExpr *SubExprVal;
4647  if (getParser().parseExpression(SubExprVal))
4648  return true;
4649 
4650  const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4651  getContext());
4652  E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4653  Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4654  return false;
4655  }
4656  }
4657 }
4658 
4659 // parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4660 // :lower16: and :upper16:.
4661 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4662  RefKind = ARMMCExpr::VK_ARM_None;
4663 
4664  // :lower16: and :upper16: modifiers
4665  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4666  Parser.Lex(); // Eat ':'
4667 
4668  if (getLexer().isNot(AsmToken::Identifier)) {
4669  Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4670  return true;
4671  }
4672 
4673  StringRef IDVal = Parser.getTok().getIdentifier();
4674  if (IDVal == "lower16") {
4675  RefKind = ARMMCExpr::VK_ARM_LO16;
4676  } else if (IDVal == "upper16") {
4677  RefKind = ARMMCExpr::VK_ARM_HI16;
4678  } else {
4679  Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4680  return true;
4681  }
4682  Parser.Lex();
4683 
4684  if (getLexer().isNot(AsmToken::Colon)) {
4685  Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4686  return true;
4687  }
4688  Parser.Lex(); // Eat the last ':'
4689  return false;
4690 }
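// Example uses of the relocation prefixes recognized above (symbol name is a
// placeholder):
//   movw r0, #:lower16:some_symbol
//   movt r0, #:upper16:some_symbol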
4691 
4692 /// \brief Given a mnemonic, split out possible predication code and carry
4693 /// setting letters to form a canonical mnemonic and flags.
4694 //
4695 // FIXME: Would be nice to autogen this.
4696 // FIXME: This is a bit of a maze of special cases.
4697 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4698  unsigned &PredicationCode,
4699  bool &CarrySetting,
4700  unsigned &ProcessorIMod,
4701  StringRef &ITMask) {
4702  PredicationCode = ARMCC::AL;
4703  CarrySetting = false;
4704  ProcessorIMod = 0;
4705 
4706  // Ignore some mnemonics we know aren't predicated forms.
4707  //
4708  // FIXME: Would be nice to autogen this.
4709  if ((Mnemonic == "movs" && isThumb()) ||
4710  Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
4711  Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
4712  Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
4713  Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
4714  Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
4715  Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
4716  Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
4717  Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4718  Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
4719  Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
4720  Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
4721  Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic.startswith("vsel"))
4722  return Mnemonic;
4723 
4724  // First, split out any predication code. Ignore mnemonics we know aren't
4725  // predicated but do have a carry-set and so weren't caught above.
4726  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4727  Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4728  Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4729  Mnemonic != "sbcs" && Mnemonic != "rscs") {
4730  unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4731  .Case("eq", ARMCC::EQ)
4732  .Case("ne", ARMCC::NE)
4733  .Case("hs", ARMCC::HS)
4734  .Case("cs", ARMCC::HS)
4735  .Case("lo", ARMCC::LO)
4736  .Case("cc", ARMCC::LO)
4737  .Case("mi", ARMCC::MI)
4738  .Case("pl", ARMCC::PL)
4739  .Case("vs", ARMCC::VS)
4740  .Case("vc", ARMCC::VC)
4741  .Case("hi", ARMCC::HI)
4742  .Case("ls", ARMCC::LS)
4743  .Case("ge", ARMCC::GE)
4744  .Case("lt", ARMCC::LT)
4745  .Case("gt", ARMCC::GT)
4746  .Case("le", ARMCC::LE)
4747  .Case("al", ARMCC::AL)
4748  .Default(~0U);
4749  if (CC != ~0U) {
4750  Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4751  PredicationCode = CC;
4752  }
4753  }
4754 
4755  // Next, determine if we have a carry setting bit. We explicitly ignore all
4756  // the instructions we know end in 's'.
4757  if (Mnemonic.endswith("s") &&
4758  !(Mnemonic == "cps" || Mnemonic == "mls" ||
4759  Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4760  Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4761  Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4762  Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4763  Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4764  Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4765  Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4766  Mnemonic == "vfms" || Mnemonic == "vfnms" ||
4767  (Mnemonic == "movs" && isThumb()))) {
4768  Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4769  CarrySetting = true;
4770  }
4771 
4772  // The "cps" instruction can have a interrupt mode operand which is glued into
4773  // the mnemonic. Check if this is the case, split it and parse the imod op
4774  if (Mnemonic.startswith("cps")) {
4775  // Split out any imod code.
4776  unsigned IMod =
4777  StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4778  .Case("ie", ARM_PROC::IE)
4779  .Case("id", ARM_PROC::ID)
4780  .Default(~0U);
4781  if (IMod != ~0U) {
4782  Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4783  ProcessorIMod = IMod;
4784  }
4785  }
4786 
4787  // The "it" instruction has the condition mask on the end of the mnemonic.
4788  if (Mnemonic.startswith("it")) {
4789  ITMask = Mnemonic.slice(2, Mnemonic.size());
4790  Mnemonic = Mnemonic.slice(0, 2);
4791  }
4792 
4793  return Mnemonic;
4794 }
4795 
4796 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
4797 /// inclusion of carry set or predication code operands.
4798 //
4799 // FIXME: It would be nice to autogen this.
4800 void ARMAsmParser::
4801 getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
4802  bool &CanAcceptCarrySet, bool &CanAcceptPredicationCode) {
4803  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4804  Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4805  Mnemonic == "add" || Mnemonic == "adc" ||
4806  Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4807  Mnemonic == "orr" || Mnemonic == "mvn" ||
4808  Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4809  Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4810  Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4811  (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4812  Mnemonic == "mla" || Mnemonic == "smlal" ||
4813  Mnemonic == "umlal" || Mnemonic == "umull"))) {
4814  CanAcceptCarrySet = true;
4815  } else
4816  CanAcceptCarrySet = false;
4817 
4818  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
4819  Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
4820  Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic.startswith("crc32") ||
4821  Mnemonic.startswith("cps") || Mnemonic.startswith("vsel") ||
4822  Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
4823  Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
4824  Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
4825  Mnemonic == "vrintm" || Mnemonic.startswith("aes") ||
4826  Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
4827  (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
4828  // These mnemonics are never predicable
4829  CanAcceptPredicationCode = false;
4830  } else if (!isThumb()) {
4831  // Some instructions are only predicable in Thumb mode
4832  CanAcceptPredicationCode
4833  = Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
4834  Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
4835  Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
4836  Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
4837  Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
4838  Mnemonic != "stc2" && Mnemonic != "stc2l" &&
4839  !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
4840  } else if (isThumbOne()) {
4841  if (hasV6MOps())
4842  CanAcceptPredicationCode = Mnemonic != "movs";
4843  else
4844  CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
4845  } else
4846  CanAcceptPredicationCode = true;
4847 }
4848 
4849 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4850  SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4851  // FIXME: This is all horribly hacky. We really need a better way to deal
4852  // with optional operands like this in the matcher table.
4853 
4854  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4855  // another does not. Specifically, the MOVW instruction does not. So we
4856  // special case it here and remove the defaulted (non-setting) cc_out
4857  // operand if that's the instruction we're trying to match.
4858  //
4859  // We do this as post-processing of the explicit operands rather than just
4860  // conditionally adding the cc_out in the first place because we need
4861  // to check the type of the parsed immediate operand.
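 // e.g. in ARM mode "mov r0, #0x1234" can only be MOVW (0x1234 is not a
 // valid SO-immediate but does fit imm0_65535), so the cc_out must go.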
4862  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4863  !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4864  static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4865  static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4866  return true;
4867 
4868  // Register-register 'add' for thumb does not have a cc_out operand
4869  // when there are only two register operands.
4870  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4871  static_cast<ARMOperand*>(Operands[3])->isReg() &&
4872  static_cast<ARMOperand*>(Operands[4])->isReg() &&
4873  static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4874  return true;
4875  // Register-register 'add' for thumb does not have a cc_out operand
4876  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4877  // have to check the immediate range here since Thumb2 has a variant
4878  // that can handle a different range and has a cc_out operand.
4879  if (((isThumb() && Mnemonic == "add") ||
4880  (isThumbTwo() && Mnemonic == "sub")) &&
4881  Operands.size() == 6 &&
4882  static_cast<ARMOperand*>(Operands[3])->isReg() &&
4883  static_cast<ARMOperand*>(Operands[4])->isReg() &&
4884  static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4885  static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4886  ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4887  static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4888  return true;
4889  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4890  // imm0_4095 variant. That's the least-preferred variant when
4891  // selecting via the generic "add" mnemonic, so to know that we
4892  // should remove the cc_out operand, we have to explicitly check that
4893  // it's not one of the other variants. Ugh.
4894  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4895  Operands.size() == 6 &&
4896  static_cast<ARMOperand*>(Operands[3])->isReg() &&
4897  static_cast<ARMOperand*>(Operands[4])->isReg() &&
4898  static_cast<ARMOperand*>(Operands[5])->isImm()) {
4899  // Nest conditions rather than one big 'if' statement for readability.
4900  //
4901  // If both registers are low, we're in an IT block, and the immediate is
4902  // in range, we should use encoding T1 instead, which has a cc_out.
4903  if (inITBlock() &&
4904  isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4905  isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4906  static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4907  return false;
4908  // Check against T3. If the second register is the PC, this is an
4909  // alternate form of ADR, which uses encoding T4, so check for that too.
4910  if (static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4911  static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4912  return false;
4913 
4914  // Otherwise, we use encoding T4, which does not have a cc_out
4915  // operand.
4916  return true;
4917  }
4918 
4919  // The thumb2 multiply instruction doesn't have a CCOut register, so
4920  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4921  // use the 16-bit encoding or not.
4922  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4923  static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4924  static_cast<ARMOperand*>(Operands[3])->isReg() &&
4925  static_cast<ARMOperand*>(Operands[4])->isReg() &&
4926  static_cast<ARMOperand*>(Operands[5])->isReg() &&
4927  // If the registers aren't low regs, the destination reg isn't the
4928  // same as one of the source regs, or the cc_out operand is zero
4929  // outside of an IT block, we have to use the 32-bit encoding, so
4930  // remove the cc_out operand.
4931  (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4932  !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4933  !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4934  !inITBlock() ||
4935  (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4936  static_cast<ARMOperand*>(Operands[5])->getReg() &&
4937  static_cast<ARMOperand*>(Operands[3])->getReg() !=
4938  static_cast<ARMOperand*>(Operands[4])->getReg())))
4939  return true;
4940 
4941  // Also check the 'mul' syntax variant that doesn't specify an explicit
4942  // destination register.
4943  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4944  static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4945  static_cast<ARMOperand*>(Operands[3])->isReg() &&
4946  static_cast<ARMOperand*>(Operands[4])->isReg() &&
4947  // If the registers aren't low regs or the cc_out operand is zero
4948  // outside of an IT block, we have to use the 32-bit encoding, so
4949  // remove the cc_out operand.
4950  (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4951  !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4952  !inITBlock()))
4953  return true;
4954 
4955 
4956 
4957  // Register-register 'add/sub' for thumb does not have a cc_out operand
4958  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4959  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4960  // right, this will result in better diagnostics (which operand is off)
4961  // anyway.
4962  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4963  (Operands.size() == 5 || Operands.size() == 6) &&
4964  static_cast<ARMOperand*>(Operands[3])->isReg() &&
4965  static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4966  static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4967  (static_cast<ARMOperand*>(Operands[4])->isImm() ||
4968  (Operands.size() == 6 &&
4969  static_cast<ARMOperand*>(Operands[5])->isImm())))
4970  return true;
4971 
4972  return false;
4973 }
4974 
4975 bool ARMAsmParser::shouldOmitPredicateOperand(
4976  StringRef Mnemonic, SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
4977  // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
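 // e.g. "vrintz.f32 d0, d1" (NEON, D/Q registers) drops the predicate,
 // while "vrintz.f32 s0, s1" (VFP, S registers) keeps it.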
4978  unsigned RegIdx = 3;
4979  if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
4980  static_cast<ARMOperand *>(Operands[2])->getToken() == ".f32") {
4981  if (static_cast<ARMOperand *>(Operands[3])->isToken() &&
4982  static_cast<ARMOperand *>(Operands[3])->getToken() == ".f32")
4983  RegIdx = 4;
4984 
4985  if (static_cast<ARMOperand *>(Operands[RegIdx])->isReg() &&
4986  (ARMMCRegisterClasses[ARM::DPRRegClassID]
4987  .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg()) ||
4988  ARMMCRegisterClasses[ARM::QPRRegClassID]
4989  .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg())))
4990  return true;
4991  }
4992  return false;
4993 }
4994 
4995 static bool isDataTypeToken(StringRef Tok) {
4996  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4997  Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4998  Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4999  Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5000  Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5001  Tok == ".f" || Tok == ".d";
5002 }
5003 
5004 // FIXME: This bit should probably be handled via an explicit match class
5005 // in the .td files that matches the suffix instead of having it be
5006 // a literal string token the way it is now.
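 // e.g. a datatype suffix on vldm/vstm ("vldm.f64 r0!, {d0-d3}") conveys no
 // extra information and is simply ignored when the mnemonic is parsed.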
5007 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5008  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5009 }
5010 static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features,
5011  unsigned VariantID);
5012 /// Parse an arm instruction mnemonic followed by its operands.
5013 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
5014  SMLoc NameLoc,
5016  // Apply mnemonic aliases before doing anything else, as the destination
5017  // mnemnonic may include suffices and we want to handle them normally.
5018  // The generic tblgen'erated code does this later, at the start of
5019  // MatchInstructionImpl(), but that's too late for aliases that include
5020  // any sort of suffix.
5021  unsigned AvailableFeatures = getAvailableFeatures();
5022  unsigned AssemblerDialect = getParser().getAssemblerDialect();
5023  applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
5024 
5025  // First check for the ARM-specific .req directive.
5026  if (Parser.getTok().is(AsmToken::Identifier) &&
5027  Parser.getTok().getIdentifier() == ".req") {
5028  parseDirectiveReq(Name, NameLoc);
5029  // We always return 'error' for this, as we're done with this
5030  // statement and don't need to match the instruction.
5031  return true;
5032  }
5033 
5034  // Create the leading tokens for the mnemonic, split by '.' characters.
5035  size_t Start = 0, Next = Name.find('.');
5036  StringRef Mnemonic = Name.slice(Start, Next);
5037 
5038  // Split out the predication code and carry setting flag from the mnemonic.
5039  unsigned PredicationCode;
5040  unsigned ProcessorIMod;
5041  bool CarrySetting;
5042  StringRef ITMask;
5043  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
5044  ProcessorIMod, ITMask);
5045 
5046  // In Thumb1, only the branch (B) instruction can be predicated.
5047  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5048  Parser.eatToEndOfStatement();
5049  return Error(NameLoc, "conditional execution not supported in Thumb1");
5050  }
5051 
5052  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5053 
5054  // Handle the IT instruction ITMask. Convert it to a bitmask. This
5055  // is the mask as it will be for the IT encoding if the conditional
5056  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5057  // where the conditional bit0 is zero, the instruction post-processing
5058  // will adjust the mask accordingly.
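 // e.g. a bare "it" leaves Mask as 0b1000, while "itte" (ITMask "te")
 // produces 0b1010 here.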
5059  if (Mnemonic == "it") {
5060  SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5061  if (ITMask.size() > 3) {
5062  Parser.eatToEndOfStatement();
5063  return Error(Loc, "too many conditions on IT instruction");
5064  }
5065  unsigned Mask = 8;
5066  for (unsigned i = ITMask.size(); i != 0; --i) {
5067  char pos = ITMask[i - 1];
5068  if (pos != 't' && pos != 'e') {
5069  Parser.eatToEndOfStatement();
5070  return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5071  }
5072  Mask >>= 1;
5073  if (ITMask[i - 1] == 't')
5074  Mask |= 8;
5075  }
5076  Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
5077  }
5078 
5079  // FIXME: This is all a pretty gross hack. We should automatically handle
5080  // optional operands like this via tblgen.
5081 
5082  // Next, add the CCOut and ConditionCode operands, if needed.
5083  //
5084  // For mnemonics which can ever incorporate a carry setting bit or predication
5085  // code, our matching model involves us always generating CCOut and
5086  // ConditionCode operands to match the mnemonic "as written" and then we let
5087  // the matcher deal with finding the right instruction or generating an
5088  // appropriate error.
5089  bool CanAcceptCarrySet, CanAcceptPredicationCode;
5090  getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
5091 
5092  // If we had a carry-set on an instruction that can't do that, issue an
5093  // error.
5094  if (!CanAcceptCarrySet && CarrySetting) {
5095  Parser.eatToEndOfStatement();
5096  return Error(NameLoc, "instruction '" + Mnemonic +
5097  "' can not set flags, but 's' suffix specified");
5098  }
5099  // If we had a predication code on an instruction that can't do that, issue an
5100  // error.
5101  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5102  Parser.eatToEndOfStatement();
5103  return Error(NameLoc, "instruction '" + Mnemonic +
5104  "' is not predicable, but condition code specified");
5105  }
5106 
5107  // Add the carry setting operand, if necessary.
5108  if (CanAcceptCarrySet) {
5109  SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5110  Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5111  Loc));
5112  }
5113 
5114  // Add the predication code operand, if necessary.
5115  if (CanAcceptPredicationCode) {
5116  SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5117  CarrySetting);
5118  Operands.push_back(ARMOperand::CreateCondCode(
5119  ARMCC::CondCodes(PredicationCode), Loc));
5120  }
5121 
5122  // Add the processor imod operand, if necessary.
5123  if (ProcessorIMod) {
5124  Operands.push_back(ARMOperand::CreateImm(
5125  MCConstantExpr::Create(ProcessorIMod, getContext()),
5126  NameLoc, NameLoc));
5127  }
5128 
5129  // Add the remaining tokens in the mnemonic.
5130  while (Next != StringRef::npos) {
5131  Start = Next;
5132  Next = Name.find('.', Start + 1);
5133  StringRef ExtraToken = Name.slice(Start, Next);
5134 
5135  // Some NEON instructions have an optional datatype suffix that is
5136  // completely ignored. Check for that.
5137  if (isDataTypeToken(ExtraToken) &&
5138  doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5139  continue;
5140 
5141  // For ARM mode, generate an error if the .n qualifier is used.
5142  if (ExtraToken == ".n" && !isThumb()) {
5143  SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5144  return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
5145  "arm mode");
5146  }
5147 
5148  // The .n qualifier is always discarded as that is what the tables
5149  // and matcher expect. In ARM mode the .w qualifier has no effect,
5150  // so discard it to avoid errors that can be caused by the matcher.
5151  if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
5152  SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5153  Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5154  }
5155  }
5156 
5157  // Read the remaining operands.
5158  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5159  // Read the first operand.
5160  if (parseOperand(Operands, Mnemonic)) {
5161  Parser.eatToEndOfStatement();
5162  return true;
5163  }
5164 
5165  while (getLexer().is(AsmToken::Comma)) {
5166  Parser.Lex(); // Eat the comma.
5167 
5168  // Parse and remember the operand.
5169  if (parseOperand(Operands, Mnemonic)) {
5170  Parser.eatToEndOfStatement();
5171  return true;
5172  }
5173  }
5174  }
5175 
5176  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5177  SMLoc Loc = getLexer().getLoc();
5178  Parser.eatToEndOfStatement();
5179  return Error(Loc, "unexpected token in argument list");
5180  }
5181 
5182  Parser.Lex(); // Consume the EndOfStatement
5183 
5184  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5185  // do and don't have a cc_out optional-def operand. With some spot-checks
5186  // of the operand list, we can figure out which variant we're trying to
5187  // parse and adjust accordingly before actually matching. We shouldn't ever
5188  // try to remove a cc_out operand that was explicitly set on the
5189  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5190  // table driven matcher doesn't fit well with the ARM instruction set.
5191  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5192  ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5193  Operands.erase(Operands.begin() + 1);
5194  delete Op;
5195  }
5196 
5197  // Some instructions have the same mnemonic, but don't always
5198  // have a predicate. Distinguish them here and delete the
5199  // predicate if needed.
5200  if (shouldOmitPredicateOperand(Mnemonic, Operands)) {
5201  ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5202  Operands.erase(Operands.begin() + 1);
5203  delete Op;
5204  }
5205 
5206  // ARM mode 'blx' needs special handling, as the register operand version
5207  // is predicable, but the label operand version is not. So, we can't rely
5208  // on the Mnemonic based checking to correctly figure out when to put
5209  // a k_CondCode operand in the list. If we're trying to match the label
5210  // version, remove the k_CondCode operand here.
5211  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5212  static_cast<ARMOperand*>(Operands[2])->isImm()) {
5213  ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5214  Operands.erase(Operands.begin() + 1);
5215  delete Op;
5216  }
5217 
5218  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5219  // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
5220  // a single GPRPair reg operand is used in the .td file to replace the two
5221  // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
5222  // expressed as a GPRPair, so we have to manually merge them.
5223  // FIXME: We would really like to be able to tablegen'erate this.
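 // e.g. for "ldrexd r0, r1, [r2]" the r0/r1 pair is folded below into the
 // single covering GPRPair super-register before matching.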
5224  if (!isThumb() && Operands.size() > 4 &&
5225  (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
5226  Mnemonic == "stlexd")) {
5227  bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
5228  unsigned Idx = isLoad ? 2 : 3;
5229  ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]);
5230  ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]);
5231 
5232  const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
5233  // Adjust only if Op1 and Op2 are GPRs.
5234  if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) &&
5235  MRC.contains(Op2->getReg())) {
5236  unsigned Reg1 = Op1->getReg();
5237  unsigned Reg2 = Op2->getReg();
5238  unsigned Rt = MRI->getEncodingValue(Reg1);
5239  unsigned Rt2 = MRI->getEncodingValue(Reg2);
5240 
5241  // Rt2 must be Rt + 1 and Rt must be even.
5242  if (Rt + 1 != Rt2 || (Rt & 1)) {
5243  Error(Op2->getStartLoc(), isLoad ?
5244  "destination operands must be sequential" :
5245  "source operands must be sequential");
5246  return true;
5247  }
5248  unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
5249  &(MRI->getRegClass(ARM::GPRPairRegClassID)));
5250  Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2);
5251  Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg(
5252  NewReg, Op1->getStartLoc(), Op2->getEndLoc()));
5253  delete Op1;
5254  delete Op2;
5255  }
5256  }
5257 
5258  // FIXME: As said above, this is all a pretty gross hack. This instruction
5259  // does not fit with other "subs" and tblgen.
5260  // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
5261  // so the Mnemonic is the original name "subs" and delete the predicate
5262  // operand so it will match the table entry.
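 // e.g. "subs pc, lr, #4", the usual exception-return idiom.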
5263  if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
5264  static_cast<ARMOperand*>(Operands[3])->isReg() &&
5265  static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::PC &&
5266  static_cast<ARMOperand*>(Operands[4])->isReg() &&
5267  static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::LR &&
5268  static_cast<ARMOperand*>(Operands[5])->isImm()) {
5269  ARMOperand *Op0 = static_cast<ARMOperand*>(Operands[0]);
5270  Operands.erase(Operands.begin());
5271  delete Op0;
5272  Operands.insert(Operands.begin(), ARMOperand::CreateToken(Name, NameLoc));
5273 
5274  ARMOperand *Op1 = static_cast<ARMOperand*>(Operands[1]);
5275  Operands.erase(Operands.begin() + 1);
5276  delete Op1;
5277  }
5278  return false;
5279 }
5280 
5281 // Validate context-sensitive operand constraints.
5282 
5283 // return 'true' if register list contains non-low GPR registers,
5284 // 'false' otherwise. If Reg is in the register list or is HiReg, set
5285 // 'containsReg' to true.
5286 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5287  unsigned HiReg, bool &containsReg) {
5288  containsReg = false;
5289  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5290  unsigned OpReg = Inst.getOperand(i).getReg();
5291  if (OpReg == Reg)
5292  containsReg = true;
5293  // Anything other than a low register isn't legal here.
5294  if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5295  return true;
5296  }
5297  return false;
5298 }
5299 
5300 // Check if the specified register is in the register list of the inst,
5301 // starting at the indicated operand number.
5302 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5303  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5304  unsigned OpReg = Inst.getOperand(i).getReg();
5305  if (OpReg == Reg)
5306  return true;
5307  }
5308  return false;
5309 }
5310 
5311 // Return true if instruction has the interesting property of being
5312 // allowed in IT blocks, but not being predicable.
5313 static bool instIsBreakpoint(const MCInst &Inst) {
5314  return Inst.getOpcode() == ARM::tBKPT ||
5315  Inst.getOpcode() == ARM::BKPT ||
5316  Inst.getOpcode() == ARM::tHLT ||
5317  Inst.getOpcode() == ARM::HLT;
5318 
5319 }
5320 
5321 // FIXME: We would really like to be able to tablegen'erate this.
5322 bool ARMAsmParser::
5323 validateInstruction(MCInst &Inst,
5324  const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5325  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5326  SMLoc Loc = Operands[0]->getStartLoc();
5327 
5328  // Check the IT block state first.
5329  // NOTE: BKPT and HLT instructions have the interesting property of being
5330  // allowed in IT blocks, but not being predicable. They just always execute.
5331  if (inITBlock() && !instIsBreakpoint(Inst)) {
5332  unsigned Bit = 1;
5333  if (ITState.FirstCond)
5334  ITState.FirstCond = false;
5335  else
5336  Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5337  // The instruction must be predicable.
5338  if (!MCID.isPredicable())
5339  return Error(Loc, "instructions in IT block must be predicable");
5340  unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5341  unsigned ITCond = Bit ? ITState.Cond :
5342  ARMCC::getOppositeCondition(ITState.Cond);
5343  if (Cond != ITCond) {
5344  // Find the condition code Operand to get its SMLoc information.
5345  SMLoc CondLoc;
5346  for (unsigned I = 1; I < Operands.size(); ++I)
5347  if (static_cast<ARMOperand*>(Operands[I])->isCondCode())
5348  CondLoc = Operands[I]->getStartLoc();
5349  return Error(CondLoc, "incorrect condition in IT block; got '" +
5350  StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5351  "', but expected '" +
5352  ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5353  }
5354  // Check for non-'al' condition codes outside of the IT block.
5355  } else if (isThumbTwo() && MCID.isPredicable() &&
5356  Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5357  ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
5358  Inst.getOpcode() != ARM::t2Bcc)
5359  return Error(Loc, "predicated instructions must be in IT block");
5360 
5361  const unsigned Opcode = Inst.getOpcode();
5362  switch (Opcode) {
5363  case ARM::LDRD:
5364  case ARM::LDRD_PRE:
5365  case ARM::LDRD_POST: {
5366  const unsigned RtReg = Inst.getOperand(0).getReg();
5367 
5368  // Rt can't be R14.
5369  if (RtReg == ARM::LR)
5370  return Error(Operands[3]->getStartLoc(),
5371  "Rt can't be R14");
5372 
5373  const unsigned Rt = MRI->getEncodingValue(RtReg);
5374  // Rt must be even-numbered.
5375  if ((Rt & 1) == 1)
5376  return Error(Operands[3]->getStartLoc(),
5377  "Rt must be even-numbered");
5378 
5379  // Rt2 must be Rt + 1.
5380  const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5381  if (Rt2 != Rt + 1)
5382  return Error(Operands[3]->getStartLoc(),
5383  "destination operands must be sequential");
5384 
5385  if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
5386  const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
5387  // For addressing modes with writeback, the base register needs to be
5388  // different from the destination registers.
5389  if (Rn == Rt || Rn == Rt2)
5390  return Error(Operands[3]->getStartLoc(),
5391  "base register needs to be different from destination "
5392  "registers");
5393  }
5394 
5395  return false;
5396  }
5397  case ARM::t2LDRDi8:
5398  case ARM::t2LDRD_PRE:
5399  case ARM::t2LDRD_POST: {
5400  // Rt2 must be different from Rt.
5401  unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5402  unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5403  if (Rt2 == Rt)
5404  return Error(Operands[3]->getStartLoc(),
5405  "destination operands can't be identical");
5406  return false;
5407  }
5408  case ARM::STRD: {
5409  // Rt2 must be Rt + 1.
5410  unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5411  unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5412  if (Rt2 != Rt + 1)
5413  return Error(Operands[3]->getStartLoc(),
5414  "source operands must be sequential");
5415  return false;
5416  }
5417  case ARM::STRD_PRE:
5418  case ARM::STRD_POST: {
5419  // Rt2 must be Rt + 1.
5420  unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5421  unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
5422  if (Rt2 != Rt + 1)
5423  return Error(Operands[3]->getStartLoc(),
5424  "source operands must be sequential");
5425  return false;
5426  }
5427  case ARM::SBFX:
5428  case ARM::UBFX: {
5429  // Width must be in range [1, 32-lsb].
5430  unsigned LSB = Inst.getOperand(2).getImm();
5431  unsigned Widthm1 = Inst.getOperand(3).getImm();
5432  if (Widthm1 >= 32 - LSB)
5433  return Error(Operands[5]->getStartLoc(),
5434  "bitfield width must be in range [1,32-lsb]");
5435  return false;
5436  }
5437  // Notionally handles ARM::tLDMIA_UPD too.
5438  case ARM::tLDMIA: {
5439  // If we're parsing Thumb2, the .w variant is available and handles
5440  // most cases that are normally illegal for a Thumb1 LDM instruction.
5441  // We'll make the transformation in processInstruction() if necessary.
5442  //
5443  // Thumb LDM instructions are writeback iff the base register is not
5444  // in the register list.
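 // e.g. in Thumb1, "ldm r0, {r1, r2}" is rejected below for the missing '!',
 // and "ldm r0!, {r0, r1}" is rejected for having one.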
5445  unsigned Rn = Inst.getOperand(0).getReg();
5446  bool HasWritebackToken =
5447  (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5448  static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5449  bool ListContainsBase;
5450  if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
5451  return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
5452  "registers must be in range r0-r7");
5453  // If we should have writeback, then there should be a '!' token.
5454  if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
5455  return Error(Operands[2]->getStartLoc(),
5456  "writeback operator '!' expected");
5457  // If we should not have writeback, there must not be a '!'. This is
5458  // true even for the 32-bit wide encodings.
5459  if (ListContainsBase && HasWritebackToken)
5460  return Error(Operands[3]->getStartLoc(),
5461  "writeback operator '!' not allowed when base register "
5462  "in register list");
5463 
5464  break;
5465  }
5466  case ARM::LDMIA_UPD:
5467  case ARM::LDMDB_UPD:
5468  case ARM::LDMIB_UPD:
5469  case ARM::LDMDA_UPD:
5470  // ARM variants loading and updating the same register are only officially
5471  // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
5472  if (!hasV7Ops())
5473  break;
5474  // Fallthrough
5475  case ARM::t2LDMIA_UPD:
5476  case ARM::t2LDMDB_UPD:
5477  case ARM::t2STMIA_UPD:
5478  case ARM::t2STMDB_UPD: {
5479  if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5480  return Error(Operands.back()->getStartLoc(),
5481  "writeback register not allowed in register list");
5482  break;
5483  }
5484  case ARM::sysLDMIA_UPD:
5485  case ARM::sysLDMDA_UPD:
5486  case ARM::sysLDMDB_UPD:
5487  case ARM::sysLDMIB_UPD:
5488  if (!listContainsReg(Inst, 3, ARM::PC))
5489  return Error(Operands[4]->getStartLoc(),
5490  "writeback register only allowed on system LDM "
5491  "if PC in register-list");
5492  break;
5493  case ARM::sysSTMIA_UPD:
5494  case ARM::sysSTMDA_UPD:
5495  case ARM::sysSTMDB_UPD:
5496  case ARM::sysSTMIB_UPD:
5497  return Error(Operands[2]->getStartLoc(),
5498  "system STM cannot have writeback register");
5499  break;
5500  case ARM::tMUL: {
5501  // The second source operand must be the same register as the destination
5502  // operand.
5503  //
5504  // In this case, we must directly check the parsed operands because the
5505  // cvtThumbMultiply() function is written in such a way that it guarantees
5506  // this first statement is always true for the new Inst. Essentially, the
5507  // destination is unconditionally copied into the second source operand
5508  // without checking to see if it matches what we actually parsed.
5509  if (Operands.size() == 6 &&
5510  (((ARMOperand*)Operands[3])->getReg() !=
5511  ((ARMOperand*)Operands[5])->getReg()) &&
5512  (((ARMOperand*)Operands[3])->getReg() !=
5513  ((ARMOperand*)Operands[4])->getReg())) {
5514  return Error(Operands[3]->getStartLoc(),
5515  "destination register must match source register");
5516  }
5517  break;
5518  }
5519  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
5520  // so only issue a diagnostic for thumb1. The instructions will be
5521  // switched to the t2 encodings in processInstruction() if necessary.
5522  case ARM::tPOP: {
5523  bool ListContainsBase;
5524  if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
5525  !isThumbTwo())
5526  return Error(Operands[2]->getStartLoc(),
5527  "registers must be in range r0-r7 or pc");
5528  break;
5529  }
5530  case ARM::tPUSH: {
5531  bool ListContainsBase;
5532  if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
5533  !isThumbTwo())
5534  return Error(Operands[2]->getStartLoc(),
5535  "registers must be in range r0-r7 or lr");
5536  break;
5537  }
5538  case ARM::tSTMIA_UPD: {
5539  bool ListContainsBase, InvalidLowList;
5540  InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
5541  0, ListContainsBase);
5542  if (InvalidLowList && !isThumbTwo())
5543  return Error(Operands[4]->getStartLoc(),
5544  "registers must be in range r0-r7");
5545 
5546  // This would be converted to a 32-bit stm, but that's not valid if the
5547  // writeback register is in the list.
5548  if (InvalidLowList && ListContainsBase)
5549  return Error(Operands[4]->getStartLoc(),
5550  "writeback operator '!' not allowed when base register "
5551  "in register list");
5552  break;
5553  }
5554  case ARM::tADDrSP: {
5555  // If the non-SP source operand and the destination operand are not the
5556  // same, we need thumb2 (for the wide encoding), or we have an error.
5557  if (!isThumbTwo() &&
5558  Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
5559  return Error(Operands[4]->getStartLoc(),
5560  "source register must be the same as destination");
5561  }
5562  break;
5563  }
5564  // Final range checking for Thumb unconditional branch instructions.
5565  case ARM::tB:
5566  if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<11, 1>())
5567  return Error(Operands[2]->getStartLoc(), "branch target out of range");
5568  break;
5569  case ARM::t2B: {
5570  int op = (Operands[2]->isImm()) ? 2 : 3;
5571  if (!(static_cast<ARMOperand*>(Operands[op]))->isSignedOffset<24, 1>())
5572  return Error(Operands[op]->getStartLoc(), "branch target out of range");
5573  break;
5574  }
5575  // Final range checking for Thumb conditional branch instructions.
5576  case ARM::tBcc:
5577  if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<8, 1>())
5578  return Error(Operands[2]->getStartLoc(), "branch target out of range");
5579  break;
5580  case ARM::t2Bcc: {
5581  int Op = (Operands[2]->isImm()) ? 2 : 3;
5582  if (!(static_cast<ARMOperand*>(Operands[Op]))->isSignedOffset<20, 1>())
5583  return Error(Operands[Op]->getStartLoc(), "branch target out of range");
5584  break;
5585  }
5586  }
5587 
5588  return false;
5589 }
5590 
5591 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5592  switch(Opc) {
5593  default: llvm_unreachable("unexpected opcode!");
5594  // VST1LN
5595  case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
5596  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5597  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5598  case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
5599  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5600  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5601  case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
5602  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5603  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5604 
5605  // VST2LN
5606  case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
5607  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5608  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5609  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5610  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5611 
5612  case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
5613  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5614  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5615  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5616  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5617 
5618  case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
5619  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5620  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5621  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5622  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5623 
5624  // VST3LN
5625  case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
5626  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5627  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5628  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5629  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5630  case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
5631  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5632  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5633  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5634  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5635  case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
5636  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5637  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5638  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5639  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5640 
5641  // VST3
5642  case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
5643  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5644  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5645  case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
5646  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5647  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5648  case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
5649  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5650  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5651  case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
5652  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5653  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5654  case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
5655  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5656  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5657  case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
5658  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5659  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5660 
5661  // VST4LN
5662  case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
5663  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5664  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5665  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
5666  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5667  case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
5668  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5669  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5670  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5671  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5672  case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
5673  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5674  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5675  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5676  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5677 
5678  // VST4
5679  case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
5680  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5681  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5682  case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
5683  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5684  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5685  case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
5686  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5687  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5688  case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
5689  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5690  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5691  case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
5692  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5693  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5694  case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
5695  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5696  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5697  }
5698 }
5699 
5700 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5701  switch(Opc) {
5702  default: llvm_unreachable("unexpected opcode!");
5703  // VLD1LN
5704  case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
5705  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5706  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5707  case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
5708  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5709  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5710  case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
5711  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5712  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5713 
5714  // VLD2LN
5715  case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
5716  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5717  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5718  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
5719  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5720  case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
5721  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5722  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5723  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5724  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5725  case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
5726  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5727  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5728  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5729  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5730 
5731  // VLD3DUP
5732  case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
5733  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5734  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5735  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
5736  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD;
5737  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5738  case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
5739  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5740  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5741  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5742  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5743  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5744  case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
5745  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5746  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5747  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5748  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5749  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5750 
5751  // VLD3LN
5752  case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
5753  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5754  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5755  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
5756  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5757  case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
5758  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5759  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5760  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5761  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5762  case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
5763  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5764  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5765  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5766  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5767 
5768  // VLD3
5769  case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
5770  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5771  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5772  case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
5773  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5774  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5775  case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
5776  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5777  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5778  case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
5779  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5780  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5781  case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
5782  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5783  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5784  case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
5785  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5786  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5787 
5788  // VLD4LN
5789  case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
5790  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5791  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5792  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNq16_UPD;
5793  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5794  case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
5795  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5796  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5797  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5798  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5799  case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
5800  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5801  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5802  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5803  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5804 
5805  // VLD4DUP
5806  case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
5807  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5808  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5809  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
5810  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
5811  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5812  case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
5813  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5814  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5815  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5816  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5817  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5818  case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
5819  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5820  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5821  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5822  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5823  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5824 
5825  // VLD4
5826  case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
5827  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5828  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5829  case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
5830  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5831  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5832  case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
5833  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5834  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5835  case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
5836  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5837  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5838  case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
5839  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5840  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5841  case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
5842  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5843  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5844  }
5845 }
5846 
5847 bool ARMAsmParser::
5848 processInstruction(MCInst &Inst,
5849  const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5850  switch (Inst.getOpcode()) {
5851  // Alias for alternate form of 'ADR Rd, #imm' instruction.
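 // e.g. "add r0, pc, #8" is recognized here and converted to the ADR form.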
5852  case ARM::ADDri: {
5853  if (Inst.getOperand(1).getReg() != ARM::PC ||
5854  Inst.getOperand(5).getReg() != 0)
5855  return false;
5856  MCInst TmpInst;
5857  TmpInst.setOpcode(ARM::ADR);
5858  TmpInst.addOperand(Inst.getOperand(0));
5859  TmpInst.addOperand(Inst.getOperand(2));
5860  TmpInst.addOperand(Inst.getOperand(3));
5861  TmpInst.addOperand(Inst.getOperand(4));
5862  Inst = TmpInst;
5863  return true;
5864  }
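  // Example for the case above (illustrative): "add r0, pc, #8" reaches here
  // as ADDri with Rn == PC and no cc_out, and is rewritten to the equivalent
  // "adr r0, #8" so it is emitted as a PC-relative ADR rather than a plain add.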
5865  // Aliases for alternate PC+imm syntax of LDR instructions.
5866  case ARM::t2LDRpcrel:
5867  // Select the narrow version if the immediate will fit.
5868  if (Inst.getOperand(1).getImm() > 0 &&
5869  Inst.getOperand(1).getImm() <= 0xff &&
5870  !(static_cast<ARMOperand*>(Operands[2])->isToken() &&
5871  static_cast<ARMOperand*>(Operands[2])->getToken() == ".w"))
5872  Inst.setOpcode(ARM::tLDRpci);
5873  else
5874  Inst.setOpcode(ARM::t2LDRpci);
5875  return true;
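  // Example for the case above: "ldr r0, label" with a small positive offset
  // is narrowed to the 16-bit literal load (tLDRpci); an out-of-range or
  // non-positive offset, or an explicit "ldr.w", keeps the 32-bit t2LDRpci.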
5876  case ARM::t2LDRBpcrel:
5877  Inst.setOpcode(ARM::t2LDRBpci);
5878  return true;
5879  case ARM::t2LDRHpcrel:
5880  Inst.setOpcode(ARM::t2LDRHpci);
5881  return true;
5882  case ARM::t2LDRSBpcrel:
5883  Inst.setOpcode(ARM::t2LDRSBpci);
5884  return true;
5885  case ARM::t2LDRSHpcrel:
5886  Inst.setOpcode(ARM::t2LDRSHpci);
5887  return true;
5888  // Handle NEON VST complex aliases.
5889  case ARM::VST1LNdWB_register_Asm_8:
5890  case ARM::VST1LNdWB_register_Asm_16:
5891  case ARM::VST1LNdWB_register_Asm_32: {
5892  MCInst TmpInst;
5893  // Shuffle the operands around so the lane index operand is in the
5894  // right place.
5895  unsigned Spacing;
5896  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5897  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5898  TmpInst.addOperand(Inst.getOperand(2)); // Rn
5899  TmpInst.addOperand(Inst.getOperand(3)); // alignment
5900  TmpInst.addOperand(Inst.getOperand(4)); // Rm
5901  TmpInst.addOperand(Inst.getOperand(0)); // Vd
5902  TmpInst.addOperand(Inst.getOperand(1)); // lane
5903  TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5904  TmpInst.addOperand(Inst.getOperand(6));
5905  Inst = TmpInst;
5906  return true;
5907  }
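  // Example for the case above: "vst1.8 {d0[1]}, [r0], r2" is parsed with the
  // vector/lane operands first; the shuffle above places Rn_wb, Rn, alignment
  // and Rm ahead of Vd and the lane index, which is the operand order the
  // real writeback (_UPD) instruction expects.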
5908 
5909  case ARM::VST2LNdWB_register_Asm_8:
5910  case ARM::VST2LNdWB_register_Asm_16:
5911  case ARM::VST2LNdWB_register_Asm_32:
5912  case ARM::VST2LNqWB_register_Asm_16:
5913  case ARM::VST2LNqWB_register_Asm_32: {
5914  MCInst TmpInst;
5915  // Shuffle the operands around so the lane index operand is in the
5916  // right place.
5917  unsigned Spacing;
5918  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5919  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5920  TmpInst.addOperand(Inst.getOperand(2)); // Rn
5921  TmpInst.addOperand(Inst.getOperand(3)); // alignment
5922  TmpInst.addOperand(Inst.getOperand(4)); // Rm
5923  TmpInst.addOperand(Inst.getOperand(0)); // Vd
5924  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5925  Spacing));
5926  TmpInst.addOperand(Inst.getOperand(1)); // lane
5927  TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5928  TmpInst.addOperand(Inst.getOperand(6));
5929  Inst = TmpInst;
5930  return true;
5931  }
5932 
5933  case ARM::VST3LNdWB_register_Asm_8:
5934  case ARM::VST3LNdWB_register_Asm_16:
5935  case ARM::VST3LNdWB_register_Asm_32:
5936  case ARM::VST3LNqWB_register_Asm_16:
5937  case ARM::VST3LNqWB_register_Asm_32: {
5938  MCInst TmpInst;
5939  // Shuffle the operands around so the lane index operand is in the
5940  // right place.
5941  unsigned Spacing;
5942  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5943  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5944  TmpInst.addOperand(Inst.getOperand(2)); // Rn
5945  TmpInst.addOperand(Inst.getOperand(3)); // alignment
5946  TmpInst.addOperand(Inst.getOperand(4)); // Rm
5947  TmpInst.addOperand(Inst.getOperand(0)); // Vd
5948  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5949  Spacing));
5950  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5951  Spacing * 2));
5952  TmpInst.addOperand(Inst.getOperand(1)); // lane
5953  TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5954  TmpInst.addOperand(Inst.getOperand(6));
5955  Inst = TmpInst;
5956  return true;
5957  }
5958 
5959  case ARM::VST4LNdWB_register_Asm_8:
5960  case ARM::VST4LNdWB_register_Asm_16:
5961  case ARM::VST4LNdWB_register_Asm_32:
5962  case ARM::VST4LNqWB_register_Asm_16:
5963  case ARM::VST4LNqWB_register_Asm_32: {
5964  MCInst TmpInst;
5965  // Shuffle the operands around so the lane index operand is in the
5966  // right place.
5967  unsigned Spacing;
5968  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5969  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5970  TmpInst.addOperand(Inst.getOperand(2)); // Rn
5971  TmpInst.addOperand(Inst.getOperand(3)); // alignment
5972  TmpInst.addOperand(Inst.getOperand(4)); // Rm
5973  TmpInst.addOperand(Inst.getOperand(0)); // Vd
5974  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5975  Spacing));
5976  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5977  Spacing * 2));
5978  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5979  Spacing * 3));
5980  TmpInst.addOperand(Inst.getOperand(1)); // lane
5981  TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5982  TmpInst.addOperand(Inst.getOperand(6));
5983  Inst = TmpInst;
5984  return true;
5985  }
5986 
5987  case ARM::VST1LNdWB_fixed_Asm_8:
5988  case ARM::VST1LNdWB_fixed_Asm_16:
5989  case ARM::VST1LNdWB_fixed_Asm_32: {
5990  MCInst TmpInst;
5991  // Shuffle the operands around so the lane index operand is in the
5992  // right place.
5993  unsigned Spacing;
5994  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5995  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5996  TmpInst.addOperand(Inst.getOperand(2)); // Rn
5997  TmpInst.addOperand(Inst.getOperand(3)); // alignment
5998  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5999  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6000  TmpInst.addOperand(Inst.getOperand(1)); // lane
6001  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6002  TmpInst.addOperand(Inst.getOperand(5));
6003  Inst = TmpInst;
6004  return true;
6005  }
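  // Example for the case above: for the fixed-increment writeback form, e.g.
  // "vst1.8 {d0[1]}, [r0]!", there is no index register, so a zero register
  // operand (MCOperand::CreateReg(0)) stands in for Rm.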
6006 
6007  case ARM::VST2LNdWB_fixed_Asm_8:
6008  case ARM::VST2LNdWB_fixed_Asm_16:
6009  case ARM::VST2LNdWB_fixed_Asm_32:
6010  case ARM::VST2LNqWB_fixed_Asm_16:
6011  case ARM::VST2LNqWB_fixed_Asm_32: {
6012  MCInst TmpInst;
6013  // Shuffle the operands around so the lane index operand is in the
6014  // right place.
6015  unsigned Spacing;
6016  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6017  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6018  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6019  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6020  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6021  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6022  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6023  Spacing));
6024  TmpInst.addOperand(Inst.getOperand(1)); // lane
6025  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6026  TmpInst.addOperand(Inst.getOperand(5));
6027  Inst = TmpInst;
6028  return true;
6029  }
6030 
6031  case ARM::VST3LNdWB_fixed_Asm_8:
6032  case ARM::VST3LNdWB_fixed_Asm_16:
6033  case ARM::VST3LNdWB_fixed_Asm_32:
6034  case ARM::VST3LNqWB_fixed_Asm_16:
6035  case ARM::VST3LNqWB_fixed_Asm_32: {
6036  MCInst TmpInst;
6037  // Shuffle the operands around so the lane index operand is in the
6038  // right place.
6039  unsigned Spacing;
6040  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6041  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6042  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6043  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6044  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6045  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6046  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6047  Spacing));
6048  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6049  Spacing * 2));
6050  TmpInst.addOperand(Inst.getOperand(1)); // lane
6051  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6052  TmpInst.addOperand(Inst.getOperand(5));
6053  Inst = TmpInst;
6054  return true;
6055  }
6056 
6057  case ARM::VST4LNdWB_fixed_Asm_8:
6058  case ARM::VST4LNdWB_fixed_Asm_16:
6059  case ARM::VST4LNdWB_fixed_Asm_32:
6060  case ARM::VST4LNqWB_fixed_Asm_16:
6061  case ARM::VST4LNqWB_fixed_Asm_32: {
6062  MCInst TmpInst;
6063  // Shuffle the operands around so the lane index operand is in the
6064  // right place.
6065  unsigned Spacing;
6066  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6067  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6068  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6069  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6070  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6071  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6072  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6073  Spacing));
6074  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6075  Spacing * 2));
6076  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6077  Spacing * 3));
6078  TmpInst.addOperand(Inst.getOperand(1)); // lane
6079  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6080  TmpInst.addOperand(Inst.getOperand(5));
6081  Inst = TmpInst;
6082  return true;
6083  }
6084 
6085  case ARM::VST1LNdAsm_8:
6086  case ARM::VST1LNdAsm_16:
6087  case ARM::VST1LNdAsm_32: {
6088  MCInst TmpInst;
6089  // Shuffle the operands around so the lane index operand is in the
6090  // right place.
6091  unsigned Spacing;
6092  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6093  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6094  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6095  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6096  TmpInst.addOperand(Inst.getOperand(1)); // lane
6097  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6098  TmpInst.addOperand(Inst.getOperand(5));
6099  Inst = TmpInst;
6100  return true;
6101  }
6102 
6103  case ARM::VST2LNdAsm_8:
6104  case ARM::VST2LNdAsm_16:
6105  case ARM::VST2LNdAsm_32:
6106  case ARM::VST2LNqAsm_16:
6107  case ARM::VST2LNqAsm_32: {
6108  MCInst TmpInst;
6109  // Shuffle the operands around so the lane index operand is in the
6110  // right place.
6111  unsigned Spacing;
6112  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6113  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6114  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6115  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6116  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6117  Spacing));
6118  TmpInst.addOperand(Inst.getOperand(1)); // lane
6119  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6120  TmpInst.addOperand(Inst.getOperand(5));
6121  Inst = TmpInst;
6122  return true;
6123  }
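  // Example for the cases above: only the first vector register survives
  // parsing; the second is reconstructed as Vd + Spacing, so
  // "vst2.8 {d0[1], d1[1]}, [r0]" uses spacing 1 (d0, d1) while the
  // even-spaced "vst2.16 {d0[1], d2[1]}, [r0]" uses spacing 2 (d0, d2).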
6124 
6125  case ARM::VST3LNdAsm_8:
6126  case ARM::VST3LNdAsm_16:
6127  case ARM::VST3LNdAsm_32:
6128  case ARM::VST3LNqAsm_16:
6129  case ARM::VST3LNqAsm_32: {
6130  MCInst TmpInst;
6131  // Shuffle the operands around so the lane index operand is in the
6132  // right place.
6133  unsigned Spacing;
6134  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6135  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6136  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6137  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6138  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6139  Spacing));
6140  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6141  Spacing * 2));
6142  TmpInst.addOperand(Inst.getOperand(1)); // lane
6143  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6144  TmpInst.addOperand(Inst.getOperand(5));
6145  Inst = TmpInst;
6146  return true;
6147  }
6148 
6149  case ARM::VST4LNdAsm_8:
6150  case ARM::VST4LNdAsm_16:
6151  case ARM::VST4LNdAsm_32:
6152  case ARM::VST4LNqAsm_16:
6153  case ARM::VST4LNqAsm_32: {
6154  MCInst TmpInst;
6155  // Shuffle the operands around so the lane index operand is in the
6156  // right place.
6157  unsigned Spacing;
6158  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6159  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6160  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6161  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6162  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6163  Spacing));
6164  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6165  Spacing * 2));
6166  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6167  Spacing * 3));
6168  TmpInst.addOperand(Inst.getOperand(1)); // lane
6169  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6170  TmpInst.addOperand(Inst.getOperand(5));
6171  Inst = TmpInst;
6172  return true;
6173  }
6174 
6175  // Handle NEON VLD complex aliases.
6176  case ARM::VLD1LNdWB_register_Asm_8:
6177  case ARM::VLD1LNdWB_register_Asm_16:
6178  case ARM::VLD1LNdWB_register_Asm_32: {
6179  MCInst TmpInst;
6180  // Shuffle the operands around so the lane index operand is in the
6181  // right place.
6182  unsigned Spacing;
6183  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6184  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6185  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6186  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6187  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6188  TmpInst.addOperand(Inst.getOperand(4)); // Rm
6189  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6190  TmpInst.addOperand(Inst.getOperand(1)); // lane
6191  TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6192  TmpInst.addOperand(Inst.getOperand(6));
6193  Inst = TmpInst;
6194  return true;
6195  }
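  // Example for the case above: a lane load such as "vld1.8 {d0[3]}, [r0], r2"
  // only replaces one element, so Vd is also added as the tied source operand
  // to make explicit that the remaining lanes are preserved.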
6196 
6197  case ARM::VLD2LNdWB_register_Asm_8:
6198  case ARM::VLD2LNdWB_register_Asm_16:
6199  case ARM::VLD2LNdWB_register_Asm_32:
6200  case ARM::VLD2LNqWB_register_Asm_16:
6201  case ARM::VLD2LNqWB_register_Asm_32: {
6202  MCInst TmpInst;
6203  // Shuffle the operands around so the lane index operand is in the
6204  // right place.
6205  unsigned Spacing;
6206  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6207  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6208  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6209  Spacing));
6210  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6211  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6212  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6213  TmpInst.addOperand(Inst.getOperand(4)); // Rm
6214  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6215  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6216  Spacing));
6217  TmpInst.addOperand(Inst.getOperand(1)); // lane
6218  TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6219  TmpInst.addOperand(Inst.getOperand(6));
6220  Inst = TmpInst;
6221  return true;
6222  }
6223 
6224  case ARM::VLD3LNdWB_register_Asm_8:
6225  case ARM::VLD3LNdWB_register_Asm_16:
6226  case ARM::VLD3LNdWB_register_Asm_32:
6227  case ARM::VLD3LNqWB_register_Asm_16:
6228  case ARM::VLD3LNqWB_register_Asm_32: {
6229  MCInst TmpInst;
6230  // Shuffle the operands around so the lane index operand is in the
6231  // right place.
6232  unsigned Spacing;
6233  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6234  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6235  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6236  Spacing));
6237  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6238  Spacing * 2));
6239  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6240  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6241  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6242  TmpInst.addOperand(Inst.getOperand(4)); // Rm
6243  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6244  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6245  Spacing));
6246  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6247  Spacing * 2));
6248  TmpInst.addOperand(Inst.getOperand(1)); // lane
6249  TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6250  TmpInst.addOperand(Inst.getOperand(6));
6251  Inst = TmpInst;
6252  return true;
6253  }
6254 
6255  case ARM::VLD4LNdWB_register_Asm_8:
6256  case ARM::VLD4LNdWB_register_Asm_16:
6257  case ARM::VLD4LNdWB_register_Asm_32:
6258  case ARM::VLD4LNqWB_register_Asm_16:
6259  case ARM::VLD4LNqWB_register_Asm_32: {
6260  MCInst TmpInst;
6261  // Shuffle the operands around so the lane index operand is in the
6262  // right place.
6263  unsigned Spacing;
6264  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6265  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6266  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6267  Spacing));
6268  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6269  Spacing * 2));
6270  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6271  Spacing * 3));
6272  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6273  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6274  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6275  TmpInst.addOperand(Inst.getOperand(4)); // Rm
6276  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6277  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6278  Spacing));
6279  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6280  Spacing * 2));
6281  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6282  Spacing * 3));
6283  TmpInst.addOperand(Inst.getOperand(1)); // lane
6284  TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6285  TmpInst.addOperand(Inst.getOperand(6));
6286  Inst = TmpInst;
6287  return true;
6288  }
6289 
6290  case ARM::VLD1LNdWB_fixed_Asm_8:
6291  case ARM::VLD1LNdWB_fixed_Asm_16:
6292  case ARM::VLD1LNdWB_fixed_Asm_32: {
6293  MCInst TmpInst;
6294  // Shuffle the operands around so the lane index operand is in the
6295  // right place.
6296  unsigned Spacing;
6297  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6298  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6299  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6300  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6301  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6302  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6303  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6304  TmpInst.addOperand(Inst.getOperand(1)); // lane
6305  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6306  TmpInst.addOperand(Inst.getOperand(5));
6307  Inst = TmpInst;
6308  return true;
6309  }
6310 
6311  case ARM::VLD2LNdWB_fixed_Asm_8:
6312  case ARM::VLD2LNdWB_fixed_Asm_16:
6313  case ARM::VLD2LNdWB_fixed_Asm_32:
6314  case ARM::VLD2LNqWB_fixed_Asm_16:
6315  case ARM::VLD2LNqWB_fixed_Asm_32: {
6316  MCInst TmpInst;
6317  // Shuffle the operands around so the lane index operand is in the
6318  // right place.
6319  unsigned Spacing;
6320  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6321  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6322  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6323  Spacing));
6324  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6325  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6326  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6327  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6328  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6329  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6330  Spacing));
6331  TmpInst.addOperand(Inst.getOperand(1)); // lane
6332  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6333  TmpInst.addOperand(Inst.getOperand(5));
6334  Inst = TmpInst;
6335  return true;
6336  }
6337 
6338  case ARM::VLD3LNdWB_fixed_Asm_8:
6339  case ARM::VLD3LNdWB_fixed_Asm_16:
6340  case ARM::VLD3LNdWB_fixed_Asm_32:
6341  case ARM::VLD3LNqWB_fixed_Asm_16:
6342  case ARM::VLD3LNqWB_fixed_Asm_32: {
6343  MCInst TmpInst;
6344  // Shuffle the operands around so the lane index operand is in the
6345  // right place.
6346  unsigned Spacing;
6347  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6348  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6349  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6350  Spacing));
6351  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6352  Spacing * 2));
6353  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6354  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6355  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6356  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6357  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6358  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6359  Spacing));
6360  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6361  Spacing * 2));
6362  TmpInst.addOperand(Inst.getOperand(1)); // lane
6363  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6364  TmpInst.addOperand(Inst.getOperand(5));
6365  Inst = TmpInst;
6366  return true;
6367  }
6368 
6369  case ARM::VLD4LNdWB_fixed_Asm_8:
6370  case ARM::VLD4LNdWB_fixed_Asm_16:
6371  case ARM::VLD4LNdWB_fixed_Asm_32:
6372  case ARM::VLD4LNqWB_fixed_Asm_16:
6373  case ARM::VLD4LNqWB_fixed_Asm_32: {
6374  MCInst TmpInst;
6375  // Shuffle the operands around so the lane index operand is in the
6376  // right place.
6377  unsigned Spacing;
6378  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6379  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6380  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6381  Spacing));
6382  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6383  Spacing * 2));
6384  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6385  Spacing * 3));
6386  TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6387  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6388  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6389  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6390  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6391  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6392  Spacing));
6393  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6394  Spacing * 2));
6395  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6396  Spacing * 3));
6397  TmpInst.addOperand(Inst.getOperand(1)); // lane
6398  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6399  TmpInst.addOperand(Inst.getOperand(5));
6400  Inst = TmpInst;
6401  return true;
6402  }
6403 
6404  case ARM::VLD1LNdAsm_8:
6405  case ARM::VLD1LNdAsm_16:
6406  case ARM::VLD1LNdAsm_32: {
6407  MCInst TmpInst;
6408  // Shuffle the operands around so the lane index operand is in the
6409  // right place.
6410  unsigned Spacing;
6411  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6412  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6413  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6414  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6415  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6416  TmpInst.addOperand(Inst.getOperand(1)); // lane
6417  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6418  TmpInst.addOperand(Inst.getOperand(5));
6419  Inst = TmpInst;
6420  return true;
6421  }
6422 
6423  case ARM::VLD2LNdAsm_8:
6424  case ARM::VLD2LNdAsm_16:
6425  case ARM::VLD2LNdAsm_32:
6426  case ARM::VLD2LNqAsm_16:
6427  case ARM::VLD2LNqAsm_32: {
6428  MCInst TmpInst;
6429  // Shuffle the operands around so the lane index operand is in the
6430  // right place.
6431  unsigned Spacing;
6432  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6433  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6434  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6435  Spacing));
6436  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6437  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6438  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6439  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6440  Spacing));
6441  TmpInst.addOperand(Inst.getOperand(1)); // lane
6442  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6443  TmpInst.addOperand(Inst.getOperand(5));
6444  Inst = TmpInst;
6445  return true;
6446  }
6447 
6448  case ARM::VLD3LNdAsm_8:
6449  case ARM::VLD3LNdAsm_16:
6450  case ARM::VLD3LNdAsm_32:
6451  case ARM::VLD3LNqAsm_16:
6452  case ARM::VLD3LNqAsm_32: {
6453  MCInst TmpInst;
6454  // Shuffle the operands around so the lane index operand is in the
6455  // right place.
6456  unsigned Spacing;
6457  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6458  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6459  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6460  Spacing));
6461  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6462  Spacing * 2));
6463  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6464  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6465  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6466  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6467  Spacing));
6468  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6469  Spacing * 2));
6470  TmpInst.addOperand(Inst.getOperand(1)); // lane
6471  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6472  TmpInst.addOperand(Inst.getOperand(5));
6473  Inst = TmpInst;
6474  return true;
6475  }
6476 
6477  case ARM::VLD4LNdAsm_8:
6478  case ARM::VLD4LNdAsm_16:
6479  case ARM::VLD4LNdAsm_32:
6480  case ARM::VLD4LNqAsm_16:
6481  case ARM::VLD4LNqAsm_32: {
6482  MCInst TmpInst;
6483  // Shuffle the operands around so the lane index operand is in the
6484  // right place.
6485  unsigned Spacing;
6486  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6487  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6488  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6489  Spacing));
6490  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6491  Spacing * 2));
6492  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6493  Spacing * 3));
6494  TmpInst.addOperand(Inst.getOperand(2)); // Rn
6495  TmpInst.addOperand(Inst.getOperand(3)); // alignment
6496  TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6497  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6498  Spacing));
6499  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6500  Spacing * 2));
6501  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6502  Spacing * 3));
6503  TmpInst.addOperand(Inst.getOperand(1)); // lane
6504  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6505  TmpInst.addOperand(Inst.getOperand(5));
6506  Inst = TmpInst;
6507  return true;
6508  }
6509 
6510  // VLD3DUP single 3-element structure to all lanes instructions.
6511  case ARM::VLD3DUPdAsm_8:
6512  case ARM::VLD3DUPdAsm_16:
6513  case ARM::VLD3DUPdAsm_32:
6514  case ARM::VLD3DUPqAsm_8:
6515  case ARM::VLD3DUPqAsm_16:
6516  case ARM::VLD3DUPqAsm_32: {
6517  MCInst TmpInst;
6518  unsigned Spacing;
6519  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6520  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6521  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6522  Spacing));
6523  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6524  Spacing * 2));
6525  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6526  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6527  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6528  TmpInst.addOperand(Inst.getOperand(4));
6529  Inst = TmpInst;
6530  return true;
6531  }
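  // Example for the cases above: "vld3.8 {d0[], d1[], d2[]}, [r0]" (or the
  // even-spaced "vld3.8 {d0[], d2[], d4[]}, [r0]") loads one 3-element
  // structure and replicates it to all lanes; only d0 is parsed explicitly,
  // and the remaining list registers are derived from Vd and Spacing.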
6532 
6533  case ARM::VLD3DUPdWB_fixed_Asm_8:
6534  case ARM::VLD3DUPdWB_fixed_Asm_16:
6535  case ARM::VLD3DUPdWB_fixed_Asm_32:
6536  case ARM::VLD3DUPqWB_fixed_Asm_8:
6537  case ARM::VLD3DUPqWB_fixed_Asm_16:
6538  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6539  MCInst TmpInst;
6540  unsigned Spacing;
6541  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6542  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6543  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6544  Spacing));
6545  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6546  Spacing * 2));
6547  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6548  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6549  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6550  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6551  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6552  TmpInst.addOperand(Inst.getOperand(4));
6553  Inst = TmpInst;
6554  return true;
6555  }
6556 
6557  case ARM::VLD3DUPdWB_register_Asm_8:
6558  case ARM::VLD3DUPdWB_register_Asm_16:
6559  case ARM::VLD3DUPdWB_register_Asm_32:
6560  case ARM::VLD3DUPqWB_register_Asm_8:
6561  case ARM::VLD3DUPqWB_register_Asm_16:
6562  case ARM::VLD3DUPqWB_register_Asm_32: {
6563  MCInst TmpInst;
6564  unsigned Spacing;
6565  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6566  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6567  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6568  Spacing));
6569  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6570  Spacing * 2));
6571  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6572  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6573  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6574  TmpInst.addOperand(Inst.getOperand(3)); // Rm
6575  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6576  TmpInst.addOperand(Inst.getOperand(5));
6577  Inst = TmpInst;
6578  return true;
6579  }
6580 
6581  // VLD3 multiple 3-element structure instructions.
6582  case ARM::VLD3dAsm_8:
6583  case ARM::VLD3dAsm_16:
6584  case ARM::VLD3dAsm_32:
6585  case ARM::VLD3qAsm_8:
6586  case ARM::VLD3qAsm_16:
6587  case ARM::VLD3qAsm_32: {
6588  MCInst TmpInst;
6589  unsigned Spacing;
6590  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6591  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6592  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6593  Spacing));
6594  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6595  Spacing * 2));
6596  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6597  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6598  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6599  TmpInst.addOperand(Inst.getOperand(4));
6600  Inst = TmpInst;
6601  return true;
6602  }
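  // Example for the cases above: "vld3.8 {d0, d1, d2}, [r0]" (spacing 1) or
  // "vld3.16 {d0, d2, d4}, [r0]" (spacing 2); as with the lane forms, the
  // pseudo carries only the first register and the full list is rebuilt here.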
6603 
6604  case ARM::VLD3dWB_fixed_Asm_8:
6605  case ARM::VLD3dWB_fixed_Asm_16:
6606  case ARM::VLD3dWB_fixed_Asm_32:
6607  case ARM::VLD3qWB_fixed_Asm_8:
6608  case ARM::VLD3qWB_fixed_Asm_16:
6609  case ARM::VLD3qWB_fixed_Asm_32: {
6610  MCInst TmpInst;
6611  unsigned Spacing;
6612  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6613  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6614  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6615  Spacing));
6616  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6617  Spacing * 2));
6618  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6619  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6620  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6621  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6622  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6623  TmpInst.addOperand(Inst.getOperand(4));
6624  Inst = TmpInst;
6625  return true;
6626  }
6627 
6628  case ARM::VLD3dWB_register_Asm_8:
6629  case ARM::VLD3dWB_register_Asm_16:
6630  case ARM::VLD3dWB_register_Asm_32:
6631  case ARM::VLD3qWB_register_Asm_8:
6632  case ARM::VLD3qWB_register_Asm_16:
6633  case ARM::VLD3qWB_register_Asm_32: {
6634  MCInst TmpInst;
6635  unsigned Spacing;
6636  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6637  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6638  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6639  Spacing));
6640  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6641  Spacing * 2));
6642  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6643  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6644  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6645  TmpInst.addOperand(Inst.getOperand(3)); // Rm
6646  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6647  TmpInst.addOperand(Inst.getOperand(5));
6648  Inst = TmpInst;
6649  return true;
6650  }
6651 
6652  // VLD4DUP single 4-element structure to all lanes instructions.
6653  case ARM::VLD4DUPdAsm_8:
6654  case ARM::VLD4DUPdAsm_16:
6655  case ARM::VLD4DUPdAsm_32:
6656  case ARM::VLD4DUPqAsm_8:
6657  case ARM::VLD4DUPqAsm_16:
6658  case ARM::VLD4DUPqAsm_32: {
6659  MCInst TmpInst;
6660  unsigned Spacing;
6661  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6662  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6663  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6664  Spacing));
6665  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6666  Spacing * 2));
6667  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6668  Spacing * 3));
6669  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6670  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6671  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6672  TmpInst.addOperand(Inst.getOperand(4));
6673  Inst = TmpInst;
6674  return true;
6675  }
6676 
6677  case ARM::VLD4DUPdWB_fixed_Asm_8:
6678  case ARM::VLD4DUPdWB_fixed_Asm_16:
6679  case ARM::VLD4DUPdWB_fixed_Asm_32:
6680  case ARM::VLD4DUPqWB_fixed_Asm_8:
6681  case ARM::VLD4DUPqWB_fixed_Asm_16:
6682  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6683  MCInst TmpInst;
6684  unsigned Spacing;
6685  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6686  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6687  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6688  Spacing));
6689  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6690  Spacing * 2));
6691  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6692  Spacing * 3));
6693  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6694  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6695  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6696  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6697  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6698  TmpInst.addOperand(Inst.getOperand(4));
6699  Inst = TmpInst;
6700  return true;
6701  }
6702 
6703  case ARM::VLD4DUPdWB_register_Asm_8:
6704  case ARM::VLD4DUPdWB_register_Asm_16:
6705  case ARM::VLD4DUPdWB_register_Asm_32:
6706  case ARM::VLD4DUPqWB_register_Asm_8:
6707  case ARM::VLD4DUPqWB_register_Asm_16:
6708  case ARM::VLD4DUPqWB_register_Asm_32: {
6709  MCInst TmpInst;
6710  unsigned Spacing;
6711  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6712  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6713  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6714  Spacing));
6715  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6716  Spacing * 2));
6717  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6718  Spacing * 3));
6719  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6720  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6721  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6722  TmpInst.addOperand(Inst.getOperand(3)); // Rm
6723  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6724  TmpInst.addOperand(Inst.getOperand(5));
6725  Inst = TmpInst;
6726  return true;
6727  }
6728 
6729  // VLD4 multiple 4-element structure instructions.
6730  case ARM::VLD4dAsm_8:
6731  case ARM::VLD4dAsm_16:
6732  case ARM::VLD4dAsm_32:
6733  case ARM::VLD4qAsm_8:
6734  case ARM::VLD4qAsm_16:
6735  case ARM::VLD4qAsm_32: {
6736  MCInst TmpInst;
6737  unsigned Spacing;
6738  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6739  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6740  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6741  Spacing));
6742  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6743  Spacing * 2));
6744  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6745  Spacing * 3));
6746  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6747  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6748  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6749  TmpInst.addOperand(Inst.getOperand(4));
6750  Inst = TmpInst;
6751  return true;
6752  }
6753 
6754  case ARM::VLD4dWB_fixed_Asm_8:
6755  case ARM::VLD4dWB_fixed_Asm_16:
6756  case ARM::VLD4dWB_fixed_Asm_32:
6757  case ARM::VLD4qWB_fixed_Asm_8:
6758  case ARM::VLD4qWB_fixed_Asm_16:
6759  case ARM::VLD4qWB_fixed_Asm_32: {
6760  MCInst TmpInst;
6761  unsigned Spacing;
6762  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6763  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6764  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6765  Spacing));
6766  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6767  Spacing * 2));
6768  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6769  Spacing * 3));
6770  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6771  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6772  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6773  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6774  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6775  TmpInst.addOperand(Inst.getOperand(4));
6776  Inst = TmpInst;
6777  return true;
6778  }
6779 
6780  case ARM::VLD4dWB_register_Asm_8:
6781  case ARM::VLD4dWB_register_Asm_16:
6782  case ARM::VLD4dWB_register_Asm_32:
6783  case ARM::VLD4qWB_register_Asm_8:
6784  case ARM::VLD4qWB_register_Asm_16:
6785  case ARM::VLD4qWB_register_Asm_32: {
6786  MCInst TmpInst;
6787  unsigned Spacing;
6788  TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6789  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6790  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6791  Spacing));
6792  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6793  Spacing * 2));
6794  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6795  Spacing * 3));
6796  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6797  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6798  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6799  TmpInst.addOperand(Inst.getOperand(3)); // Rm
6800  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6801  TmpInst.addOperand(Inst.getOperand(5));
6802  Inst = TmpInst;
6803  return true;
6804  }
6805 
6806  // VST3 multiple 3-element structure instructions.
6807  case ARM::VST3dAsm_8:
6808  case ARM::VST3dAsm_16:
6809  case ARM::VST3dAsm_32:
6810  case ARM::VST3qAsm_8:
6811  case ARM::VST3qAsm_16:
6812  case ARM::VST3qAsm_32: {
6813  MCInst TmpInst;
6814  unsigned Spacing;
6815  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6816  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6817  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6818  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6819  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6820  Spacing));
6821  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6822  Spacing * 2));
6823  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6824  TmpInst.addOperand(Inst.getOperand(4));
6825  Inst = TmpInst;
6826  return true;
6827  }
6828 
6829  case ARM::VST3dWB_fixed_Asm_8:
6830  case ARM::VST3dWB_fixed_Asm_16:
6831  case ARM::VST3dWB_fixed_Asm_32:
6832  case ARM::VST3qWB_fixed_Asm_8:
6833  case ARM::VST3qWB_fixed_Asm_16:
6834  case ARM::VST3qWB_fixed_Asm_32: {
6835  MCInst TmpInst;
6836  unsigned Spacing;
6837  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6838  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6839  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6840  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6841  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6842  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6843  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6844  Spacing));
6845  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6846  Spacing * 2));
6847  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6848  TmpInst.addOperand(Inst.getOperand(4));
6849  Inst = TmpInst;
6850  return true;
6851  }
6852 
6853  case ARM::VST3dWB_register_Asm_8:
6854  case ARM::VST3dWB_register_Asm_16:
6855  case ARM::VST3dWB_register_Asm_32:
6856  case ARM::VST3qWB_register_Asm_8:
6857  case ARM::VST3qWB_register_Asm_16:
6858  case ARM::VST3qWB_register_Asm_32: {
6859  MCInst TmpInst;
6860  unsigned Spacing;
6861  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6862  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6863  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6864  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6865  TmpInst.addOperand(Inst.getOperand(3)); // Rm
6866  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6867  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6868  Spacing));
6869  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6870  Spacing * 2));
6871  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6872  TmpInst.addOperand(Inst.getOperand(5));
6873  Inst = TmpInst;
6874  return true;
6875  }
6876 
6877  // VST4 multiple 4-element structure instructions.
6878  case ARM::VST4dAsm_8:
6879  case ARM::VST4dAsm_16:
6880  case ARM::VST4dAsm_32:
6881  case ARM::VST4qAsm_8:
6882  case ARM::VST4qAsm_16:
6883  case ARM::VST4qAsm_32: {
6884  MCInst TmpInst;
6885  unsigned Spacing;
6886  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6887  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6888  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6889  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6890  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6891  Spacing));
6892  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6893  Spacing * 2));
6894  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6895  Spacing * 3));
6896  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6897  TmpInst.addOperand(Inst.getOperand(4));
6898  Inst = TmpInst;
6899  return true;
6900  }
6901 
6902  case ARM::VST4dWB_fixed_Asm_8:
6903  case ARM::VST4dWB_fixed_Asm_16:
6904  case ARM::VST4dWB_fixed_Asm_32:
6905  case ARM::VST4qWB_fixed_Asm_8:
6906  case ARM::VST4qWB_fixed_Asm_16:
6907  case ARM::VST4qWB_fixed_Asm_32: {
6908  MCInst TmpInst;
6909  unsigned Spacing;
6910  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6911  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6912  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6913  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6914  TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6915  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6916  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6917  Spacing));
6918  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6919  Spacing * 2));
6920  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6921  Spacing * 3));
6922  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6923  TmpInst.addOperand(Inst.getOperand(4));
6924  Inst = TmpInst;
6925  return true;
6926  }
6927 
6928  case ARM::VST4dWB_register_Asm_8:
6929  case ARM::VST4dWB_register_Asm_16:
6930  case ARM::VST4dWB_register_Asm_32:
6931  case ARM::VST4qWB_register_Asm_8:
6932  case ARM::VST4qWB_register_Asm_16:
6933  case ARM::VST4qWB_register_Asm_32: {
6934  MCInst TmpInst;
6935  unsigned Spacing;
6936  TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6937  TmpInst.addOperand(Inst.getOperand(1)); // Rn
6938  TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6939  TmpInst.addOperand(Inst.getOperand(2)); // alignment
6940  TmpInst.addOperand(Inst.getOperand(3)); // Rm
6941  TmpInst.addOperand(Inst.getOperand(0)); // Vd
6942  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6943  Spacing));
6944  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6945  Spacing * 2));
6946  TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6947  Spacing * 3));
6948  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6949  TmpInst.addOperand(Inst.getOperand(5));
6950  Inst = TmpInst;
6951  return true;
6952  }
6953 
6954  // Handle encoding choice for the shift-immediate instructions.
6955  case ARM::t2LSLri:
6956  case ARM::t2LSRri:
6957  case ARM::t2ASRri: {
6958  if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6959  Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6960  Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
6961  !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
6962  static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
6963  unsigned NewOpc;
6964  switch (Inst.getOpcode()) {
6965  default: llvm_unreachable("unexpected opcode");
6966  case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
6967  case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
6968  case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
6969  }
6970  // The Thumb1 operands aren't in the same order. Awesome, eh?
6971  MCInst TmpInst;
6972  TmpInst.setOpcode(NewOpc);
6973  TmpInst.addOperand(Inst.getOperand(0));
6974  TmpInst.addOperand(Inst.getOperand(5));
6975  TmpInst.addOperand(Inst.getOperand(1));
6976  TmpInst.addOperand(Inst.getOperand(2));
6977  TmpInst.addOperand(Inst.getOperand(3));
6978  TmpInst.addOperand(Inst.getOperand(4));
6979  Inst = TmpInst;
6980  return true;
6981  }
6982  return false;
6983  }
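  // Example for the case above: outside an IT block, "lsls r2, r2, #3"
  // (low registers, flags set) narrows to the 16-bit tLSLri; inside an IT
  // block the conditional, non-flag-setting form narrows instead. Writing
  // "lsl.w" keeps the 32-bit encoding.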
6984 
6985  // Handle the Thumb2 mode MOV complex aliases.
6986  case ARM::t2MOVsr:
6987  case ARM::t2MOVSsr: {
6988  // Which instruction to expand to depends on the CCOut operand and
6989  // whether we're in an IT block if the register operands are low
6990  // registers.
6991  bool isNarrow = false;
6992  if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6993  isARMLowRegister(Inst.getOperand(1).getReg()) &&
6994  isARMLowRegister(Inst.getOperand(2).getReg()) &&
6995  Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6996  inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6997  isNarrow = true;
6998  MCInst TmpInst;
6999  unsigned newOpc;
7000  switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
7001  default: llvm_unreachable("unexpected opcode!");
7002  case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
7003  case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
7004  case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
7005  case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
7006  }
7007  TmpInst.setOpcode(newOpc);
7008  TmpInst.addOperand(Inst.getOperand(0)); // Rd
7009  if (isNarrow)
7010  TmpInst.addOperand(MCOperand::CreateReg(
7011  Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
7012  TmpInst.addOperand(Inst.getOperand(1)); // Rn
7013  TmpInst.addOperand(Inst.getOperand(2)); // Rm
7014  TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7015  TmpInst.addOperand(Inst.getOperand(5));
7016  if (!isNarrow)
7017  TmpInst.addOperand(MCOperand::CreateReg(
7018  Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
7019  Inst = TmpInst;
7020  return true;
7021  }
7022  case ARM::t2MOVsi:
7023  case ARM::t2MOVSsi: {
7024  // Which instruction to expand to depends on the CCOut operand and
7025  // whether we're in an IT block if the register operands are low
7026  // registers.
7027  bool isNarrow = false;
7028  if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7029  isARMLowRegister(Inst.getOperand(1).getReg()) &&
7030  inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
7031  isNarrow = true;
7032  MCInst TmpInst;
7033  unsigned newOpc;
7034  switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
7035  default: llvm_unreachable("unexpected opcode!");
7036  case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
7037  case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
7038  case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
7039  case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
7040  case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
7041  }
7042  unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
7043  if (Amount == 32) Amount = 0;
7044  TmpInst.setOpcode(newOpc);
7045  TmpInst.addOperand(Inst.getOperand(0)); // Rd
7046  if (isNarrow)
7047  TmpInst.addOperand(MCOperand::CreateReg(
7048  Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7049  TmpInst.addOperand(Inst.getOperand(1)); // Rn
7050  if (newOpc != ARM::t2RRX)
7051  TmpInst.addOperand(MCOperand::CreateImm(Amount));
7052  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7053  TmpInst.addOperand(Inst.getOperand(4));
7054  if (!isNarrow)
7055  TmpInst.addOperand(MCOperand::CreateReg(
7056  Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7057  Inst = TmpInst;
7058  return true;
7059  }
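  // Example for the case above: "mov r0, r1, lsl #4" expands to tLSLri or
  // t2LSLri depending on register choice and IT state; "mov r0, r1, ror #8"
  // always uses the wide t2RORri, and "mov r0, r1, rrx" becomes t2RRX, which
  // takes no shift amount (hence the newOpc != t2RRX guard).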
7060  // Handle the ARM mode MOV complex aliases.
7061  case ARM::ASRr:
7062  case ARM::LSRr:
7063  case ARM::LSLr:
7064  case ARM::RORr: {
7065  ARM_AM::ShiftOpc ShiftTy;
7066  switch(Inst.getOpcode()) {
7067  default: llvm_unreachable("unexpected opcode!");
7068  case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
7069  case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
7070  case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
7071  case ARM::RORr: ShiftTy = ARM_AM::ror; break;
7072  }
7073  unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
7074  MCInst TmpInst;
7075  TmpInst.setOpcode(ARM::MOVsr);
7076  TmpInst.addOperand(Inst.getOperand(0)); // Rd
7077  TmpInst.addOperand(Inst.getOperand(1)); // Rn
7078  TmpInst.addOperand(Inst.getOperand(2)); // Rm
7079  TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7080  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7081  TmpInst.addOperand(Inst.getOperand(4));
7082  TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7083  Inst = TmpInst;
7084  return true;
7085  }
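  // Example for the case above: in ARM mode "asr r0, r1, r2" is a
  // pseudo-instruction for "mov r0, r1, asr r2", so it is rebuilt as MOVsr
  // with the shift type packed into the so_reg operand via getSORegOpc.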
7086  case ARM::ASRi:
7087  case ARM::LSRi:
7088  case ARM::LSLi:
7089  case ARM::RORi: {
7090  ARM_AM::ShiftOpc ShiftTy;
7091  switch(Inst.getOpcode()) {
7092  default: llvm_unreachable("unexpected opcode!");
7093  case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
7094  case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
7095  case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
7096  case ARM::RORi: ShiftTy = ARM_AM::ror; break;
7097  }
7098  // A shift by zero is a plain MOVr, not a MOVsi.
7099  unsigned Amt = Inst.getOperand(2).getImm();
7100  unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
7101  // A shift by 32 should be encoded as 0 when permitted
7102  if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
7103  Amt = 0;
7104  unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
7105  MCInst TmpInst;
7106  TmpInst.setOpcode(Opc);
7107  TmpInst.addOperand(Inst.getOperand(0)); // Rd
7108  TmpInst.addOperand(Inst.getOperand(1)); // Rn
7109  if (Opc == ARM::MOVsi)
7110  TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7111  TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7112  TmpInst.addOperand(Inst.getOperand(4));
7113  TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7114  Inst = TmpInst;
7115  return true;
7116  }
7117  case ARM::RRXi: {
7118  unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
7119  MCInst TmpInst;
7120  TmpInst.setOpcode(ARM::MOVsi);
7121  TmpInst.addOperand(Inst.getOperand(0)); // Rd
7122  TmpInst.addOperand(Inst.getOperand(1)); // Rn
7123  TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7124  TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7125  TmpInst.addOperand(Inst.getOperand(3));
7126  TmpInst.addOperand(Inst.getOperand(4)); // cc_out
7127  Inst = TmpInst;
7128  return true;
7129  }
7130  case ARM::t2LDMIA_UPD: {
7131  // If this is a load of a single register, then we should use
7132  // a post-indexed LDR instruction instead, per the ARM ARM.
7133  if (Inst.getNumOperands() != 5)
7134  return false;
7135  MCInst TmpInst;
7136  TmpInst.setOpcode(ARM::t2LDR_POST);
7137  TmpInst.addOperand(Inst.getOperand(4)); // Rt
7138  TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7139  TmpInst.addOperand(Inst.getOperand(1)); // Rn
7140  TmpInst.addOperand(MCOperand::CreateImm(4));
7141  TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7142  TmpInst.addOperand(Inst.getOperand(3));
7143  Inst = TmpInst;
7144  return true;
7145  }
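  // Example for the case above: a single-register "ldmia r1!, {r0}" is
  // rewritten to the post-indexed "ldr r0, [r1], #4", per the ARM ARM rule
  // referenced in the comment above.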
7146  case ARM::t2STMDB_UPD: {
7147  // If this is a store of a single register, then we should use
7148  // a pre-indexed STR instruction instead, per the ARM ARM.
7149  if (Inst.getNumOperands() != 5)
7150  return false;
7151  MCInst TmpInst;
7152  TmpInst.setOpcode(ARM::t2STR_PRE);
7153  TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7154  TmpInst.addOperand(Inst.getOperand(4)); // Rt
7155  TmpInst.addOperand(Inst.getOperand(1)); // Rn
7156  TmpInst.addOperand(MCOperand::CreateImm(-4));
7157  TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7158  TmpInst.addOperand(Inst.getOperand(3));
7159  Inst = TmpInst;
7160  return true;
7161  }
7162  case ARM::LDMIA_UPD:
7163  // If this is a load of a single register via a 'pop', then we should use
7164  // a post-indexed LDR instruction instead, per the ARM ARM.
7165  if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
7166  Inst.getNumOperands() == 5) {
7167  MCInst TmpInst;
7168  TmpInst.setOpcode(ARM::LDR_POST_IMM);
7169  TmpInst.addOperand(Inst.getOperand(4)); // Rt
7170  TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7171  TmpInst.addOperand(Inst.getOperand(1)); // Rn
7172  TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
7173  TmpInst.addOperand(MCOperand::CreateImm(4));
7174  TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7175  TmpInst.addOperand(Inst.getOperand(3));
7176  Inst = TmpInst;
7177  return true;
7178  }
7179  break;
7180  case ARM::STMDB_UPD:
7181  // If this is a store of a single register via a 'push', then we should use
7182  // a pre-indexed STR instruction instead, per the ARM ARM.
7183  if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
7184  Inst.getNumOperands() == 5) {
7185  MCInst TmpInst;
7186  TmpInst.setOpcode(ARM::STR_PRE_IMM);
7187  TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7188  TmpInst.addOperand(Inst.getOperand(4)); // Rt
7189  TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
7190  TmpInst.addOperand(MCOperand::CreateImm(-4));
7191  TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7192  TmpInst.addOperand(Inst.getOperand(3));
7193  Inst = TmpInst;
7194  }
7195  break;
7196  case ARM::t2ADDri12:
7197  // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
7198  // mnemonic was used (not "addw"), encoding T3 is preferred.
7199  if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
7200  ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7201  break;
7202  Inst.setOpcode(ARM::t2ADDri);
7203  Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7204  break;
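  // Example for the case above: if "add r0, r1, #256" matched the 12-bit
  // t2ADDri12 (T4) form, it is switched here to the preferred T3 t2ADDri,
  // since 256 is a valid Thumb2 modified immediate; writing
  // "addw r0, r1, #256" keeps the T4 encoding.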
7205  case ARM::t2SUBri12:
7206  // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
7207  // mnemonic was used (not "subw"), encoding T3 is preferred.
7208  if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
7209  ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7210  break;
7211  Inst.setOpcode(ARM::t2SUBri);
7212  Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7213  break;
7214  case ARM::tADDi8:
7215  // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7216  // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7217  // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7218  // to encoding T1 if <Rd> is omitted."
7219  if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7220  Inst.setOpcode(ARM::tADDi3);
7221  return true;
7222  }
7223  break;
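  // Example for the case above: "adds r1, r1, #3" written with an explicit Rd
  // selects the T1 tADDi3 encoding, while "adds r1, #3" (Rd omitted) keeps
  // the T2 tADDi8 encoding, per the ARM ARM preference quoted above.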
7224  case ARM::tSUBi8:
7225  // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
7226  // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7227  // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7228  // to encoding T1 if <Rd> is omitted."
7229  if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7230  Inst.setOpcode(ARM::tSUBi3);
7231  return true;
7232  }
7233  break;
7234  case ARM::t2ADDri:
7235  case ARM::t2SUBri: {
7236  // If the destination and first source operand are the same, and
7237  // the flags are compatible with the current IT status, use encoding T2
7238  // instead of T3. For compatibility with the system 'as'. Make sure the
7239  // wide encoding wasn't explicit.
7240  if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7241  !isARMLowRegister(Inst.getOperand(0).getReg()) ||
7242  (unsigned)Inst.getOperand(2).getImm() > 255 ||
7243  ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
7244  (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
7245  (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7246  static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7247  break;
7248  MCInst TmpInst;
7249  TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
7250  ARM::tADDi8 : ARM::tSUBi8);
7251  TmpInst.addOperand(Inst.getOperand(0));
7252  TmpInst.addOperand(Inst.getOperand(5));
7253  TmpInst.addOperand(Inst.getOperand(0));
7254  TmpInst.addOperand(Inst.getOperand(2));
7255  TmpInst.addOperand(Inst.getOperand(3));
7256  TmpInst.addOperand(Inst.getOperand(4));
7257  Inst = TmpInst;
7258  return true;
7259  }
7260  case ARM::t2ADDrr: {
7261  // If the destination and first source operand are the same, and
7262  // there's no setting of the flags, use encoding T2 instead of T3.
7263  // Note that this is only for ADD, not SUB. This mirrors the system
7264  // 'as' behaviour. Make sure the wide encoding wasn't explicit.
7265  if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7266  Inst.getOperand(5).getReg() != 0 ||
7267  (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7268  static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7269  break;
7270  MCInst TmpInst;
7271  TmpInst.setOpcode(ARM::tADDhirr);
7272  TmpInst.addOperand(Inst.getOperand(0));
7273  TmpInst.addOperand(Inst.getOperand(0));
7274  TmpInst.addOperand(Inst.getOperand(2));
7275  TmpInst.addOperand(Inst.getOperand(3));
7276  TmpInst.addOperand(Inst.getOperand(4));
7277  Inst = TmpInst;
7278  return true;
7279  }
7280  case ARM::tADDrSP: {
7281  // If the non-SP source operand and the destination operand are not the
7282  // same, we need to use the 32-bit encoding if it's available.
7283  if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7284  Inst.setOpcode(ARM::t2ADDrr);
7285  Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7286  return true;
7287  }
7288  break;
7289  }
7290  case ARM::tB:
7291  // A Thumb conditional branch outside of an IT block is a tBcc.
7292  if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7293  Inst.setOpcode(ARM::tBcc);
7294  return true;
7295  }
7296  break;
7297  case ARM::t2B:
7298  // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7299  if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
7300  Inst.setOpcode(ARM::t2Bcc);
7301  return true;
7302  }
7303  break;
7304  case ARM::t2Bcc:
7305  // If the conditional is AL or we're in an IT block, we really want t2B.
7306  if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7307  Inst.setOpcode(ARM::t2B);
7308  return true;
7309  }
7310  break;
7311  case ARM::tBcc:
7312  // If the conditional is AL, we really want tB.
7313  if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7314  Inst.setOpcode(ARM::tB);
7315  return true;
7316  }
7317  break;
7318  case ARM::tLDMIA: {
7319  // If the register list contains any high registers, or if the writeback
7320  // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7321  // instead if we're in Thumb2. Otherwise, this should have generated
7322  // an error in validateInstruction().
7323  unsigned Rn = Inst.getOperand(0).getReg();
7324  bool hasWritebackToken =
7325  (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7326  static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7327  bool listContainsBase;
7328  if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7329  (!listContainsBase && !hasWritebackToken) ||
7330  (listContainsBase && hasWritebackToken)) {
7331  // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7332  assert (isThumbTwo());
7333  Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7334  // If we're switching to the updating version, we need to insert
7335  // the writeback tied operand.
7336  if (hasWritebackToken)
7337  Inst.insert(Inst.begin(),
7338  MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7339  return true;
7340  }
7341  break;
7342  }
7343  case ARM::tSTMIA_UPD: {
7344  // If the register list contains any high registers, we need to use
7345  // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7346  // should have generated an error in validateInstruction().
7347  unsigned Rn = Inst.getOperand(0).getReg();
7348  bool listContainsBase;
7349  if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7350  // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7351  assert (isThumbTwo());
7352  Inst.setOpcode(ARM::t2STMIA_UPD);
7353  return true;
7354  }
7355  break;
7356  }
7357  case ARM::tPOP: {
7358  bool listContainsBase;
7359  // If the register list contains any high registers, we need to use
7360  // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7361  // should have generated an error in validateInstruction().
7362  if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7363  return false;
7364  assert (isThumbTwo());
7365  Inst.setOpcode(ARM::t2LDMIA_UPD);
7366  // Add the base register and writeback operands.
7367  Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7368  Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7369  return true;
7370  }
7371  case ARM::tPUSH: {
7372  bool listContainsBase;
7373  if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7374  return false;
7375  assert (isThumbTwo());
7376  Inst.setOpcode(ARM::t2STMDB_UPD);
7377  // Add the base register and writeback operands.
7378  Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7379  Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7380  return true;
7381  }
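  // For instance, 'push {r4, r8}' and 'pop {r4, r8}' contain a high register,
  // so in Thumb2 the two cases above widen them to the 32-bit forms
  // (roughly 'stmdb.w sp!, {r4, r8}' and 'ldmia.w sp!, {r4, r8}').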
7382  case ARM::t2MOVi: {
7383  // If we can use the 16-bit encoding and the user didn't explicitly
7384  // request the 32-bit variant, transform it here.
7385  if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7386  (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7387  ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7388  Inst.getOperand(4).getReg() == ARM::CPSR) ||
7389  (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7390  (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7391  static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7392  // The operands aren't in the same order for tMOVi8...
7393  MCInst TmpInst;
7394  TmpInst.setOpcode(ARM::tMOVi8);
7395  TmpInst.addOperand(Inst.getOperand(0));
7396  TmpInst.addOperand(Inst.getOperand(4));
7397  TmpInst.addOperand(Inst.getOperand(1));
7398  TmpInst.addOperand(Inst.getOperand(2));
7399  TmpInst.addOperand(Inst.getOperand(3));
7400  Inst = TmpInst;
7401  return true;
7402  }
7403  break;
7404  }
7405  case ARM::t2MOVr: {
7406  // If we can use the 16-bit encoding and the user didn't explicitly
7407  // request the 32-bit variant, transform it here.
7408  if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7409  isARMLowRegister(Inst.getOperand(1).getReg()) &&
7410  Inst.getOperand(2).getImm() == ARMCC::AL &&
7411  Inst.getOperand(4).getReg() == ARM::CPSR &&
7412  (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7413  static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7414  // The operands aren't the same for tMOV[S]r... (no cc_out)
7415  MCInst TmpInst;
7416  TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7417  TmpInst.addOperand(Inst.getOperand(0));
7418  TmpInst.addOperand(Inst.getOperand(1));
7419  TmpInst.addOperand(Inst.getOperand(2));
7420  TmpInst.addOperand(Inst.getOperand(3));
7421  Inst = TmpInst;
7422  return true;
7423  }
7424  break;
7425  }
7426  case ARM::t2SXTH:
7427  case ARM::t2SXTB:
7428  case ARM::t2UXTH:
7429  case ARM::t2UXTB: {
7430  // If we can use the 16-bit encoding and the user didn't explicitly
7431  // request the 32-bit variant, transform it here.
7432  if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7433  isARMLowRegister(Inst.getOperand(1).getReg()) &&
7434  Inst.getOperand(2).getImm() == 0 &&
7435  (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7436  static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7437  unsigned NewOpc;
7438  switch (Inst.getOpcode()) {
7439  default: llvm_unreachable("Illegal opcode!");
7440  case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7441  case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7442  case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7443  case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7444  }
7445  // The operands aren't the same for thumb1 (no rotate operand).
7446  MCInst TmpInst;
7447  TmpInst.setOpcode(NewOpc);
7448  TmpInst.addOperand(Inst.getOperand(0));
7449  TmpInst.addOperand(Inst.getOperand(1));
7450  TmpInst.addOperand(Inst.getOperand(3));
7451  TmpInst.addOperand(Inst.getOperand(4));
7452  Inst = TmpInst;
7453  return true;
7454  }
7455  break;
7456  }
7457  case ARM::MOVsi: {
7458  ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7459  // rrx shifts and asr/lsr of #32 are encoded as 0
7460  if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
7461  return false;
7462  if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7463  // Shifting by zero is accepted as a vanilla 'MOVr'
7464  MCInst TmpInst;
7465  TmpInst.setOpcode(ARM::MOVr);
7466  TmpInst.addOperand(Inst.getOperand(0));
7467  TmpInst.addOperand(Inst.getOperand(1));
7468  TmpInst.addOperand(Inst.getOperand(3));
7469  TmpInst.addOperand(Inst.getOperand(4));
7470  TmpInst.addOperand(Inst.getOperand(5));
7471  Inst = TmpInst;
7472  return true;
7473  }
7474  return false;
7475  }
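  // e.g. 'mov r0, r1, lsl #0' is accepted and emitted as a plain 'mov r0, r1'
  // (MOVr) by the case above, while rrx and lsr/asr are left alone because a
  // zero shift field there encodes rrx or a shift of #32.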
7476  case ARM::ANDrsi:
7477  case ARM::ORRrsi:
7478  case ARM::EORrsi:
7479  case ARM::BICrsi:
7480  case ARM::SUBrsi:
7481  case ARM::ADDrsi: {
7482  unsigned newOpc;
7483  ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7484  if (SOpc == ARM_AM::rrx) return false;
7485  switch (Inst.getOpcode()) {
7486  default: llvm_unreachable("unexpected opcode!");
7487  case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7488  case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7489  case ARM::EORrsi: newOpc = ARM::EORrr; break;
7490  case ARM::BICrsi: newOpc = ARM::BICrr; break;
7491  case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7492  case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7493  }
7494  // If the shift is by zero, use the non-shifted instruction definition.
7495  // The exception is for right shifts, where 0 == 32
7496  if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
7497  !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
7498  MCInst TmpInst;
7499  TmpInst.setOpcode(newOpc);
7500  TmpInst.addOperand(Inst.getOperand(0));
7501  TmpInst.addOperand(Inst.getOperand(1));
7502  TmpInst.addOperand(Inst.getOperand(2));
7503  TmpInst.addOperand(Inst.getOperand(4));
7504  TmpInst.addOperand(Inst.getOperand(5));
7505  TmpInst.addOperand(Inst.getOperand(6));
7506  Inst = TmpInst;
7507  return true;
7508  }
7509  return false;
7510  }
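  // e.g. 'and r0, r1, r2, lsl #0' becomes the plain register form
  // 'and r0, r1, r2' (ANDrr), whereas right shifts are excluded because a
  // zero shift amount there encodes a shift of #32.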
7511  case ARM::ITasm:
7512  case ARM::t2IT: {
7513  // The mask bits for all but the first condition are encoded on the
7514  // assumption that the low bit of the condition code value implies 't'.
7515  // We currently always parse the mask as if 1 implies 't', so XOR-toggle
7516  // the bits if the low bit of the condition code is zero.
7517  MCOperand &MO = Inst.getOperand(1);
7518  unsigned Mask = MO.getImm();
7519  unsigned OrigMask = Mask;
7520  unsigned TZ = countTrailingZeros(Mask);
7521  if ((Inst.getOperand(0).getImm() & 1) == 0) {
7522  assert(Mask && TZ <= 3 && "illegal IT mask value!");
7523  Mask ^= (0xE << TZ) & 0xF;
7524  }
7525  MO.setImm(Mask);
7526 
7527  // Set up the IT block state according to the IT instruction we just
7528  // matched.
7529  assert(!inITBlock() && "nested IT blocks?!");
7530  ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7531  ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7532  ITState.CurPosition = 0;
7533  ITState.FirstCond = true;
7534  break;
7535  }
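  // Worked example of the toggle above: with Mask = 0b0100 (TZ == 2) and a
  // condition code whose low bit is 0, Mask ^= (0xE << 2) & 0xF flips only
  // bit 3, giving 0b1100. ITState still records the original mask.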
7536  case ARM::t2LSLrr:
7537  case ARM::t2LSRrr:
7538  case ARM::t2ASRrr:
7539  case ARM::t2SBCrr:
7540  case ARM::t2RORrr:
7541  case ARM::t2BICrr:
7542  {
7543  // Assemblers should use the narrow encodings of these instructions when permissible.
7544  if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7545  isARMLowRegister(Inst.getOperand(2).getReg())) &&
7546  Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7547  ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7548  (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7549  (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7550  !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7551  unsigned NewOpc;
7552  switch (Inst.getOpcode()) {
7553  default: llvm_unreachable("unexpected opcode");
7554  case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
7555  case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
7556  case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
7557  case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
7558  case ARM::t2RORrr: NewOpc = ARM::tROR; break;
7559  case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
7560  }
7561  MCInst TmpInst;
7562  TmpInst.setOpcode(NewOpc);
7563  TmpInst.addOperand(Inst.getOperand(0));
7564  TmpInst.addOperand(Inst.getOperand(5));
7565  TmpInst.addOperand(Inst.getOperand(1));
7566  TmpInst.addOperand(Inst.getOperand(2));
7567  TmpInst.addOperand(Inst.getOperand(3));
7568  TmpInst.addOperand(Inst.getOperand(4));
7569  Inst = TmpInst;
7570  return true;
7571  }
7572  return false;
7573  }
7574  case ARM::t2ANDrr:
7575  case ARM::t2EORrr:
7576  case ARM::t2ADCrr:
7577  case ARM::t2ORRrr:
7578  {
7579  // Assemblers should use the narrow encodings of these instructions when permissible.
7580  // These instructions are special in that they are commutable, so shorter encodings
7581  // are available more often.
7582  if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7583  isARMLowRegister(Inst.getOperand(2).getReg())) &&
7584  (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
7585  Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
7586  ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7587  (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7588  (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7589  !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7590  unsigned NewOpc;
7591  switch (Inst.getOpcode()) {
7592  default: llvm_unreachable("unexpected opcode");
7593  case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
7594  case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
7595  case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
7596  case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
7597  }
7598  MCInst TmpInst;
7599  TmpInst.setOpcode(NewOpc);
7600  TmpInst.addOperand(Inst.getOperand(0));
7601  TmpInst.addOperand(Inst.getOperand(5));
7602  if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
7603  TmpInst.addOperand(Inst.getOperand(1));
7604  TmpInst.addOperand(Inst.getOperand(2));
7605  } else {
7606  TmpInst.addOperand(Inst.getOperand(2));
7607  TmpInst.addOperand(Inst.getOperand(1));
7608  }
7609  TmpInst.addOperand(Inst.getOperand(3));
7610  TmpInst.addOperand(Inst.getOperand(4));
7611  Inst = TmpInst;
7612  return true;
7613  }
7614  return false;
7615  }
7616  }
7617  return false;
7618 }
7619 
7620 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7621  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7622  // suffix depending on whether they're in an IT block or not.
7623  unsigned Opc = Inst.getOpcode();
7624  const MCInstrDesc &MCID = MII.get(Opc);
7625  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7626  assert(MCID.hasOptionalDef() &&
7627  "optionally flag setting instruction missing optional def operand");
7628  assert(MCID.NumOperands == Inst.getNumOperands() &&
7629  "operand count mismatch!");
7630  // Find the optional-def operand (cc_out).
7631  unsigned OpNo;
7632  for (OpNo = 0;
7633  !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
7634  ++OpNo)
7635  ;
7636  // If we're parsing Thumb1, reject it completely.
7637  if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7638  return Match_MnemonicFail;
7639  // If we're parsing Thumb2, which form is legal depends on whether we're
7640  // in an IT block.
7641  if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7642  !inITBlock())
7643  return Match_RequiresITBlock;
7644  if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7645  inITBlock())
7646  return Match_RequiresNotITBlock;
7647  }
7648  // Some Thumb1 encodings that support high registers only allow both
7649  // registers to be from r0-r7 when assembling for Thumb2.
7650  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7651  isARMLowRegister(Inst.getOperand(1).getReg()) &&
7652  isARMLowRegister(Inst.getOperand(2).getReg()))
7653  return Match_RequiresThumb2;
7654  // Others only require ARMv6 or later.
7655  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7656  isARMLowRegister(Inst.getOperand(0).getReg()) &&
7657  isARMLowRegister(Inst.getOperand(1).getReg()))
7658  return Match_RequiresV6;
7659  return Match_Success;
7660 }
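// As a consequence of the predicate above, in Thumb2 a narrow flag-setting
// arithmetic form such as 'adds r0, r0, #1' only matches outside an IT block,
// while inside an IT block the same operation must be written without the
// 'S' suffix ('add r0, r0, #1').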
7661 
7662 static const char *getSubtargetFeatureName(unsigned Val);
7663 bool ARMAsmParser::
7664 MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
7665  SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7666  MCStreamer &Out, unsigned &ErrorInfo,
7667  bool MatchingInlineAsm) {
7668  MCInst Inst;
7669  unsigned MatchResult;
7670 
7671  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
7672  MatchingInlineAsm);
7673  switch (MatchResult) {
7674  default: break;
7675  case Match_Success:
7676  // Context sensitive operand constraints aren't handled by the matcher,
7677  // so check them here.
7678  if (validateInstruction(Inst, Operands)) {
7679  // Still progress the IT block, otherwise one wrong condition causes
7680  // nasty cascading errors.
7681  forwardITPosition();
7682  return true;
7683  }
7684 
7685  { // processInstruction() updates inITBlock state, we need to save it away
7686  bool wasInITBlock = inITBlock();
7687 
7688  // Some instructions need post-processing to, for example, tweak which
7689  // encoding is selected. Loop on it while changes happen so the
7690  // individual transformations can chain off each other. E.g.,
7691  // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7692  while (processInstruction(Inst, Operands))
7693  ;
7694 
7695  // Only after the instruction is fully processed can we validate it.
7696  if (wasInITBlock && hasV8Ops() && isThumb() &&
7697  !isV8EligibleForIT(&Inst, 2)) {
7698  Warning(IDLoc, "deprecated instruction in IT block");
7699  }
7700  }
7701 
7702  // Only move forward at the very end so that everything in validate
7703  // and process gets a consistent answer about whether we're in an IT
7704  // block.
7705  forwardITPosition();
7706 
7707  // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7708  // doesn't actually encode.
7709  if (Inst.getOpcode() == ARM::ITasm)
7710  return false;
7711 
7712  Inst.setLoc(IDLoc);
7713  Out.EmitInstruction(Inst);
7714  return false;
7715  case Match_MissingFeature: {
7716  assert(ErrorInfo && "Unknown missing feature!");
7717  // Special case the error message for the very common case where only
7718  // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
7719  std::string Msg = "instruction requires:";
7720  unsigned Mask = 1;
7721  for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
7722  if (ErrorInfo & Mask) {
7723  Msg += " ";
7724  Msg += getSubtargetFeatureName(ErrorInfo & Mask);
7725  }
7726  Mask <<= 1;
7727  }
7728  return Error(IDLoc, Msg);
7729  }
7730  case Match_InvalidOperand: {
7731  SMLoc ErrorLoc = IDLoc;
7732  if (ErrorInfo != ~0U) {
7733  if (ErrorInfo >= Operands.size())
7734  return Error(IDLoc, "too few operands for instruction");
7735 
7736  ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7737  if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7738  }
7739 
7740  return Error(ErrorLoc, "invalid operand for instruction");
7741  }
7742  case Match_MnemonicFail:
7743  return Error(IDLoc, "invalid instruction",
7744  ((ARMOperand*)Operands[0])->getLocRange());
7745  case Match_RequiresNotITBlock:
7746  return Error(IDLoc, "flag setting instruction only valid outside IT block");
7747  case Match_RequiresITBlock:
7748  return Error(IDLoc, "instruction only valid inside IT block");
7749  case Match_RequiresV6:
7750  return Error(IDLoc, "instruction variant requires ARMv6 or later");
7751  case Match_RequiresThumb2:
7752  return Error(IDLoc, "instruction variant requires Thumb2");
7753  case Match_ImmRange0_15: {
7754  SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7755  if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7756  return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
7757  }
7758  case Match_ImmRange0_239: {
7759  SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7760  if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7761  return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
7762  }
7763  }
7764 
7765  llvm_unreachable("Implement any new match types added!");
7766 }
7767 
7768 /// ParseDirective parses the ARM-specific directives.
7769 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7770  StringRef IDVal = DirectiveID.getIdentifier();
7771  if (IDVal == ".word")
7772  return parseDirectiveWord(4, DirectiveID.getLoc());
7773  else if (IDVal == ".thumb")
7774  return parseDirectiveThumb(DirectiveID.getLoc());
7775  else if (IDVal == ".arm")
7776  return parseDirectiveARM(DirectiveID.getLoc());
7777  else if (IDVal == ".thumb_func")
7778  return parseDirectiveThumbFunc(DirectiveID.getLoc());
7779  else if (IDVal == ".code")
7780  return parseDirectiveCode(DirectiveID.getLoc());
7781  else if (IDVal == ".syntax")
7782  return parseDirectiveSyntax(DirectiveID.getLoc());
7783  else if (IDVal == ".unreq")
7784  return parseDirectiveUnreq(DirectiveID.getLoc());
7785  else if (IDVal == ".arch")
7786  return parseDirectiveArch(DirectiveID.getLoc());
7787  else if (IDVal == ".eabi_attribute")
7788  return parseDirectiveEabiAttr(DirectiveID.getLoc());
7789  else if (IDVal == ".cpu")
7790  return parseDirectiveCPU(DirectiveID.getLoc());
7791  else if (IDVal == ".fpu")
7792  return parseDirectiveFPU(DirectiveID.getLoc());
7793  else if (IDVal == ".fnstart")
7794  return parseDirectiveFnStart(DirectiveID.getLoc());
7795  else if (IDVal == ".fnend")
7796  return parseDirectiveFnEnd(DirectiveID.getLoc());
7797  else if (IDVal == ".cantunwind")
7798  return parseDirectiveCantUnwind(DirectiveID.getLoc());
7799  else if (IDVal == ".personality")
7800  return parseDirectivePersonality(DirectiveID.getLoc());
7801  else if (IDVal == ".handlerdata")
7802  return parseDirectiveHandlerData(DirectiveID.getLoc());
7803  else if (IDVal == ".setfp")
7804  return parseDirectiveSetFP(DirectiveID.getLoc());
7805  else if (IDVal == ".pad")
7806  return parseDirectivePad(DirectiveID.getLoc());
7807  else if (IDVal == ".save")
7808  return parseDirectiveRegSave(DirectiveID.getLoc(), false);
7809  else if (IDVal == ".vsave")
7810  return parseDirectiveRegSave(DirectiveID.getLoc(), true);
7811  return true;
7812 }
7813 
7814 /// parseDirectiveWord
7815 /// ::= .word [ expression (, expression)* ]
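/// e.g. '.word 0x11223344, sym + 4' (each expression is emitted as 4 bytes)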
7816 bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7817  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7818  for (;;) {
7819  const MCExpr *Value;
7820  if (getParser().parseExpression(Value))
7821  return true;
7822 
7823  getParser().getStreamer().EmitValue(Value, Size);
7824 
7825  if (getLexer().is(AsmToken::EndOfStatement))
7826  break;
7827 
7828  // FIXME: Improve diagnostic.
7829  if (getLexer().isNot(AsmToken::Comma))
7830  return Error(L, "unexpected token in directive");
7831  Parser.Lex();
7832  }
7833  }
7834 
7835  Parser.Lex();
7836  return false;
7837 }
7838 
7839 /// parseDirectiveThumb
7840 /// ::= .thumb
7841 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7842  if (getLexer().isNot(AsmToken::EndOfStatement))
7843  return Error(L, "unexpected token in directive");
7844  Parser.Lex();
7845 
7846  if (!hasThumb())
7847  return Error(L, "target does not support Thumb mode");
7848 
7849  if (!isThumb())
7850  SwitchMode();
7851  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7852  return false;
7853 }
7854 
7855 /// parseDirectiveARM
7856 /// ::= .arm
7857 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7858  if (getLexer().isNot(AsmToken::EndOfStatement))
7859  return Error(L, "unexpected token in directive");
7860  Parser.Lex();
7861 
7862  if (!hasARM())
7863  return Error(L, "target does not support ARM mode");
7864 
7865  if (isThumb())
7866  SwitchMode();
7867  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7868  return false;
7869 }
7870 
7871 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
7872  if (NextSymbolIsThumb) {
7873  getParser().getStreamer().EmitThumbFunc(Symbol);
7874  NextSymbolIsThumb = false;
7875  }
7876 }
7877 
7878 /// parseDirectiveThumbFunc
7879 /// ::= .thumb_func symbol_name
7880 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7881  const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
7882  bool isMachO = MAI->hasSubsectionsViaSymbols();
7883 
7884  // Darwin asm may optionally have the function name after the .thumb_func
7885  // directive; ELF doesn't.
7886  if (isMachO) {
7887  const AsmToken &Tok = Parser.getTok();
7888  if (Tok.isNot(AsmToken::EndOfStatement)) {
7889  if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7890  return Error(L, "unexpected token in .thumb_func directive");
7891  MCSymbol *Func =
7892  getParser().getContext().GetOrCreateSymbol(Tok.getIdentifier());
7893  getParser().getStreamer().EmitThumbFunc(Func);
7894  Parser.Lex(); // Consume the identifier token.
7895  return false;
7896  }
7897  }
7898 
7899  if (getLexer().isNot(AsmToken::EndOfStatement))
7900  return Error(L, "unexpected token in directive");
7901 
7902  NextSymbolIsThumb = true;
7903 
7904  return false;
7905 }
7906 
7907 /// parseDirectiveSyntax
7908 /// ::= .syntax unified | divided
7909 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7910  const AsmToken &Tok = Parser.getTok();
7911  if (Tok.isNot(AsmToken::Identifier))
7912  return Error(L, "unexpected token in .syntax directive");
7913  StringRef Mode = Tok.getString();
7914  if (Mode == "unified" || Mode == "UNIFIED")
7915  Parser.Lex();
7916  else if (Mode == "divided" || Mode == "DIVIDED")
7917  return Error(L, "'.syntax divided' arm asssembly not supported");
7918  else
7919  return Error(L, "unrecognized syntax mode in .syntax directive");
7920 
7921  if (getLexer().isNot(AsmToken::EndOfStatement))
7922  return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7923  Parser.Lex();
7924 
7925  // TODO tell the MC streamer the mode
7926  // getParser().getStreamer().Emit???();
7927  return false;
7928 }
7929 
7930 /// parseDirectiveCode
7931 /// ::= .code 16 | 32
7932 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7933  const AsmToken &Tok = Parser.getTok();
7934  if (Tok.isNot(AsmToken::Integer))
7935  return Error(L, "unexpected token in .code directive");
7936  int64_t Val = Parser.getTok().getIntVal();
7937  if (Val == 16)
7938  Parser.Lex();
7939  else if (Val == 32)
7940  Parser.Lex();
7941  else
7942  return Error(L, "invalid operand to .code directive");
7943 
7944  if (getLexer().isNot(AsmToken::EndOfStatement))
7945  return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7946  Parser.Lex();
7947 
7948  if (Val == 16) {
7949  if (!hasThumb())
7950  return Error(L, "target does not support Thumb mode");
7951 
7952  if (!isThumb())
7953  SwitchMode();
7954  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7955  } else {
7956  if (!hasARM())
7957  return Error(L, "target does not support ARM mode");
7958 
7959  if (isThumb())
7960  SwitchMode();
7961  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7962  }
7963 
7964  return false;
7965 }
7966 
7967 /// parseDirectiveReq
7968 /// ::= name .req registername
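/// e.g. 'counter .req r4' lets 'counter' be used as a register operand until a
/// matching '.unreq counter'; redefining an existing alias to a different
/// register is rejected below.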
7969 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7970  Parser.Lex(); // Eat the '.req' token.
7971  unsigned Reg;
7972  SMLoc SRegLoc, ERegLoc;
7973  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7974  Parser.eatToEndOfStatement();
7975  return Error(SRegLoc, "register name expected");
7976  }
7977 
7978  // Shouldn't be anything else.
7979  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7980  Parser.eatToEndOfStatement();
7981  return Error(Parser.getTok().getLoc(),
7982  "unexpected input in .req directive.");
7983  }
7984 
7985  Parser.Lex(); // Consume the EndOfStatement
7986 
7987  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7988  return Error(SRegLoc, "redefinition of '" + Name +
7989  "' does not match original.");
7990 
7991  return false;
7992 }
7993 
7994 /// parseDirectiveUnreq
7995 /// ::= .unreq registername
7996 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7997  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7998  Parser.eatToEndOfStatement();
7999  return Error(L, "unexpected input in .unreq directive.");
8000  }
8001  RegisterReqs.erase(Parser.getTok().getIdentifier());
8002  Parser.Lex(); // Eat the identifier.
8003  return false;
8004 }
8005 
8006 /// parseDirectiveArch
8007 /// ::= .arch token
8008 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
8009  return true;
8010 }
8011 
8012 /// parseDirectiveEabiAttr
8013 /// ::= .eabi_attribute int, int
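/// e.g. '.eabi_attribute 6, 10' (the first integer is the build attribute tag,
/// the second is its value; both are passed straight to the target streamer)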
8014 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
8015  if (Parser.getTok().isNot(AsmToken::Integer))
8016  return Error(L, "integer expected");
8017  int64_t Tag = Parser.getTok().getIntVal();
8018  Parser.Lex(); // eat tag integer
8019 
8020  if (Parser.getTok().isNot(AsmToken::Comma))
8021  return Error(L, "comma expected");
8022  Parser.Lex(); // skip comma
8023 
8024  L = Parser.getTok().getLoc();
8025  if (Parser.getTok().isNot(AsmToken::Integer))
8026  return Error(L, "integer expected");
8027  int64_t Value = Parser.getTok().getIntVal();
8028  Parser.Lex(); // eat value integer
8029 
8030  getTargetStreamer().emitAttribute(Tag, Value);
8031  return false;
8032 }
8033 
8034 /// parseDirectiveCPU
8035 /// ::= .cpu str
8036 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
8037  StringRef CPU = getParser().parseStringToEndOfStatement().trim();
8038  getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
8039  return false;
8040 }
8041 
8042 /// parseDirectiveFPU
8043 /// ::= .fpu str
8044 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
8045  StringRef FPU = getParser().parseStringToEndOfStatement().trim();
8046 
8047  unsigned ID = StringSwitch<unsigned>(FPU)
8048 #define ARM_FPU_NAME(NAME, ID) .Case(NAME, ARM::ID)
8049 #include "ARMFPUName.def"
8050  .Default(ARM::INVALID_FPU);
8051 
8052  if (ID == ARM::INVALID_FPU)
8053  return Error(L, "Unknown FPU name");
8054 
8055  getTargetStreamer().emitFPU(ID);
8056  return false;
8057 }
8058 
8059 /// parseDirectiveFnStart
8060 /// ::= .fnstart
8061 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
8062  if (FnStartLoc.isValid()) {
8063  Error(L, ".fnstart starts before the end of previous one");
8064  Error(FnStartLoc, "previous .fnstart starts here");
8065  return true;
8066  }
8067 
8068  FnStartLoc = L;
8069  getTargetStreamer().emitFnStart();
8070  return false;
8071 }
8072 
8073 /// parseDirectiveFnEnd
8074 /// ::= .fnend
8075 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
8076  // Check the ordering of unwind directives
8077  if (!FnStartLoc.isValid())
8078  return Error(L, ".fnstart must precede .fnend directive");
8079 
8080  // Reset the unwind directives parser state
8081  resetUnwindDirectiveParserState();
8082  getTargetStreamer().emitFnEnd();
8083  return false;
8084 }
8085 
8086 /// parseDirectiveCantUnwind
8087 /// ::= .cantunwind
8088 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
8089  // Check the ordering of unwind directives
8090  CantUnwindLoc = L;
8091  if (!FnStartLoc.isValid())
8092  return Error(L, ".fnstart must precede .cantunwind directive");
8093  if (HandlerDataLoc.isValid()) {
8094  Error(L, ".cantunwind can't be used with .handlerdata directive");
8095  Error(HandlerDataLoc, ".handlerdata was specified here");
8096  return true;
8097  }
8098  if (PersonalityLoc.isValid()) {
8099  Error(L, ".cantunwind can't be used with .personality directive");
8100  Error(PersonalityLoc, ".personality was specified here");
8101  return true;
8102  }
8103 
8104  getTargetStreamer().emitCantUnwind();
8105  return false;
8106 }
8107 
8108 /// parseDirectivePersonality
8109 /// ::= .personality name
8110 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
8111  // Check the ordering of unwind directives
8112  PersonalityLoc = L;
8113  if (!FnStartLoc.isValid())
8114  return Error(L, ".fnstart must precede .personality directive");
8115  if (CantUnwindLoc.isValid()) {
8116  Error(L, ".personality can't be used with .cantunwind directive");
8117  Error(CantUnwindLoc, ".cantunwind was specified here");
8118  return true;
8119  }
8120  if (HandlerDataLoc.isValid()) {
8121  Error(L, ".personality must precede .handlerdata directive");
8122  Error(HandlerDataLoc, ".handlerdata was specified here");
8123  return true;
8124  }
8125 
8126  // Parse the name of the personality routine
8127  if (Parser.getTok().isNot(AsmToken::Identifier)) {
8128  Parser.eatToEndOfStatement();
8129  return Error(L, "unexpected input in .personality directive.");
8130  }
8131  StringRef Name(Parser.getTok().getIdentifier());
8132  Parser.Lex();
8133 
8134  MCSymbol *PR = getParser().getContext().GetOrCreateSymbol(Name);
8135  getTargetStreamer().emitPersonality(PR);
8136  return false;
8137 }
8138 
8139 /// parseDirectiveHandlerData
8140 /// ::= .handlerdata
8141 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
8142  // Check the ordering of unwind directives
8143  HandlerDataLoc = L;
8144  if (!FnStartLoc.isValid())
8145  return Error(L, ".fnstart must precede .personality directive");
8146  if (CantUnwindLoc.isValid()) {
8147  Error(L, ".handlerdata can't be used with .cantunwind directive");
8148  Error(CantUnwindLoc, ".cantunwind was specified here");
8149  return true;
8150  }
8151 
8152  getTargetStreamer().emitHandlerData();
8153  return false;
8154 }
8155 
8156 /// parseDirectiveSetFP
8157 /// ::= .setfp fpreg, spreg [, offset]
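/// e.g. '.setfp fp, sp, #16' (the second register must be sp or the previously
/// established frame pointer; the optional offset must be an immediate)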
8158 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
8159  // Check the ordering of unwind directives
8160  if (!FnStartLoc.isValid())
8161  return Error(L, ".fnstart must precede .setfp directive");
8162  if (HandlerDataLoc.isValid())
8163  return Error(L, ".setfp must precede .handlerdata directive");
8164 
8165  // Parse fpreg
8166  SMLoc NewFPRegLoc = Parser.getTok().getLoc();
8167  int NewFPReg = tryParseRegister();
8168  if (NewFPReg == -1)
8169  return Error(NewFPRegLoc, "frame pointer register expected");
8170 
8171  // Consume comma
8172  if (!Parser.getTok().is(AsmToken::Comma))
8173  return Error(Parser.getTok().getLoc(), "comma expected");
8174  Parser.Lex(); // skip comma
8175 
8176  // Parse spreg
8177  SMLoc NewSPRegLoc = Parser.getTok().getLoc();
8178  int NewSPReg = tryParseRegister();
8179  if (NewSPReg == -1)
8180  return Error(NewSPRegLoc, "stack pointer register expected");
8181 
8182  if (NewSPReg != ARM::SP && NewSPReg != FPReg)
8183  return Error(NewSPRegLoc,
8184  "register should be either $sp or the latest fp register");
8185 
8186  // Update the frame pointer register
8187  FPReg = NewFPReg;
8188 
8189  // Parse offset
8190  int64_t Offset = 0;
8191  if (Parser.getTok().is(AsmToken::Comma)) {
8192  Parser.Lex(); // skip comma
8193 
8194  if (Parser.getTok().isNot(AsmToken::Hash) &&
8195  Parser.getTok().isNot(AsmToken::Dollar)) {
8196  return Error(Parser.getTok().getLoc(), "'#' expected");
8197  }
8198  Parser.Lex(); // skip hash token.
8199 
8200  const MCExpr *OffsetExpr;
8201  SMLoc ExLoc = Parser.getTok().getLoc();
8202  SMLoc EndLoc;
8203  if (getParser().parseExpression(OffsetExpr, EndLoc))
8204  return Error(ExLoc, "malformed setfp offset");
8205  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8206  if (!CE)
8207  return Error(ExLoc, "setfp offset must be an immediate");
8208 
8209  Offset = CE->getValue();
8210  }
8211 
8212  getTargetStreamer().emitSetFP(static_cast<unsigned>(NewFPReg),
8213  static_cast<unsigned>(NewSPReg), Offset);
8214  return false;
8215 }
8216 
8217 /// parseDirectivePad
8218 /// ::= .pad offset
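/// e.g. '.pad #12' (the operand must be an immediate stack adjustment)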
8219 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
8220  // Check the ordering of unwind directives
8221  if (!FnStartLoc.isValid())
8222  return Error(L, ".fnstart must precede .pad directive");
8223  if (HandlerDataLoc.isValid())
8224  return Error(L, ".pad must precede .handlerdata directive");
8225 
8226  // Parse the offset
8227  if (Parser.getTok().isNot(AsmToken::Hash) &&
8228  Parser.getTok().isNot(AsmToken::Dollar)) {
8229  return Error(Parser.getTok().getLoc(), "'#' expected");
8230  }
8231  Parser.Lex(); // skip hash token.
8232 
8233  const MCExpr *OffsetExpr;
8234  SMLoc ExLoc = Parser.getTok().getLoc();
8235  SMLoc EndLoc;
8236  if (getParser().parseExpression(OffsetExpr, EndLoc))
8237  return Error(ExLoc, "malformed pad offset");
8238  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8239  if (!CE)
8240  return Error(ExLoc, "pad offset must be an immediate");
8241 
8242  getTargetStreamer().emitPad(CE->getValue());
8243  return false;
8244 }
8245 
8246 /// parseDirectiveRegSave
8247 /// ::= .save { registers }
8248 /// ::= .vsave { registers }
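/// e.g. '.save {r4-r7, lr}' for GPRs, or '.vsave {d8-d11}' for DPRs; using the
/// wrong register class with either directive is rejected below.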
8249 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
8250  // Check the ordering of unwind directives
8251  if (!FnStartLoc.isValid())
8252  return Error(L, ".fnstart must precede .save or .vsave directives");
8253  if (HandlerDataLoc.isValid())
8254  return Error(L, ".save or .vsave must precede .handlerdata directive");
8255 
8256  // RAII object to make sure parsed operands are deleted.
8257  struct CleanupObject {
8258  SmallVector<MCParsedAsmOperand *, 1> Operands;
8259  ~CleanupObject() {
8260  for (unsigned I = 0, E = Operands.size(); I != E; ++I)
8261  delete Operands[I];
8262  }
8263  } CO;
8264 
8265  // Parse the register list
8266  if (parseRegisterList(CO.Operands))
8267  return true;
8268  ARMOperand *Op = (ARMOperand*)CO.Operands[0];
8269  if (!IsVector && !Op->isRegList())
8270  return Error(L, ".save expects GPR registers");
8271  if (IsVector && !Op->isDPRRegList())
8272  return Error(L, ".vsave expects DPR registers");
8273 
8274  getTargetStreamer().emitRegSave(Op->getRegList(), IsVector);
8275  return false;
8276 }
8277 
8278 /// Force static initialization.
8279 extern "C" void LLVMInitializeARMAsmParser() {
8280  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
8281  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
8282 }
8283 
8284 #define GET_REGISTER_MATCHER
8285 #define GET_SUBTARGET_FEATURE_NAME
8286 #define GET_MATCHER_IMPLEMENTATION
8287 #include "ARMGenAsmMatcher.inc"
8288 
8289 // Define this matcher function after the auto-generated include so we
8290 // have the match class enum definitions.
8291 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
8292  unsigned Kind) {
8293  ARMOperand *Op = static_cast<ARMOperand*>(AsmOp);
8294  // If the kind is a token for a literal immediate, check if our asm
8295  // operand matches. This is for InstAliases which have a fixed-value
8296  // immediate in the syntax.
8297  if (Kind == MCK__35_0 && Op->isImm()) {
8298  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
8299  if (!CE)
8300  return Match_InvalidOperand;
8301  if (CE->getValue() == 0)
8302  return Match_Success;
8303  }
8304  return Match_InvalidOperand;
8305 }