#define DEBUG_TYPE "aarch64-isel"
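// Opcode tables for the exclusive load/store instructions used to expand
// atomic operations. getExclusiveOperation() selects the plain or
// acquire/release variant based on the atomic ordering, and indexes each
// table with Log2_32(Size), so only power-of-two sizes of 1, 2, 4 or 8
// bytes are representable.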
static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
                                     AArch64::LDXR_word, AArch64::LDXR_dword};
static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
                                    AArch64::LDAXR_word, AArch64::LDAXR_dword};
static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
                                      AArch64::STXR_word, AArch64::STXR_dword};
static const unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword,
                                     AArch64::STLXR_word, AArch64::STLXR_dword};
  const unsigned *LoadOps, *StoreOps;
  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    LoadOps = LoadAcqs;
  else
    LoadOps = LoadBares;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    StoreOps = StoreRels;
  else
    StoreOps = StoreBares;

  assert(isPowerOf2_32(Size) && Size <= 8 &&
         "unsupported size for atomic binary op!");

  LdrOpc = LoadOps[Log2_32(Size)];
  StrOpc = StoreOps[Log2_32(Size)];
                                        unsigned BinOpcode) const {
  unsigned ldrOpc, strOpc;

  const TargetRegisterClass *TRC
    = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)

    .addReg(stxr_status).addMBB(loopMBB);
  unsigned oldval = dest;

  const TargetRegisterClass *TRC, *TRCsp;
  if (Size == 8) {
    TRC = &AArch64::GPR64RegClass;
    TRCsp = &AArch64::GPR64xspRegClass;
  } else {
    TRC = &AArch64::GPR32RegClass;
    TRCsp = &AArch64::GPR32wspRegClass;
  }

  unsigned ldrOpc, strOpc;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),

  BuildMI(BB, dl, TII->get(strOpc), stxr_status)
    .addReg(scratch).addReg(ptr);

    .addReg(stxr_status).addMBB(loopMBB);
                                         unsigned Size) const {
  TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;

  unsigned ldrOpc, strOpc;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;

    .addReg(stxr_status).addMBB(loop1MBB);
  BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))

  BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))

  BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
    .addFrameIndex(ScratchFI)
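// EmitInstrWithCustomInserter dispatches each atomic pseudo-instruction to
// one of the expansion helpers above; the I8/I16/I32/I64 suffix selects an
// access size of 1, 2, 4 or 8 bytes.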
  case AArch64::F128CSEL:
  case AArch64::ATOMIC_LOAD_ADD_I8:
  case AArch64::ATOMIC_LOAD_ADD_I16:
  case AArch64::ATOMIC_LOAD_ADD_I32:
  case AArch64::ATOMIC_LOAD_ADD_I64:
  case AArch64::ATOMIC_LOAD_SUB_I8:
  case AArch64::ATOMIC_LOAD_SUB_I16:
  case AArch64::ATOMIC_LOAD_SUB_I32:
  case AArch64::ATOMIC_LOAD_SUB_I64:
  case AArch64::ATOMIC_LOAD_AND_I8:
  case AArch64::ATOMIC_LOAD_AND_I16:
  case AArch64::ATOMIC_LOAD_AND_I32:
  case AArch64::ATOMIC_LOAD_AND_I64:
  case AArch64::ATOMIC_LOAD_OR_I8:
  case AArch64::ATOMIC_LOAD_OR_I16:
  case AArch64::ATOMIC_LOAD_OR_I32:
  case AArch64::ATOMIC_LOAD_OR_I64:
  case AArch64::ATOMIC_LOAD_XOR_I8:
  case AArch64::ATOMIC_LOAD_XOR_I16:
  case AArch64::ATOMIC_LOAD_XOR_I32:
  case AArch64::ATOMIC_LOAD_XOR_I64:
  case AArch64::ATOMIC_LOAD_NAND_I8:
  case AArch64::ATOMIC_LOAD_NAND_I16:
  case AArch64::ATOMIC_LOAD_NAND_I32:
  case AArch64::ATOMIC_LOAD_NAND_I64:
  case AArch64::ATOMIC_LOAD_MIN_I8:
  case AArch64::ATOMIC_LOAD_MIN_I16:
  case AArch64::ATOMIC_LOAD_MIN_I32:
  case AArch64::ATOMIC_LOAD_MIN_I64:
  case AArch64::ATOMIC_LOAD_MAX_I8:
  case AArch64::ATOMIC_LOAD_MAX_I16:
  case AArch64::ATOMIC_LOAD_MAX_I32:
  case AArch64::ATOMIC_LOAD_MAX_I64:
  case AArch64::ATOMIC_LOAD_UMIN_I8:
  case AArch64::ATOMIC_LOAD_UMIN_I16:
  case AArch64::ATOMIC_LOAD_UMIN_I32:
  case AArch64::ATOMIC_LOAD_UMIN_I64:
  case AArch64::ATOMIC_LOAD_UMAX_I8:
  case AArch64::ATOMIC_LOAD_UMAX_I16:
  case AArch64::ATOMIC_LOAD_UMAX_I32:
  case AArch64::ATOMIC_LOAD_UMAX_I64:
  case AArch64::ATOMIC_SWAP_I8:
  case AArch64::ATOMIC_SWAP_I16:
  case AArch64::ATOMIC_SWAP_I32:
  case AArch64::ATOMIC_SWAP_I64:
  case AArch64::ATOMIC_CMP_SWAP_I8:
  case AArch64::ATOMIC_CMP_SWAP_I16:
  case AArch64::ATOMIC_CMP_SWAP_I32:
  case AArch64::ATOMIC_CMP_SWAP_I64:
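// getTargetNodeName maps each AArch64ISD opcode to its name for debug
// output and DAG dumps.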
  case AArch64ISD::NEON_BSL:
    return "AArch64ISD::NEON_BSL";
  case AArch64ISD::NEON_MOVIMM:
    return "AArch64ISD::NEON_MOVIMM";
  case AArch64ISD::NEON_MVNIMM:
    return "AArch64ISD::NEON_MVNIMM";
  case AArch64ISD::NEON_FMOVIMM:
    return "AArch64ISD::NEON_FMOVIMM";
  case AArch64ISD::NEON_CMP:
    return "AArch64ISD::NEON_CMP";
  case AArch64ISD::NEON_CMPZ:
    return "AArch64ISD::NEON_CMPZ";
  case AArch64ISD::NEON_TST:
    return "AArch64ISD::NEON_TST";
  case AArch64ISD::NEON_QSHLs:
    return "AArch64ISD::NEON_QSHLs";
  case AArch64ISD::NEON_QSHLu:
    return "AArch64ISD::NEON_QSHLu";
  case AArch64ISD::NEON_VDUP:
    return "AArch64ISD::NEON_VDUP";
  case AArch64ISD::NEON_VDUPLANE:
    return "AArch64ISD::NEON_VDUPLANE";
  case AArch64ISD::NEON_REV16:
    return "AArch64ISD::NEON_REV16";
  case AArch64ISD::NEON_REV32:
    return "AArch64ISD::NEON_REV32";
  case AArch64ISD::NEON_REV64:
    return "AArch64ISD::NEON_REV64";
  case AArch64ISD::NEON_UZP1:
    return "AArch64ISD::NEON_UZP1";
  case AArch64ISD::NEON_UZP2:
    return "AArch64ISD::NEON_UZP2";
  case AArch64ISD::NEON_ZIP1:
    return "AArch64ISD::NEON_ZIP1";
  case AArch64ISD::NEON_ZIP2:
    return "AArch64ISD::NEON_ZIP2";
  case AArch64ISD::NEON_TRN1:
    return "AArch64ISD::NEON_TRN1";
  case AArch64ISD::NEON_TRN2:
    return "AArch64ISD::NEON_TRN2";
  case AArch64ISD::NEON_LD1_UPD:
    return "AArch64ISD::NEON_LD1_UPD";
  case AArch64ISD::NEON_LD2_UPD:
    return "AArch64ISD::NEON_LD2_UPD";
  case AArch64ISD::NEON_LD3_UPD:
    return "AArch64ISD::NEON_LD3_UPD";
  case AArch64ISD::NEON_LD4_UPD:
    return "AArch64ISD::NEON_LD4_UPD";
  case AArch64ISD::NEON_ST1_UPD:
    return "AArch64ISD::NEON_ST1_UPD";
  case AArch64ISD::NEON_ST2_UPD:
    return "AArch64ISD::NEON_ST2_UPD";
  case AArch64ISD::NEON_ST3_UPD:
    return "AArch64ISD::NEON_ST3_UPD";
  case AArch64ISD::NEON_ST4_UPD:
    return "AArch64ISD::NEON_ST4_UPD";
  case AArch64ISD::NEON_LD1x2_UPD:
    return "AArch64ISD::NEON_LD1x2_UPD";
  case AArch64ISD::NEON_LD1x3_UPD:
    return "AArch64ISD::NEON_LD1x3_UPD";
  case AArch64ISD::NEON_LD1x4_UPD:
    return "AArch64ISD::NEON_LD1x4_UPD";
  case AArch64ISD::NEON_ST1x2_UPD:
    return "AArch64ISD::NEON_ST1x2_UPD";
  case AArch64ISD::NEON_ST1x3_UPD:
    return "AArch64ISD::NEON_ST1x3_UPD";
  case AArch64ISD::NEON_ST1x4_UPD:
    return "AArch64ISD::NEON_ST1x4_UPD";
  case AArch64ISD::NEON_LD2DUP:
    return "AArch64ISD::NEON_LD2DUP";
  case AArch64ISD::NEON_LD3DUP:
    return "AArch64ISD::NEON_LD3DUP";
  case AArch64ISD::NEON_LD4DUP:
    return "AArch64ISD::NEON_LD4DUP";
  case AArch64ISD::NEON_LD2DUP_UPD:
    return "AArch64ISD::NEON_LD2DUP_UPD";
  case AArch64ISD::NEON_LD3DUP_UPD:
    return "AArch64ISD::NEON_LD3DUP_UPD";
  case AArch64ISD::NEON_LD4DUP_UPD:
    return "AArch64ISD::NEON_LD4DUP_UPD";
  case AArch64ISD::NEON_LD2LN_UPD:
    return "AArch64ISD::NEON_LD2LN_UPD";
  case AArch64ISD::NEON_LD3LN_UPD:
    return "AArch64ISD::NEON_LD3LN_UPD";
  case AArch64ISD::NEON_LD4LN_UPD:
    return "AArch64ISD::NEON_LD4LN_UPD";
  case AArch64ISD::NEON_ST2LN_UPD:
    return "AArch64ISD::NEON_ST2LN_UPD";
  case AArch64ISD::NEON_ST3LN_UPD:
    return "AArch64ISD::NEON_ST3LN_UPD";
  case AArch64ISD::NEON_ST4LN_UPD:
    return "AArch64ISD::NEON_ST4LN_UPD";
  case AArch64ISD::NEON_VEXTRACT:
    return "AArch64ISD::NEON_VEXTRACT";
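// Registers used to pass arguments under the AArch64 Procedure Call
// Standard: the first eight integer arguments go in X0-X7 and the first
// eight FP/SIMD arguments in Q0-Q7.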
static const uint16_t AArch64FPRArgRegs[] = {
  AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
  AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
};
static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);

static const uint16_t AArch64ArgRegs[] = {
  AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
  AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
};
static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);
#include "AArch64GenCallingConv.inc"
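// SaveVarArgRegisters spills the unnamed argument registers of a variadic
// function so va_arg can walk them: 8 bytes for each unused X register and
// 16 bytes for each unused Q register, as the size computations below show.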
  unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);

  if (GPRSaveSize != 0) {
    for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
      MemOps.push_back(Store);
  unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);

  if (FPRSaveSize != 0) {
    for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
                                   &AArch64::FPR128RegClass);
      MemOps.push_back(Store);

  if (!MemOps.empty()) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    unsigned NumRegs = (Size + 7) / 8;
    unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,

                         false, false, false, 0);
      unsigned DestSubReg;
      case 8: DestSubReg = AArch64::sub_8; break;
      case 16: DestSubReg = AArch64::sub_16; break;
      case 32: DestSubReg = AArch64::sub_32; break;
      case 64: DestSubReg = AArch64::sub_64; break;
  unsigned StackArgSize = CCInfo.getNextStackOffset();

  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    assert(VA.isRegLoc() && "Only register-returns should be created by PCS");

                     &RetOps[0], RetOps.size());
  bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
  bool IsSibCall = false;
                   Outs, OutVals, Ins, DAG);

  if (!TailCallOpt && IsTailCall)

  if (IsTailCall && !IsSibCall) {
    FPDiff = NumReusableBytes - NumBytes;
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      case 8: SrcSubReg = AArch64::sub_8; break;
      case 16: SrcSubReg = AArch64::sub_16; break;
      case 32: SrcSubReg = AArch64::sub_32; break;
      case 64: SrcSubReg = AArch64::sub_64; break;

    assert(VA.isMemLoc() && "unexpected argument location");
    OpSize = (OpSize + 7) / 8;
  if (!MemOpChains.empty())
                        &MemOpChains[0], MemOpChains.size());

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    const char *Sym = S->getSymbol();

  if (IsTailCall && !IsSibCall) {

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  assert(Mask && "Missing call preserved mask for calling convention");

  Ops.push_back(InFlag);

  uint64_t CalleePopBytes
                         IsVarArg, Ins, dl, DAG, InVals);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    assert(VA.isRegLoc() && "Memory locations not expected for call return");
                                         bool IsCalleeStructRet,
                                         bool IsCallerStructRet,
  bool CCMatch = CallerCC == CalleeCC;
       e = CallerF->arg_end(); i != e; ++i)
    if (i->hasByValAttr())
         && "Unexpected variadic calling convention");

  if (IsVarArg && !Outs.empty()) {
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
    if (RVLocs1.size() != RVLocs2.size())
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
                                               bool TailCallOpt) const {
                                                   int ClobberedFI) const {
  int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FI->getIndex() < 0) {
        int64_t InLastByte = InFirstByte;
        if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
            (FirstByte <= InFirstByte && InFirstByte <= LastByte))
                     &ArgChains[0], ArgChains.size());
  return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
  EVT VT = RHSC->getValueType(0);
  bool knownInvalid = false;
    C = RHSC->getSExtValue();
  } else if (RHSC->getZExtValue() > INT64_MAX) {
    knownInvalid = true;
    C = RHSC->getZExtValue();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
                     Chain, CmpOp, A64cc, DestBB);
                     Chain, SetCC, A64cc, DestBB);
                     A64BR_CC, SetCC, A64cc, DestBB);
    Args.push_back(Entry);
                       Callee, Args, DAG, SDLoc(Op));
  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  if (!CallInfo.second.getNode())

  return CallInfo.first;
                     false, SDLoc(Op)).first;
                                      bool IsSigned) const {
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = AArch64::X29;
                     false, false, false, 0);
  if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) {

  if (Alignment == 0) {
    const PointerType *GVPtrTy = cast<PointerType>(GV->getType());

  unsigned char HiFixup, LoFixup;

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Func);
  Ops.push_back(SymAddr);
  Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT));
  Ops.push_back(Glue);
  assert(getSubtarget()->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
         && "TLS only supported in small memory model");
                                        bool IsSigned) const {
                       CmpOp, IfTrue, IfFalse, A64cc);
                       SetCC, IfTrue, IfFalse, A64cc);
                       SetCC, IfTrue, A64SELECT_CC, A64cc);

  return A64SELECT_CC;
                        A64CMP, IfTrue, IfFalse,

  bool Invert = false;
      NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

  bool SwapIfRegArgs = false;
    SwapIfRegArgs = true;
    SwapIfRegArgs = true;
    SwapIfRegArgs = true;
    SwapIfRegArgs = true;
    SwapIfRegArgs = true;
    SwapIfRegArgs = true;

      NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

         "Unexpected setcc expansion!");

  return A64SELECT_CC;
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
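// isNeonModifiedImm checks whether a splat constant can be materialized as
// a NEON modified-immediate (MOVI/MVNI): for each element width the splat
// must consist of a single byte (or a 0xff-extended byte pattern) at some
// byte position, which the masks below test case by case.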
                               unsigned &Imm, unsigned &OpCmode) {
  switch (SplatBitSize) {
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");

    if ((SplatBits & ~0xff) == 0) {
    if ((SplatBits & ~0xff00) == 0) {
      Imm = SplatBits >> 8;

    if ((SplatBits & ~0xff) == 0) {
    if ((SplatBits & ~0xff00) == 0) {
      Imm = SplatBits >> 8;
    if ((SplatBits & ~0xff0000) == 0) {
      Imm = SplatBits >> 16;
    if ((SplatBits & ~0xff000000) == 0) {
      Imm = SplatBits >> 24;

    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
      Imm = SplatBits >> 8;
    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
      Imm = SplatBits >> 16;

    uint64_t BitMask = 0xff;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
      } else if ((SplatBits & BitMask) != 0) {

  if (!isa<ConstantSDNode>(Shift->getOperand(1)))
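// getLSBForBFI decides whether MaskedVal can be shifted so that its set
// bits line up with the least significant set bit of Mask, returning that
// LSB on success and -1 when no single shift achieves it.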
                           SDValue &MaskedVal, uint64_t Mask) {
  int64_t ShiftRightRequired = LSB;
      isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
      isa<ConstantSDNode>(MaskedVal.getOperand(1))) {

  if (ShiftRightRequired > 0)
  else if (ShiftRightRequired < 0) {
  if (isa<ConstantSDNode>(LHS.getOperand(1)))

  if (LHSMask & RHSMask)

  if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {

  int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
  assert(Width && "Expected non-zero bitfield width");
                            LHS.getOperand(0), Bitfield,

  if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
  uint64_t ExistingMask = 0;
  bool Extended = false;
      !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))

  if (ExtraMask & ExistingMask)
         && "Invalid types for BFI");

  if (PossExtraMask.getOperand(0) != OldBFIVal)
                       OldBFIVal, NewBFIVal,

  if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
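// tryCombineToEXTR folds an OR of a left-shifted value and a right-shifted
// value whose shift amounts sum to the register width into the EXTR
// instruction; exactly one operand may supply the high half, so the match
// is rejected when LHSFromHi == RHSFromHi.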
  uint32_t ShiftLHS = 0;
  uint32_t ShiftRHS = 0;
  if (LHSFromHi == RHSFromHi)

  unsigned SplatBitSize;
  if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
  if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
      !HasAnyUndefs && SplatBits0 == ~SplatBits1) {
  if (!isa<ConstantSDNode>(Shift->getOperand(1)))

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                    HasAnyUndefs, ElementBits) ||
      SplatBitSize > ElementBits)
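// Immediate ranges for NEON vector shifts: a left-shift immediate must lie
// in [0, ElementBits - 1], while a right-shift immediate must lie in
// [1, ElementBits], as the two predicates below enforce.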
  assert(VT.isVector() && "vector shift count is not a vector type");
  return (Cnt >= 0 && Cnt < ElementBits);

  assert(VT.isVector() && "vector shift count is not a vector type");
  return (Cnt >= 1 && Cnt <= ElementBits);
  assert(ST->hasNEON() && "unexpected vector shift");

  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();

  unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
        UI.getUse().getResNo() != Addr.getResNo())

    bool isLaneOp = false;
    unsigned NewOpc = 0;
    unsigned NumVecs = 0;
      unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
        NumVecs = 1; isLoad = false; break;
        NumVecs = 2; isLoad = false; break;
        NumVecs = 3; isLoad = false; break;
        NumVecs = 4; isLoad = false; break;
        NumVecs = 2; isLoad = false; break;
        NumVecs = 3; isLoad = false; break;
        NumVecs = 4; isLoad = false; break;
        NumVecs = 2; isLaneOp = true; break;
        NumVecs = 3; isLaneOp = true; break;
        NumVecs = 4; isLaneOp = true; break;
        NumVecs = 2; isLoad = false; isLaneOp = true; break;
        NumVecs = 3; isLoad = false; isLaneOp = true; break;
        NumVecs = 4; isLoad = false; isLaneOp = true; break;
      uint32_t IncVal = CInc->getZExtValue();
      if (IncVal != NumBytes)

    unsigned NumResultVecs = (isLoad ? NumVecs : 0);
    for (n = 0; n < NumResultVecs; ++n)

    std::vector<SDValue> NewResults;
    for (unsigned i = 0; i < NumResultVecs; ++i) {
      NewResults.push_back(SDValue(UpdN.getNode(), i));
    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1));
  unsigned NumVecs = 0;
  unsigned NewOpc = 0;
  unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();

  unsigned VLDLaneNo =
      cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue();

    if (UI.getUse().getResNo() == NumVecs)
        VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
  for (n = 0; n < NumVecs; ++n)

    unsigned ResNo = UI.getUse().getResNo();
    if (ResNo == NumVecs)

  std::vector<SDValue> VLDDupResults;
  for (unsigned n = 0; n < NumVecs; ++n)
    VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
  VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));

  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
  unsigned V0NumElts = 0;

  for (unsigned i = 0; i < NumElts; ++i) {
      Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue());
    } else if (V1.getNode() == 0) {
      unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue();
      Mask[i] = (Lane + V0NumElts);

  if (!V1.getNode() && V0NumElts == NumElts * 2) {

  if (V1.getNode() && NumElts == V0NumElts &&
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      unsigned OpCmode = 0;
                            ImmVal, OpCmodeVal);
      uint64_t NegatedImm = (~SplatBits).getZExtValue();
                            ImmVal, OpCmodeVal);
  bool isOnlyLowElement = true;
  bool usesOnlyOneValue = true;
  bool hasDominantValue = false;
  bool isConstant = true;
  for (unsigned i = 0; i < NumElts; ++i) {
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];
    if (++Count > (NumElts / 2)) {
      hasDominantValue = true;

  if (ValueCounts.size() != 1)
    usesOnlyOneValue = false;
  Value = ValueCounts.begin()->first;

  if (ValueCounts.size() == 0)
  if (hasDominantValue && EltSize <= 64) {
    if (!usesOnlyOneValue) {
      for (unsigned I = 0; I < NumElts; ++I) {

  if (usesOnlyOneValue && isConstant) {

  if (!isConstant && !usesOnlyOneValue) {
    for (unsigned i = 0; i < NumElts; ++i) {
  assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
         "Only possible block sizes for REV are: 16, 32, 64");

  unsigned BlockElts = M[0] + 1;
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)

  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
  bool ismatch = true;

  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != i * 2) {

  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != i * 2 + 1) {

  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != i / 2 + NumElts * (i % 2)) {

  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != (NumElts + i) / 2 + NumElts * (i % 2)) {

  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != i + (NumElts - 1) * (i % 2)) {

  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != 1 + i + (NumElts - 1) * (i % 2)) {
    return DAG.getNode(ISDNo, dl, VT, V1, V2);

    if (Lane == -1) Lane = 0;

    bool IsScalarToVector = true;
        IsScalarToVector = false;
    if (IsScalarToVector)

  int Length = ShuffleMask.size();
  if (V1EltNum == Length) {
    bool IsSequential = true;
    int CurMask = ShuffleMask[0];
    for (int I = 0; I < Length; ++I) {
      if (ShuffleMask[I] != CurMask) {
        IsSequential = false;

      assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
      unsigned VecSize = EltSize * V1EltNum;
      unsigned Index = (EltSize / 8) * ShuffleMask[0];
      if (VecSize == 64 || VecSize == 128)
  for (int I = 0; I != Length; ++I) {
    if (ShuffleMask[I] != I) {

  for (int I = 0; I != Length; ++I) {
    if (ShuffleMask[I] != (I + V1EltNum)) {

  if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
    if (NV1Elt.size() > NV2Elt.size()) {

  for (int I = 0, E = InsMasks.size(); I != E; ++I) {
    int Mask = InsMasks[I];
    if (Mask >= V1EltNum) {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

  assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa"
         && Constraint != "Ush" && "Unimplemented constraints");
                                          const char *Constraint) const {
                                std::string &Constraint,
                                std::vector<SDValue> &Ops,

  if (Constraint.size() != 1) return;

  switch (Constraint[0]) {
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'Z': {
    switch (Constraint[0]) {
                                    GA->getValueType(0));
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
                                    BA->getValueType(0));
    } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
                                    ES->getValueType(0));

    if (CFP->isExactlyValue(0.0)) {

    Ops.push_back(Result);
std::pair<unsigned, const TargetRegisterClass*>
                                const std::string &Constraint,
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &AArch64::GPR32RegClass);
      return std::make_pair(0U, &AArch64::GPR64RegClass);
      return std::make_pair(0U, &AArch64::FPR16RegClass);
      return std::make_pair(0U, &AArch64::FPR32RegClass);
      return std::make_pair(0U, &AArch64::FPR64RegClass);
      return std::make_pair(0U, &AArch64::FPR128RegClass);
                                               unsigned Intrinsic) const {
  switch (Intrinsic) {
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();

    unsigned NumElts = 0;
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();