15 #define DEBUG_TYPE "arm-isel"
54 STATISTIC(NumTailCalls, "Number of tail calls");
55 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
56 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
61 cl::desc("Generate tail calls (TEMPORARY OPTION)."),
66 cl::desc("Generate calls via indirect call instructions"),
71 cl::desc("Enable / disable ARM interworking (for debugging only)"),
75 class ARMCCState : public CCState {
80 : CCState(CC, isVarArg, MF, TM, locs, C) {
82 "ARMCCState users must specify whether their context is call"
83 "or prologue generation.");
91 ARM::R0, ARM::R1, ARM::R2, ARM::R3
94 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
95 MVT PromotedBitwiseVT) {
96 if (VT != PromotedLdStVT) {
135 if (VT.isInteger() && VT != PromotedBitwiseVT) {
153 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
158 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
606 for (unsigned i = 0; i < 6; ++i) {
924 bool isThumb2, unsigned &LdrOpc,
926 static const unsigned LoadBares[4][2] = {{ARM::LDREXB, ARM::t2LDREXB},
927 {ARM::LDREXH, ARM::t2LDREXH},
928 {ARM::LDREX, ARM::t2LDREX},
929 {ARM::LDREXD, ARM::t2LDREXD}};
930 static const unsigned LoadAcqs[4][2] = {{ARM::LDAEXB, ARM::t2LDAEXB},
931 {ARM::LDAEXH, ARM::t2LDAEXH},
932 {ARM::LDAEX, ARM::t2LDAEX},
933 {ARM::LDAEXD, ARM::t2LDAEXD}};
934 static const unsigned StoreBares[4][2] = {{ARM::STREXB, ARM::t2STREXB},
935 {ARM::STREXH, ARM::t2STREXH},
936 {ARM::STREX, ARM::t2STREX},
937 {ARM::STREXD, ARM::t2STREXD}};
938 static const unsigned StoreRels[4][2] = {{ARM::STLEXB, ARM::t2STLEXB},
939 {ARM::STLEXH, ARM::t2STLEXH},
940 {ARM::STLEX, ARM::t2STLEX},
941 {ARM::STLEXD, ARM::t2STLEXD}};
943 const unsigned (*LoadOps)[2], (*StoreOps)[2];
950 StoreOps = StoreRels;
952 StoreOps = StoreBares;
955 "unsupported size for atomic binary op!");
957 LdrOpc = LoadOps[Log2_32(Size)][isThumb2];
958 StrOpc = StoreOps[Log2_32(Size)][isThumb2];
971 std::pair<const TargetRegisterClass*, uint8_t>
983 RRC = &ARM::DPRRegClass;
993 RRC = &ARM::DPRRegClass;
997 RRC = &ARM::DPRRegClass;
1001 RRC = &ARM::DPRRegClass;
1005 return std::make_pair(RRC, Cost);
1163 return &ARM::QQPRRegClass;
1165 return &ARM::QQQQPRRegClass;
1188 for (unsigned i = 0; i != NumVals; ++i) {
1267 #include "ARMGenCallingConv.inc"
1273 bool isVarArg) const {
1278 if (Subtarget->hasVFP2() && !isVarArg) {
1280 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1282 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1288 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1289 else if (Subtarget->hasVFP2() &&
1292 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1293 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1297 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1300 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1302 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1304 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1311 ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
1316 bool isThisReturn, SDValue ThisVal) const {
1322 CCInfo.AnalyzeCallResult(Ins,
1323 CCAssignFnForNode(CallConv, true,
1327 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1332 if (i == 0 && isThisReturn) {
1334 "unexpected return calling convention register assignment");
1393 ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
1401 return DAG.getStore(Chain, dl, Arg, PtrOff,
1408 RegsToPassVector &RegsToPass,
1416 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));
1450 bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
1451 bool isThisReturn = false;
1452 bool isSibCall = false;
1458 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1460 Outs, OutVals, Ins, DAG);
1473 CCInfo.AnalyzeCallOperands(Outs,
1474 CCAssignFnForNode(CallConv, false,
1478 unsigned NumBytes = CCInfo.getNextStackOffset();
1492 RegsToPassVector RegsToPass;
1497 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1499 ++i, ++realArgIdx) {
1501 SDValue Arg = OutVals[realArgIdx];
1503 bool isByVal = Flags.isByVal();
1531 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
1532 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1536 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
1537 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1541 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
1542 dl, DAG, VA, Flags));
1545 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
1546 StackPtr, MemOpChains, Flags);
1551 "unexpected calling convention register assignment");
1552 assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
1553 "unexpected use of 'returned'");
1554 isThisReturn = true;
1556 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1557 } else if (isByVal) {
1559 unsigned offset = 0;
1563 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
1564 unsigned CurByValIdx = CCInfo.getInRegsParamsProceed();
1566 if (CurByValIdx < ByValArgsCount) {
1568 unsigned RegBegin, RegEnd;
1569 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
1573 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
1578 false, false, false,
1581 RegsToPass.push_back(std::make_pair(j, Load));
1586 offset = RegEnd - RegBegin;
1588 CCInfo.nextInRegsParam();
1603 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
1607 } else if (!isSibCall) {
1610 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1611 dl, DAG, VA, Flags));
1615 if (!MemOpChains.empty())
1617 &MemOpChains[0], MemOpChains.size());
1625 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1626 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1627 RegsToPass[i].second, InFlag);
1642 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1643 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1644 RegsToPass[i].second, InFlag);
1653 bool isDirect = false;
1654 bool isARMFunc = false;
1655 bool isLocalARMFunc = false;
1660 && "long-calls with non-static relocation model!");
1677 false, false, false, 0);
1679 const char *Sym = S->getSymbol();
1685 ARMPCLabelIndex, 0);
1692 false, false, false, 0);
1700 isARMFunc = !Subtarget->isThumb() || isStub;
1713 false, false, false, 0);
1719 unsigned OpFlags = 0;
1729 isARMFunc = !Subtarget->isThumb() || isStub;
1731 const char *Sym = S->getSymbol();
1736 ARMPCLabelIndex, 4);
1742 false, false, false, 0);
1747 unsigned OpFlags = 0;
1761 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
1766 if (!isDirect && !Subtarget->hasV5TOps())
1768 else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
1777 std::vector<SDValue> Ops;
1778 Ops.push_back(Chain);
1779 Ops.push_back(Callee);
1783 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1784 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1785 RegsToPass[i].second.getValueType()));
1789 const uint32_t *Mask;
1799 isThisReturn = false;
1805 assert(Mask && "Missing call preserved mask for calling convention");
1810 Ops.push_back(InFlag);
1817 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
1827 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
1828 InVals, isThisReturn,
1829 isThisReturn ? OutVals[0] : SDValue());
1837 ARMTargetLowering::HandleByVal(
1838 CCState *State, unsigned &size, unsigned Align) const {
1842 "unhandled ParmContext");
1855 assert(NSAAOffset >= (RE-RB)*4 &&
1856 "Stack offset for byval regs doesn't introduced anymore?");
1857 NSAAOffset -= (RE-RB)*4;
1860 if ((ARM::R0 <= reg) && (reg <= ARM::R3)) {
1862 unsigned AlignInRegs = Align / 4;
1863 unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
1864 for (unsigned i = 0; i < Waste; ++i)
1868 unsigned excess = 4 * (ARM::R4 - reg);
1874 if (Subtarget->isAAPCS_ABI() && NSAAOffset != 0 && size > excess) {
1886 unsigned ByValRegBegin = reg;
1887 unsigned ByValRegEnd = (size < excess) ? reg + size/4 : (unsigned)ARM::R4;
1891 for (unsigned i = reg+1; i != ByValRegEnd; ++i)
1931 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
1939 SDValue Ptr = Ld->getBasePtr();
1947 assert(FI != INT_MAX);
1957 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
1960 bool isCalleeStructRet,
1961 bool isCallerStructRet,
1968 bool CCMatch = CallerCC == CalleeCC;
1975 if (isVarArg && !Outs.empty())
1986 if (isCalleeStructRet || isCallerStructRet)
2015 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
2020 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
2022 if (RVLocs1.size() != RVLocs2.size())
2024 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
2025 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
2027 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
2029 if (RVLocs1[i].isRegLoc()) {
2030 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
2033 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
2043 getInfo<ARMFunctionInfo>();
2049 if (!Outs.empty()) {
2055 CCInfo.AnalyzeCallOperands(Outs,
2056 CCAssignFnForNode(CalleeCC, false, isVarArg));
2057 if (CCInfo.getNextStackOffset()) {
2065 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2067 ++i, ++realArgIdx) {
2070 SDValue Arg = OutVals[realArgIdx];
2081 if (!ArgLocs[++i].isRegLoc())
2084 if (!ArgLocs[++i].isRegLoc())
2086 if (!ArgLocs[++i].isRegLoc())
2108 return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, true,
2130 if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
2133 else if (IntKind == "SWI" || IntKind == "UNDEF")
2137 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2146 ARMTargetLowering::LowerReturn(SDValue Chain,
2160 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, true,
2168 for (unsigned i = 0, realRVLocIdx = 0;
2170 ++i, ++realRVLocIdx) {
2172 assert(VA.isRegLoc() && "Can only return in registers!");
2174 SDValue Arg = OutVals[realRVLocIdx];
2247 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2271 if (Copies.size() > 2)
2296 bool HasRet = false;
2312 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2351 unsigned ARMPCLabelIndex = 0;
2354 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2360 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2370 false, false, false, 0);
2383 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2394 false, false, false, 0);
2403 Entry.Node = Argument;
2405 Args.push_back(Entry);
2409 false, false, false, false,
2413 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2414 return CallResult.first;
2436 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2443 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
2445 false, false, false, 0);
2451 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
2453 false, false, false, 0);
2461 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
2463 false, false, false, 0);
2475 "TLS not implemented for non-ELF targets");
2483 return LowerToTLSGeneralDynamicModel(GA, DAG);
2486 return LowerToTLSExecModels(GA, DAG, model);
2495 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2506 false, false, false, 0);
2511 Result = DAG.getLoad(PtrVT, dl, Chain, Result,
2513 false, false, false, 0);
2530 false, false, false, 0);
2538 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2558 false, false, false, 0);
2562 unsigned ARMPCLabelIndex = 0;
2579 false, false, false, 0);
2589 false, false, false, 0);
2597 "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
2603 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2606 ARMPCLabelIndex, PCAdj);
2611 false, false, false, 0);
2635 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2651 ? 0 : (Subtarget->isThumb() ? 4 : 8);
2660 false, false, false, 0);
2687 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
2719 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
2725 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
2728 isRead = ~isRead & 1;
2729 isData = ~isData & 1;
2760 RC = &ARM::tGPRRegClass;
2762 RC = &ARM::GPRRegClass;
2777 false, false, false, 0);
2788 unsigned InRegsParamRecordIdx,
2790 unsigned &ArgRegsSize,
2791 unsigned &ArgRegsSaveSize)
2795 unsigned RBegin, REnd;
2797 NumGPRs = REnd - RBegin;
2799 unsigned int firstUnalloced;
2803 NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
2807 ArgRegsSize = NumGPRs * 4;
2810 if (NumGPRs && Align == 8 &&
2811 (ArgRegsSize < ArgSize ||
2826 ArgRegsSaveSize = ArgRegsSize + Padding;
2830 ArgRegsSaveSize = ArgRegsSize;
2844 const Value *OrigArg,
2845 unsigned InRegsParamRecordIdx,
2846 unsigned OffsetFromOrigArg,
2849 bool ForceMutable) const {
2865 unsigned firstRegToSaveIndex, lastRegToSaveIndex;
2866 unsigned RBegin, REnd;
2869 firstRegToSaveIndex = RBegin - ARM::R0;
2870 lastRegToSaveIndex = REnd - ARM::R0;
2874 lastRegToSaveIndex = 4;
2877 unsigned ArgRegsSize, ArgRegsSaveSize;
2878 computeRegArea(CCInfo, MF, InRegsParamRecordIdx, ArgSize,
2879 ArgRegsSize, ArgRegsSaveSize);
2885 if (ArgRegsSaveSize) {
2887 unsigned Padding = ArgRegsSaveSize - ArgRegsSize;
2891 "The only parameter may be padded.");
2897 Padding + ArgOffset,
2902 for (unsigned i = 0; firstRegToSaveIndex < lastRegToSaveIndex;
2903 ++firstRegToSaveIndex, ++i) {
2906 RC = &ARM::tGPRRegClass;
2908 RC = &ARM::GPRRegClass;
2923 if (!MemOps.empty())
2925 &MemOps[0], MemOps.size());
2938 bool ForceMutable) const {
2949 0, ArgOffset, 0, ForceMutable);
2955 ARMTargetLowering::LowerFormalArguments(SDValue Chain,
2972 CCAssignFnForNode(CallConv, false,
2976 int lastInsIndex = -1;
2979 unsigned CurArgIdx = 0;
2986 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2989 CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
2998 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
3007 false, false, false, 0);
3009 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3018 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3024 RC = &ARM::SPRRegClass;
3026 RC = &ARM::DPRRegClass;
3028 RC = &ARM::QPRRegClass;
3070 int index = ArgLocs[i].getValNo();
3074 if (index != lastInsIndex)
3084 int FrameIndex = StoreByValRegs(
3085 CCInfo, DAG, dl, Chain, CurOrigArg,
3103 false, false, false, 0));
3105 lastInsIndex = index;
3112 VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3121 return CFP->getValueAPF().isPosZero();
3127 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
3128 return CFP->getValueAPF().isPosZero();
3141 unsigned C = RHSC->getZExtValue();
3215 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
3244 if (CMOVTrue && CMOVFalse) {
3246 unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
3250 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
3252 False = SelectFalse;
3253 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
3286 bool &swpCmpOps, bool &swpVselOps) {
3314 swpCmpOps = !swpCmpOps;
3315 swpVselOps = !swpVselOps;
3366 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
3391 bool swpCmpOps = false;
3392 bool swpVselOps = false;
3405 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
3412 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
3414 Result, TrueVal, ARMcc2, CCR, Cmp2);
3446 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
3448 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
3449 Ld->isVolatile(), Ld->isNonTemporal(),
3450 Ld->isInvariant(), Ld->getAlignment());
3463 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
3464 SDValue Ptr = Ld->getBasePtr();
3466 Ld->getChain(), Ptr,
3467 Ld->getPointerInfo(),
3468 Ld->isVolatile(), Ld->isNonTemporal(),
3469 Ld->isInvariant(), Ld->getAlignment());
3472 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
3476 Ld->getChain(), NewPtr,
3477 Ld->getPointerInfo().getWithOffset(4),
3478 Ld->isVolatile(), Ld->isNonTemporal(),
3479 Ld->isInvariant(), NewAlign);
3497 bool LHSSeenZero = false;
3499 bool RHSSeenZero = false;
3501 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
3517 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
3520 Chain, Dest, ARMcc, CCR, Cmp);
3532 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
3549 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
3552 Chain, Dest, ARMcc, CCR, Cmp);
3560 SDValue Result = OptimizeVFPBrcond(Op, DAG);
3569 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
3572 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
3607 false, false, false, 0);
3612 Addr = DAG.getLoad(PTy, dl, Chain, Addr,
3614 false, false, false, 0);
3631 "Invalid type for custom lowering!");
3671 "Invalid type for custom lowering!");
3690 return DAG.getNode(Opc, dl, VT, Op);
3712 return DAG.getNode(Opc, dl, VT, Op);
3724 bool UseNEON = !InGPR && Subtarget->hasNEON();
3774 &Tmp1, 1).getValue(1);
3804 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3806 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
3824 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3826 ? ARM::R7 : ARM::R11;
3831 false, false, false, 0);
3850 "ExpandBITCAST called for non-i64 type");
3865 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
3880 assert(VT.isVector() && "Expected a vector type");
3921 return DAG.getMergeValues(Ops, 2, dl);
3930 unsigned VTBits = VT.getSizeInBits();
3955 return DAG.getMergeValues(Ops, 2, dl);
4085 assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
4088 "Unexpected type for custom ctpop lowering");
4105 assert(ST->hasNEON() && "unexpected vector shift");
4141 "Unknown shift to lower!");
4144 if (!isa<ConstantSDNode>(N->getOperand(1)) ||
4145 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
4171 bool Invert = false;
4179 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
4183 switch (SetCCOpcode) {
4222 switch (SetCCOpcode) {
4288 Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
4291 Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
4295 Result = DAG.getNOT(dl, Result, VT);
4306 unsigned OpCmode, Imm;
4316 switch (SplatBitSize) {
4321 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
4330 if ((SplatBits & ~0xff) == 0) {
4336 if ((SplatBits & ~0xff00) == 0) {
4339 Imm = SplatBits >> 8;
4350 if ((SplatBits & ~0xff) == 0) {
4356 if ((SplatBits & ~0xff00) == 0) {
4359 Imm = SplatBits >> 8;
4362 if ((SplatBits & ~0xff0000) == 0) {
4365 Imm = SplatBits >> 16;
4368 if ((SplatBits & ~0xff000000) == 0) {
4371 Imm = SplatBits >> 24;
4378 if ((SplatBits & ~0xffff) == 0 &&
4379 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
4382 Imm = SplatBits >> 8;
4387 if ((SplatBits & ~0xffffff) == 0 &&
4388 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
4391 Imm = SplatBits >> 16;
4392 SplatBits |= 0xffff;
4407 uint64_t BitMask = 0xff;
4411 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
4412 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
4415 } else if ((SplatBits & BitMask) != 0) {
4471 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
4476 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
4530 unsigned ExpectedElt = Imm;
4531 for (unsigned i = 1; i < NumElts; ++i) {
4535 if (ExpectedElt == NumElts)
4538 if (M[i] < 0) continue;
4539 if (ExpectedElt != static_cast<unsigned>(M[i]))
4548 bool &ReverseVEXT, unsigned &Imm) {
4550 ReverseVEXT = false;
4561 unsigned ExpectedElt = Imm;
4562 for (unsigned i = 1; i < NumElts; ++i) {
4566 if (ExpectedElt == NumElts * 2) {
4571 if (M[i] < 0) continue;
4572 if (ExpectedElt != static_cast<unsigned>(M[i]))
4587 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
4588 "Only possible block sizes for VREV are: 16, 32, 64");
4595 unsigned BlockElts = M[0] + 1;
4598 BlockElts = BlockSize / EltSz;
4600 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
4603 for (unsigned i = 0; i < NumElts; ++i) {
4604 if (M[i] < 0) continue;
4605 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
4625 WhichResult = (M[0] == 0 ? 0 : 1);
4626 for (unsigned i = 0; i < NumElts; i += 2) {
4627 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
4628 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult))
4643 WhichResult = (M[0] == 0 ? 0 : 1);
4644 for (unsigned i = 0; i < NumElts; i += 2) {
4645 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
4646 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult))
4658 WhichResult = (M[0] == 0 ? 0 : 1);
4659 for (unsigned i = 0; i != NumElts; ++i) {
4660 if (M[i] < 0) continue;
4661 if ((unsigned) M[i] != 2 * i + WhichResult)
4681 WhichResult = (M[0] == 0 ? 0 : 1);
4682 for (unsigned j = 0; j != 2; ++j) {
4683 unsigned Idx = WhichResult;
4684 for (unsigned i = 0; i != Half; ++i) {
4685 int MIdx = M[i + j * Half];
4686 if (MIdx >= 0 && (unsigned) MIdx != Idx)
4705 WhichResult = (M[0] == 0 ? 0 : 1);
4706 unsigned Idx = WhichResult * NumElts / 2;
4707 for (unsigned i = 0; i != NumElts; i += 2) {
4708 if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
4709 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts))
4730 WhichResult = (M[0] == 0 ? 0 : 1);
4731 unsigned Idx = WhichResult * NumElts / 2;
4732 for (unsigned i = 0; i != NumElts; i += 2) {
4733 if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
4734 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx))
4750 if (NumElts != M.size())
4754 for (unsigned i = 0; i != NumElts; ++i)
4755 if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
4767 if (!isa<ConstantSDNode>(N))
4769 Val = cast<ConstantSDNode>(N)->getZExtValue();
4772 if (Val <= 255 || ~Val <= 255)
4789 APInt SplatBits, SplatUndef;
4790 unsigned SplatBitSize;
4792 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
4793 if (SplatBitSize <= 64) {
4806 uint64_t NegatedImm = (~SplatBits).getZExtValue();
4820 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
4835 bool isOnlyLowElement = true;
4836 bool usesOnlyOneValue = true;
4837 bool hasDominantValue = false;
4838 bool isConstant = true;
4844 for (unsigned i = 0; i < NumElts; ++i) {
4849 isOnlyLowElement = false;
4850 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
4853 ValueCounts.insert(std::make_pair(V, 0));
4854 unsigned &Count = ValueCounts[V];
4857 if (++Count > (NumElts / 2)) {
4858 hasDominantValue = true;
4862 if (ValueCounts.size() != 1)
4863 usesOnlyOneValue = false;
4865 Value = ValueCounts.begin()->first;
4867 if (ValueCounts.size() == 0)
4868 return DAG.getUNDEF(VT);
4879 if (hasDominantValue && EltSize <= 32) {
4897 assert(constIndex &&
"The index is not a constant!");
4902 Value, DAG.getConstant(index, MVT::i32)),
4903 DAG.getConstant(index, MVT::i32));
4910 if (!usesOnlyOneValue) {
4913 for (unsigned I = 0; I < NumElts; ++I) {
4927 for (unsigned i = 0; i < NumElts; ++i)
4932 Val = LowerBUILD_VECTOR(Val, DAG, ST);
4936 if (usesOnlyOneValue) {
4938 if (isConstant && Val.getNode())
4951 SDValue shuffle = ReconstructShuffle(Op, DAG);
4959 if (EltSize >= 32) {
4965 for (unsigned i = 0; i < NumElts; ++i)
4977 if (!isConstant && !usesOnlyOneValue) {
4978 SDValue Vec = DAG.getUNDEF(VT);
4979 for (unsigned i = 0 ; i < NumElts; ++i) {
4983 SDValue LaneIdx = DAG.getConstant(i, MVT::i32);
5004 for (unsigned i = 0; i < NumElts; ++i) {
5028 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
5029 bool FoundSource = false;
5030 for (unsigned j = 0; j < SourceVecs.size(); ++j) {
5031 if (SourceVecs[j] == SourceVec) {
5032 if (MinElts[j] > EltNo)
5034 if (MaxElts[j] < EltNo)
5051 if (SourceVecs.size() > 2)
5055 int VEXTOffsets[2] = {0, 0};
5059 for (unsigned i = 0; i < SourceVecs.size(); ++i) {
5062 ShuffleSrcs[i] = SourceVecs[i];
5073 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts &&
5074 "unexpected vector sizes in ReconstructShuffle");
5076 if (MaxElts[i] - MinElts[i] >= NumElts) {
5081 if (MinElts[i] >= NumElts) {
5083 VEXTOffsets[i] = NumElts;
5087 } else if (MaxElts[i] < NumElts) {
5095 VEXTOffsets[i] = MinElts[i];
5109 for (unsigned i = 0; i < NumElts; ++i) {
5117 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i)
5119 if (ExtractVec == SourceVecs[0]) {
5120 Mask.push_back(ExtractElt - VEXTOffsets[0]);
5122 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]);
5143 unsigned PFIndexes[4];
5144 for (unsigned i = 0; i != 4; ++i) {
5148 PFIndexes[i] = M[i];
5152 unsigned PFTableIndex =
5153 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
5155 unsigned Cost = (PFEntry >> 30);
5162 unsigned Imm, WhichResult;
5165 return (EltSize >= 32 ||
5186 unsigned OpNum = (PFEntry >> 26) & 0x0F;
5187 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
5188 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
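// As decoded above, each perfect-shuffle table entry packs the cost in bits 31:30, the opcode in bits 29:26, and two 13-bit operand IDs in bits 25:13 and 12:0.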
5208 if (OpNum == OP_COPY) {
5209 if (LHSID == (1*9+2)*9+3) return LHS;
5210 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
5237 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
5247 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
5251 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
5255 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
5269 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
5289 "Expect an v8i16/v16i8 type");
5294 unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
5315 if (EltSize <= 32) {
5319 if (Lane == -1) Lane = 0;
5330 bool IsScalarToVector = true;
5333 IsScalarToVector = false;
5336 if (IsScalarToVector)
5345 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
5370 unsigned WhichResult;
5371 if (isVTRNMask(ShuffleMask, VT, WhichResult))
5373 V1, V2).getValue(WhichResult);
5374 if (isVUZPMask(ShuffleMask, VT, WhichResult))
5376 V1, V2).getValue(WhichResult);
5377 if (isVZIPMask(ShuffleMask, VT, WhichResult))
5379 V1, V2).getValue(WhichResult);
5383 V1, V1).getValue(WhichResult);
5386 V1, V1).getValue(WhichResult);
5389 V1, V1).getValue(WhichResult);
5396 unsigned PFIndexes[4];
5397 for (unsigned i = 0; i != 4; ++i) {
5398 if (ShuffleMask[i] < 0)
5401 PFIndexes[i] = ShuffleMask[i];
5405 unsigned PFTableIndex =
5406 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
5408 unsigned Cost = (PFEntry >> 30);
5415 if (EltSize >= 32) {
5423 for (unsigned i = 0; i < NumElts; ++i) {
5424 if (ShuffleMask[i] < 0)
5428 ShuffleMask[i] < (int)NumElts ? V1 : V2,
5451 if (!isa<ConstantSDNode>(Lane))
5460 if (!isa<ConstantSDNode>(Lane))
5477 "unexpected CONCAT_VECTORS");
5506 unsigned HiElt = 1 - LoElt;
5511 if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
5514 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
5515 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
5518 if (Hi0->isNullValue() && Hi1->isNullValue())
5531 unsigned HalfSize = EltSize / 2;
5533 if (!isIntN(HalfSize, C->getSExtValue()))
5536 if (!isUIntN(HalfSize, C->getZExtValue()))
5571 assert(OrigVT.isSimple() && "Expecting a simple value type");
5574 switch (OrigSimpleTy) {
5590 unsigned ExtOpcode) {
5657 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
5658 unsigned NumElts = VT.getVectorNumElements();
5661 for (unsigned i = 0; i != NumElts; ++i) {
5699 "unexpected type for custom-lowering ISD::MUL");
5702 unsigned NewOpc = 0;
5706 if (isN0SExt && isN1SExt)
5711 if (isN0ZExt && isN1ZExt)
5713 else if (isN1SExt || isN1ZExt) {
5747 "unexpected types for extended operands to VMULL");
5748 return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
5762 return DAG.getNode(N0->getOpcode(), DL, VT,
5838 "unexpected type for custom-lowering ISD::SDIV");
5873 "unexpected type for custom-lowering ISD::UDIV");
5949 bool ExtraOp = false;
5992 Entry.isSExt = false;
5993 Entry.isZExt = false;
5994 Entry.isSRet = true;
5995 Args.push_back(Entry);
5999 Entry.isSExt = false;
6000 Entry.isZExt = false;
6001 Args.push_back(Entry);
6003 const char *LibcallName = (ArgVT == MVT::f64)
6004 ? "__sincos_stret" : "__sincosf_stret";
6009 false, false, false, false, 0,
6012 Callee, Args, DAG, dl);
6013 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
6015 SDValue LoadSin = DAG.getLoad(ArgVT, dl, CallResult.second, SRet,
6031 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
6044 "Only know how to expand i64 atomics");
6061 cast<MemSDNode>(Node)->getMemOperand(), AN->getOrdering(),
6062 AN->getSynchScope());
6112 return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
6113 LowerGlobalAddressELF(Op, DAG);
6213 unsigned Size) const {
6221 bool isThumb2 = Subtarget->isThumb2();
6234 unsigned ldrOpc, strOpc;
6245 MF->insert(It, loop1MBB);
6246 MF->insert(It, loop2MBB);
6266 if (ldrOpc == ARM::t2LDREX)
6270 .addReg(dest).addReg(oldval));
6271 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
6281 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr);
6282 if (strOpc == ARM::t2STREX)
6286 .addReg(scratch).addImm(0));
6287 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
6303 unsigned Size, unsigned BinOpcode) const {
6317 bool isThumb2 = Subtarget->isThumb2();
6326 unsigned ldrOpc, strOpc;
6360 if (ldrOpc == ARM::t2LDREX)
6365 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
6367 addReg(incr).addReg(dest)).addReg(0);
6370 addReg(dest).addReg(incr)).addReg(0);
6373 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr);
6374 if (strOpc == ARM::t2STREX)
6378 .addReg(scratch).addImm(0));
6379 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
6395 ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
6410 unsigned oldval = dest;
6413 bool isThumb2 = Subtarget->isThumb2();
6422 unsigned ldrOpc, strOpc, extendOpc;
6427 extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
6430 extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
6470 if (ldrOpc == ARM::t2LDREX)
6475 if (signExtend && extendOpc) {
6477 : &ARM::GPRnopcRegClass);
6487 .addReg(oldval).addReg(incr));
6488 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2)
6491 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr);
6492 if (strOpc == ARM::t2STREX)
6496 .addReg(scratch).addImm(0));
6497 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
6514 unsigned Op1, unsigned Op2,
6515 bool NeedsCarry, bool IsCmpxchg,
6525 bool isStore = (MI->getOpcode() == ARM::ATOMIC_STORE_I64);
6526 unsigned offset = (isStore ? -2 : 0);
6532 unsigned OrdIdx = offset + (IsCmpxchg ? 7 : 5);
6535 bool isThumb2 = Subtarget->isThumb2();
6546 unsigned ldrOpc, strOpc;
6551 if (IsCmpxchg || IsMinMax)
6558 if (IsCmpxchg || IsMinMax) MF->insert(It, contBB);
6559 if (IsCmpxchg) MF->insert(It, cont2BB);
6601 .addReg(GPRPair0, 0, ARM::gsub_0);
6603 .addReg(GPRPair0, 0, ARM::gsub_1);
6607 unsigned StoreLo, StoreHi;
6610 for (unsigned i = 0; i < 2; i++) {
6613 .addReg(i == 0 ? destlo : desthi)
6614 .addReg(i == 0 ? vallo : valhi));
6615 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
6619 BB = (i == 0 ? contBB : cont2BB);
6629 .addReg(destlo).addReg(vallo))
6633 .addReg(desthi).addReg(valhi))
6645 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
6677 .addReg(StorePair).addReg(ptr));
6681 .addReg(storesuccess).addImm(0));
6682 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
6707 bool isThumb2 = Subtarget->isThumb2();
6715 unsigned ldrOpc, strOpc;
6731 .addReg(GPRPair0, 0, ARM::gsub_0);
6733 .addReg(GPRPair0, 0, ARM::gsub_1);
6744 void ARMTargetLowering::
6755 bool isThumb = Subtarget->isThumb();
6756 bool isThumb2 = Subtarget->isThumb2();
6759 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
6786 .addConstantPoolIndex(CPI)
6795 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
6803 } else if (isThumb) {
6813 .addConstantPoolIndex(CPI)
6816 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
6845 .addConstantPoolIndex(CPI)
6877 unsigned MaxCSNum = 0;
6887 if (!II->isEHLabel()) continue;
6889 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
6894 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
6895 CSI != CSE; ++CSI) {
6896 CallSiteNumToLPad[*CSI].push_back(BB);
6897 MaxCSNum = std::max(MaxCSNum, *CSI);
6904 std::vector<MachineBasicBlock*> LPadList;
6906 LPadList.reserve(CallSiteNumToLPad.size());
6907 for (unsigned I = 1; I <= MaxCSNum; ++I) {
6910 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
6911 LPadList.push_back(*II);
6912 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
6916 assert(!LPadList.empty() &&
6917 "No landing pad destinations for the dispatch jump table!");
6933 unsigned trap_opcode;
6935 trap_opcode = ARM::tTRAP;
6952 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
6960 MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
6969 unsigned NumLPads = LPadList.size();
6977 if (NumLPads < 256) {
6980 .addImm(LPadList.size()));
6984 .addImm(NumLPads & 0xFFFF));
6986 unsigned VReg2 = VReg1;
6987 if ((NumLPads & 0xFFFF0000) != 0) {
6991 .addImm(NumLPads >> 16));
6999 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
7006 .addJumpTableIndex(MJTI)
7012 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
7017 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
7022 } else if (Subtarget->isThumb()) {
7029 if (NumLPads < 256) {
7066 .addJumpTableIndex(MJTI)
7085 unsigned NewVReg6 = NewVReg5;
7094 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
7105 if (NumLPads < 256) {
7112 .addImm(NumLPads & 0xFFFF));
7114 unsigned VReg2 = VReg1;
7115 if ((NumLPads & 0xFFFF0000) != 0) {
7119 .addImm(NumLPads >> 16));
7158 .addJumpTableIndex(MJTI)
7166 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
7173 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
7179 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
7188 for (std::vector<MachineBasicBlock*>::iterator
7189 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
7191 if (SeenMBBs.insert(CurMBB))
7199 I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) {
7206 while (!Successors.empty()) {
7222 if (!II->isCall()) continue;
7226 OI = II->operands_begin(), OE = II->operands_end();
7228 if (!OI->isReg()) continue;
7229 DefRegs[OI->getReg()] = true;
7234 for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
7235 unsigned Reg = SavedRegs[i];
7237 !ARM::tGPRRegClass.contains(Reg) &&
7238 !ARM::hGPRRegClass.contains(Reg))
7240 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
7242 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
7255 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
7256 (*I)->setIsLandingPad(false);
7275 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
7277 return LdSize == 16 ? ARM::VLD1q32wb_fixed
7278 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
7280 return LdSize == 4 ? ARM::tLDRi
7281 : LdSize == 2 ? ARM::tLDRHi
7282 : LdSize == 1 ? ARM::tLDRBi : 0;
7284 return LdSize == 4 ? ARM::t2LDR_POST
7285 : LdSize == 2 ? ARM::t2LDRH_POST
7286 : LdSize == 1 ? ARM::t2LDRB_POST : 0;
7287 return LdSize == 4 ? ARM::LDR_POST_IMM
7288 : LdSize == 2 ? ARM::LDRH_POST
7289 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
7294 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
7296 return StSize == 16 ? ARM::VST1q32wb_fixed
7297 : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
7299 return StSize == 4 ? ARM::tSTRi
7300 : StSize == 2 ? ARM::tSTRHi
7301 : StSize == 1 ? ARM::tSTRBi : 0;
7303 return StSize == 4 ? ARM::t2STR_POST
7304 : StSize == 2 ? ARM::t2STRH_POST
7305 : StSize == 1 ? ARM::t2STRB_POST : 0;
7306 return StSize == 4 ? ARM::STR_POST_IMM
7307 : StSize == 2 ? ARM::STRH_POST
7308 : StSize == 1 ? ARM::STRB_POST_IMM : 0;
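// getLdOpcode/getStOpcode above select the load/store opcode for a given access size: NEON sizes use VLD1/VST1 with writeback, ARM and Thumb-2 use post-increment forms, and Thumb-1 uses plain immediate-offset opcodes (emitPostLd/emitPostSt then add a separate tADDi8 address update); 0 means the size is unsupported in that mode.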
7315 unsigned LdSize, unsigned Data, unsigned AddrIn,
7316 unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
7317 unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
7318 assert(LdOpc != 0 && "Should have a load opcode");
7323 } else if (IsThumb1) {
7326 .addReg(AddrIn).addImm(0));
7328 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
7330 MIB.addReg(AddrIn).addImm(LdSize);
7332 } else if (IsThumb2) {
7347 unsigned StSize, unsigned Data, unsigned AddrIn,
7348 unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
7349 unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
7350 assert(StOpc != 0 && "Should have a store opcode");
7354 } else if (IsThumb1) {
7359 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
7361 MIB.addReg(AddrIn).addImm(StSize);
7363 } else if (IsThumb2) {
7392 unsigned UnitSize = 0;
7397 bool IsThumb2 = Subtarget->isThumb2();
7401 } else if (Align & 2) {
7406 hasAttribute(AttributeSet::FunctionIndex,
7409 if ((Align % 16 == 0) && SizeVal >= 16)
7411 else if ((Align % 8 == 0) && SizeVal >= 8)
7420 bool IsNeon = UnitSize >= 8;
7424 VecTRC = UnitSize == 16
7430 unsigned BytesLeft = SizeVal % UnitSize;
7431 unsigned LoopSize = SizeVal - BytesLeft;
7433 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
7437 unsigned srcIn = src;
7438 unsigned destIn = dest;
7439 for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
7443 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
7444 IsThumb1, IsThumb2);
7445 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
7446 IsThumb1, IsThumb2);
7454 for (unsigned i = 0; i < BytesLeft; i++) {
7458 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
7459 IsThumb1, IsThumb2);
7460 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
7461 IsThumb1, IsThumb2);
7503 unsigned Vtmp = varEnd;
7504 if ((LoopSize & 0xFFFF0000) != 0)
7507 .addImm(LoopSize & 0xFFFF));
7509 if ((LoopSize & 0xFFFF0000) != 0)
7511 .addReg(Vtmp).addImm(LoopSize >> 16));
7546 .addReg(varLoop).addMBB(loopMBB)
7549 .addReg(srcLoop).addMBB(loopMBB)
7552 .addReg(destLoop).addMBB(loopMBB)
7559 IsThumb1, IsThumb2);
7561 IsThumb1, IsThumb2);
7566 BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop);
7568 MIB.addReg(varPhi).addImm(UnitSize);
7573 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
7579 TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
7592 unsigned srcIn = srcLoop;
7593 unsigned destIn = destLoop;
7594 for (unsigned i = 0; i < BytesLeft; i++) {
7598 emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
7599 IsThumb1, IsThumb2);
7600 emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
7601 IsThumb1, IsThumb2);
7615 bool isThumb2 = Subtarget->isThumb2();
7624 case ARM::t2STR_preidx:
7627 case ARM::t2STRB_preidx:
7630 case ARM::t2STRH_preidx:
7634 case ARM::STRi_preidx:
7635 case ARM::STRBi_preidx: {
7636 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ?
7637 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM;
7653 .addMemOperand(MMO);
7657 case ARM::STRr_preidx:
7658 case ARM::STRBr_preidx:
7659 case ARM::STRH_preidx: {
7663 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
7664 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
7665 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
7673 case ARM::ATOMIC_LOAD_ADD_I8:
7674 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
7675 case ARM::ATOMIC_LOAD_ADD_I16:
7676 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
7677 case ARM::ATOMIC_LOAD_ADD_I32:
7678 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
7680 case ARM::ATOMIC_LOAD_AND_I8:
7681 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
7682 case ARM::ATOMIC_LOAD_AND_I16:
7683 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
7684 case ARM::ATOMIC_LOAD_AND_I32:
7685 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
7687 case ARM::ATOMIC_LOAD_OR_I8:
7688 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
7689 case ARM::ATOMIC_LOAD_OR_I16:
7690 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
7691 case ARM::ATOMIC_LOAD_OR_I32:
7692 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
7694 case ARM::ATOMIC_LOAD_XOR_I8:
7695 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
7696 case ARM::ATOMIC_LOAD_XOR_I16:
7697 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
7698 case ARM::ATOMIC_LOAD_XOR_I32:
7699 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
7701 case ARM::ATOMIC_LOAD_NAND_I8:
7702 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
7703 case ARM::ATOMIC_LOAD_NAND_I16:
7704 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
7705 case ARM::ATOMIC_LOAD_NAND_I32:
7706 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
7708 case ARM::ATOMIC_LOAD_SUB_I8:
7709 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
7710 case ARM::ATOMIC_LOAD_SUB_I16:
7711 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
7712 case ARM::ATOMIC_LOAD_SUB_I32:
7713 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
7715 case ARM::ATOMIC_LOAD_MIN_I8:
7716 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT);
7717 case ARM::ATOMIC_LOAD_MIN_I16:
7718 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT);
7719 case ARM::ATOMIC_LOAD_MIN_I32:
7720 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT);
7722 case ARM::ATOMIC_LOAD_MAX_I8:
7723 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT);
7724 case ARM::ATOMIC_LOAD_MAX_I16:
7725 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT);
7726 case ARM::ATOMIC_LOAD_MAX_I32:
7727 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT);
7729 case ARM::ATOMIC_LOAD_UMIN_I8:
7730 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO);
7731 case ARM::ATOMIC_LOAD_UMIN_I16:
7732 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO);
7733 case ARM::ATOMIC_LOAD_UMIN_I32:
7734 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO);
7736 case ARM::ATOMIC_LOAD_UMAX_I8:
7737 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI);
7738 case ARM::ATOMIC_LOAD_UMAX_I16:
7739 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI);
7740 case ARM::ATOMIC_LOAD_UMAX_I32:
7741 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI);
7743 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0);
7744 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0);
7745 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0);
7747 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1);
7748 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
7749 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);
7751 case ARM::ATOMIC_LOAD_I64:
7752 return EmitAtomicLoad64(MI, BB);
7754 case ARM::ATOMIC_LOAD_ADD_I64:
7755 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr,
7756 isThumb2 ? ARM::t2ADCrr : ARM::ADCrr,
7758 case ARM::ATOMIC_LOAD_SUB_I64:
7759 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
7760 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
7762 case ARM::ATOMIC_LOAD_OR_I64:
7763 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr,
7764 isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
7765 case ARM::ATOMIC_LOAD_XOR_I64:
7766 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr,
7767 isThumb2 ? ARM::t2EORrr : ARM::EORrr);
7768 case ARM::ATOMIC_LOAD_AND_I64:
7769 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr,
7770 isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
7771 case ARM::ATOMIC_STORE_I64:
7772 case ARM::ATOMIC_SWAP_I64:
7773 return EmitAtomicBinary64(MI, BB, 0, 0, false);
7774 case ARM::ATOMIC_CMP_SWAP_I64:
7775 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
7776 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
7778 case ARM::ATOMIC_LOAD_MIN_I64:
7779 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
7780 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
7783 case ARM::ATOMIC_LOAD_MAX_I64:
7784 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
7785 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
7788 case ARM::ATOMIC_LOAD_UMIN_I64:
7789 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
7790 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
7793 case ARM::ATOMIC_LOAD_UMAX_I64:
7794 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
7795 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
7799 case ARM::tMOVCCr_pseudo: {
7830 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
7855 case ARM::BCCZi64: {
7861 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64;
7867 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
7868 .addReg(LHS1).addImm(0));
7869 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
7876 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
7877 .addReg(LHS1).addReg(RHS1));
7878 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
7879 .addReg(LHS2).addReg(RHS2)
7888 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
7893 BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);
7899 case ARM::Int_eh_sjlj_setjmp:
7900 case ARM::Int_eh_sjlj_setjmp_nofp:
7901 case ARM::tInt_eh_sjlj_setjmp:
7902 case ARM::t2Int_eh_sjlj_setjmp:
7903 case ARM::t2Int_eh_sjlj_setjmp_nofp:
7904 EmitSjLjDispatchBlock(MI, BB);
7932 bool isThumb2 = Subtarget->isThumb2();
7954 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
7955 .addReg(ABSSrcReg).addImm(0));
7959 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
7966 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
7974 .addReg(NewRsbDstReg).addMBB(RSBBB)
7983 case ARM::COPY_STRUCT_BYVAL_I32:
7985 return EmitStructByval(MI, BB);
7993 "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'");
8010 MCID = &TII->get(NewOpc);
8013 "converted opcode should be the same except for cc_out");
8025 assert(!NewOpc && "Optional cc_out operand required");
8031 bool deadCPSR = false;
8044 assert(!NewOpc && "Optional cc_out operand required");
8047 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
8050 "expect uninitialized optional cc_out operand");
8090 default: return false;
8159 bool AllOnes = false) {
8166 NonConstantVal, DAG))
8172 OtherOp, NonConstantVal);
8178 CCOp, TrueVal, FalseVal);
8229 unsigned nextIndex = 0;
8252 || C1->getZExtValue() != nextIndex+1)
8285 widenType, &Ops[0], Ops.size());
8324 if (AddcOp0.getNode() == AddcOp1.getNode())
8329 "Expect ADDC with two result values. First: i32");
8344 if (AddeNode == NULL)
8353 "ADDE node has the wrong inputs");
8360 if (AddeOp0.getNode() == AddeOp1.getNode())
8364 bool IsLeftOperandMUL = false;
8369 IsLeftOperandMUL = true;
8383 if (IsLeftOperandMUL)
8389 if (AddcOp0->getOpcode() == Opc) {
8401 if (LoMul->getNode() != HiMul->getNode())
8416 &Ops[0], Ops.size());
8456 if (Result.getNode()) return Result;
8488 if (Result.getNode()) return Result;
8535 return DAG.getNode(Opcode, DL, VT,
8562 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
8564 ShiftAmt = ShiftAmt & (32 - 1);
8569 MulAmt >>= ShiftAmt;
8591 uint64_t MulAmtAbs = -MulAmt;
8637 APInt SplatBits, SplatUndef;
8638 unsigned SplatBitSize;
8641 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
8642 if (SplatBitSize <= 64) {
8680 APInt SplatBits, SplatUndef;
8681 unsigned SplatBitSize;
8683 if (BVN && Subtarget->hasNEON() &&
8684 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
8685 if (SplatBitSize <= 64) {
8719 unsigned SplatBitSize;
8722 APInt SplatBits0, SplatBits1;
8726 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
8727 HasAnyUndefs) && !HasAnyUndefs) {
8728 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
8729 HasAnyUndefs) && !HasAnyUndefs) {
8734 SplatBits0 == ~SplatBits1) {
8786 if ((Val & ~Mask) != Val)
8814 (Mask == 0xffff || Mask == 0xffff0000))
8830 (Mask2 == 0xffff || Mask2 == 0xffff0000))
8850 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
8893 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
8896 unsigned Mask = (1 << Width)-1;
8898 if ((Mask & (~Mask2)) == 0)
8920 !cast<LoadSDNode>(InNode)->isVolatile()) {
8933 DAG.getConstant(4, MVT::i32));
8934 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr,
8985 assert(StVT != VT && "Cannot truncate to the same type");
8994 if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();
8996 unsigned SizeRatio = FromEltSz / ToEltSz;
8997 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
9001 NumElems*SizeRatio);
9007 for (unsigned i = 0; i < NumElems; ++i) ShuffleVec[i] = i * SizeRatio;
9042 for (unsigned I = 0; I < E; I++) {
9044 StoreType, ShuffWide,
9112 for (unsigned i = 0; i < NumElts; ++i) {
9143 for (unsigned i = 0; i < NumElts; ++i) {
9177 assert(EltVT == MVT::f32 && "Unexpected type!");
9190 unsigned NumOfBitCastedElts = 0;
9192 unsigned NumOfRelevantElts = NumElts;
9193 for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
9198 ++NumOfBitCastedElts;
9202 --NumOfRelevantElts;
9206 if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
9214 if (!TLI.isTypeLegal(VecVT))
9224 for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
9310 unsigned HalfElts = NumElts/2;
9312 for (unsigned n = 0; n < NumElts; ++n) {
9315 if (MaskElt < (int)HalfElts)
9317 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
9318 NewElt = HalfElts + MaskElt - NumElts;
9335 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
9343 UI.getUse().getResNo() != Addr.getResNo())
9353 bool isLaneOp = false;
9354 unsigned NewOpc = 0;
9355 unsigned NumVecs = 0;
9357 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
9369 NumVecs = 2; isLaneOp = true; break;
9371 NumVecs = 3; isLaneOp = true; break;
9373 NumVecs = 4; isLaneOp = true; break;
9375 NumVecs = 1; isLoad = false; break;
9377 NumVecs = 2; isLoad = false; break;
9379 NumVecs = 3; isLoad = false; break;
9381 NumVecs = 4; isLoad = false; break;
9383 NumVecs = 2; isLoad = false; isLaneOp = true; break;
9385 NumVecs = 3; isLoad = false; isLaneOp = true; break;
9387 NumVecs = 4; isLoad = false; isLaneOp = true; break;
9412 uint64_t IncVal = CInc->getZExtValue();
9413 if (IncVal != NumBytes)
9415 } else if (NumBytes >= 3 * 16) {
9423 unsigned NumResultVecs = (isLoad ? NumVecs : 0);
9425 for (n = 0; n < NumResultVecs; ++n)
9444 std::vector<SDValue> NewResults;
9445 for (unsigned i = 0; i < NumResultVecs; ++i) {
9446 NewResults.push_back(SDValue(UpdN.getNode(), i));
9448 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1));
9472 unsigned NumVecs = 0;
9473 unsigned NewOpc = 0;
9474 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
9490 unsigned VLDLaneNo =
9491 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
9495 if (UI.getUse().getResNo() == NumVecs)
9499 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
9506 for (n = 0; n < NumVecs; ++n)
9519 unsigned ResNo = UI.getUse().getResNo();
9521 if (ResNo == NumVecs)
9529 std::vector<SDValue> VLDDupResults;
9530 for (unsigned n = 0; n < NumVecs; ++n)
9531 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
9532 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
9559 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
9588 c0 = (I == 0) ? cN : c0;
9707 APInt SplatBits, SplatUndef;
9708 unsigned SplatBitSize;
9710 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
9711 HasAnyUndefs, ElementBits) ||
9712 SplatBitSize > ElementBits)
9714 Cnt = SplatBits.getSExtValue();
9723 assert(VT.isVector() && "vector shift count is not a vector type");
9727 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
9738 assert(VT.isVector() && "vector shift count is not a vector type");
9744 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
9749 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
9779 unsigned VShiftOpc = 0;
9884 unsigned VShiftOpc = 0;
9934 assert(ST->hasNEON() && "unexpected vector shift");
9975 if (VT == MVT::i32 &&
9978 isa<ConstantSDNode>(Lane)) {
10020 unsigned Opcode = 0;
10023 IsReversed = false;
10025 IsReversed = true;
10119 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
10122 } else if (CC == ARMCC::EQ && TrueVal == RHS) {
10130 APInt KnownZero, KnownOne;
10133 if (KnownZero == 0xfffffffe)
10136 else if (KnownZero == 0xffffff00)
10139 else if (KnownZero == 0xffff0000)
10186 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
10225 if (AllowsUnaligned) {
10248 unsigned AlignCheck) {
10249 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
10250 (DstAlign == 0 || DstAlign % AlignCheck == 0));
10254 unsigned DstAlign, unsigned SrcAlign,
10255 bool IsMemset, bool ZeroMemset,
10261 if ((!IsMemset || ZeroMemset) &&
10270 } else if (Size >= 8 &&
10280 else if (Size >= 2)
10327 unsigned Scale = 1;
10329 default: return false;
10344 if ((V & (Scale - 1)) != 0)
10347 return V == (V & ((1LL << 5) - 1));
10352 bool isNeg = false;
10359 default: return false;
10366 return V == (V & ((1LL << 8) - 1));
10367 return V == (V & ((1LL << 12) - 1));
10376 return V == (V & ((1LL << 8) - 1));
10400 default: return false;
10405 return V == (V & ((1LL << 12) - 1));
10408 return V == (V & ((1LL << 8) - 1));
10416 return V == (V & ((1LL << 8) - 1));
10422 int Scale = AM.Scale;
10427 default: return false;
10435 Scale = Scale & ~1;
10436 return Scale == 2 || Scale == 4 || Scale == 8;
10448 if (Scale & 1) return false;
10465 switch (AM.Scale) {
10483 int Scale = AM.Scale;
10485 default: return false;
10489 if (Scale < 0) Scale = -Scale;
10507 if (Scale & 1) return false;
10525 return Imm >= 0 && Imm <= 255;
10540 return AbsImm >= 0 && AbsImm <= 255;
10545 SDValue &Offset, bool &isInc,
10554 int RHSC = (int)RHS->getZExtValue();
10555 if (RHSC < 0 && RHSC > -256) {
10568 int RHSC = (int)RHS->getZExtValue();
10569 if (RHSC < 0 && RHSC > -0x1000) {
10604 SDValue &Offset, bool &isInc,
10611 int RHSC = (int)RHS->getZExtValue();
10612 if (RHSC < 0 && RHSC > -0x100) {
10617 } else if (RHSC > 0 && RHSC < 0x100) {
10642 Ptr = LD->getBasePtr();
10643 VT = LD->getMemoryVT();
10645 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
10646 Ptr = ST->getBasePtr();
10647 VT = ST->getMemoryVT();
10652 bool isLegal = false;
10655 Offset, isInc, DAG);
10658 Offset, isInc, DAG);
10681 VT = LD->getMemoryVT();
10682 Ptr = LD->getBasePtr();
10684 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
10685 VT = ST->getMemoryVT();
10686 Ptr = ST->getBasePtr();
10691 bool isLegal = false;
10721 unsigned Depth) const {
10723 KnownZero = KnownOne = APInt(BitWidth, 0);
10738 if (KnownZero == 0 && KnownOne == 0) return;
10740 APInt KnownZeroRHS, KnownOneRHS;
10742 KnownZero &= KnownZeroRHS;
10743 KnownOne &= KnownOneRHS;
10763 switch (AsmPieces.size()) {
10764 default: return false;
10766 AsmStr = AsmPieces[0];
10771 if (AsmPieces.size() == 3 &&
10772 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
10788 if (Constraint.size() == 1) {
10789 switch (Constraint[0]) {
10801 } else if (Constraint.size() == 2) {
10802 switch (Constraint[0]) {
10821 if (CallOperandVal == NULL)
10825 switch (*constraint) {
10845 typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
10849 if (Constraint.size() == 1) {
10851 switch (Constraint[0]) {
10854 return RCPair(0U, &ARM::tGPRRegClass);
10855 return RCPair(0U, &ARM::GPRRegClass);
10858 return RCPair(0U, &ARM::hGPRRegClass);
10861 return RCPair(0U, &ARM::GPRRegClass);
10866 return RCPair(0U, &ARM::SPRRegClass);
10868 return RCPair(0U, &ARM::DPRRegClass);
10870 return RCPair(0U, &ARM::QPRRegClass);
10876 return RCPair(0U, &ARM::SPR_8RegClass);
10878 return RCPair(0U, &ARM::DPR_8RegClass);
10880 return RCPair(0U, &ARM::QPR_8RegClass);
10884 return RCPair(0U, &ARM::SPRRegClass);
10888 if (StringRef("{cc}").equals_lower(Constraint))
10889 return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
10897 std::string &Constraint,
10898 std::vector<SDValue>&Ops,
10903 if (Constraint.length() != 1) return;
10905 char ConstraintLetter = Constraint[0];
10906 switch (ConstraintLetter) {
10909 case 'I': case 'J': case 'K': case 'L':
10910 case 'M': case 'N': case 'O':
10916 int CVal = (int) CVal64;
10919 if (CVal != CVal64)
10922 switch (ConstraintLetter) {
10927 if (CVal >= 0 && CVal <= 65535)
10934 if (CVal >= 0 && CVal <= 255)
10936 } else if (Subtarget->isThumb2()) {
10955 if (CVal >= -255 && CVal <= -1)
10961 if (CVal >= -4095 && CVal <= 4095)
10974 } else if (Subtarget->isThumb2()) {
10997 if (CVal >= -7 && CVal < 7)
10999 } else if (Subtarget->isThumb2()) {
11022 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
11028 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
11036 if (CVal >= 0 && CVal <= 31)
11045 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
11055 Ops.push_back(Result);
11062 assert(Subtarget->isTargetAEABI() && "Register-based DivRem lowering only");
11065 "Invalid opcode for Div/Rem lowering");
11088 Entry.isSExt = isSigned;
11089 Entry.isZExt = !isSigned;
11090 Args.push_back(Entry);
11103 Callee, Args, DAG, dl);
11104 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
11106 return CallInfo.first;
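// The lowering above builds an argument list and emits a call through
// LowerCallTo to a runtime divide/modulo helper, so quotient and remainder
// come back from a single call. Conceptually this has the same shape as the
// standard-library helper below (illustration only, not the AEABI routine
// itself):
#include <cstdlib>
static std::div_t divAndRem(int Num, int Den) {
  return std::div(Num, Den);   // .quot and .rem from one call
}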
11116 if (v == 0xffffffff)
11123 v = (v >> TO) << TO;
11124 v = (v << LO) >> LO;
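// The shift pair above strips the trailing and leading runs of ones; if
// nothing is left, the value is all ones except for one contiguous run of
// zeros, i.e. the inverse of a field that BFC/BFI can clear or insert into.
// A standalone restatement of that check (illustrative, not the LLVM helper):
#include <cstdint>
static bool isInvertedBitFieldMask(uint32_t v) {
  if (v == 0xffffffffu) return false;                    // no zero run at all
  unsigned TO = 0, LO = 0;
  for (uint32_t t = v; t & 1u; t >>= 1) ++TO;            // count trailing ones
  for (uint32_t t = v; t & 0x80000000u; t <<= 1) ++LO;   // count leading ones
  v = (v >> TO) << TO;                                   // drop trailing ones
  v = (v << LO) >> LO;                                   // drop leading ones
  return v == 0;                 // only one interior zero run may remain
}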
11146 unsigned Intrinsic) const {
11147 switch (Intrinsic) {
11162 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
11177 unsigned NumElts = 0;
11188 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();