bool isPPC64 = Subtarget->isPPC64();
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}
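// A minimal sketch of how a caller might drive getMaxByValAlign to compute a
// byval parameter's alignment (hypothetical helper; the 4-byte default and
// the 16/32-byte caps are assumptions, not taken from this listing):
static unsigned getByValAlignSketch(Type *Ty, bool HasWideVectors) {
  unsigned Align = 4;                                // scalar default
  getMaxByValAlign(Ty, Align, HasWideVectors ? 32 : 16);
  return Align;                                      // never exceeds the cap
}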
// isFloatingPointZero - Return true if this is 0.0 or -0.0.
    return CFP->getValueAPF().isZero();
  // Maybe this has already been legalized into the constant pool?
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
    return CFP->getValueAPF().isZero();

// isConstantOrUndef - Return true if Op is undef or matches the given value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}
// isVPKUHUMShuffleMask / isVPKUWUMShuffleMask element checks:
  for (unsigned i = 0; i != 16; ++i)
    // ... (normal case: each byte selects an odd source byte)
  for (unsigned i = 0; i != 8; ++i)
    // ... (unary case: both halves select from the LHS)
  for (unsigned i = 0; i != 16; i += 2)
    // ...
  for (unsigned i = 0; i != 8; i += 2)
    // ...
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)      // Step over units.
    for (unsigned j = 0; j != UnitSize; ++j) {    // Step over bytes in unit.
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

// isVMRGLShuffleMask (non-unary case):
  return isVMerge(N, UnitSize, 8, 24);
// isVMRGHShuffleMask (non-unary case):
  return isVMerge(N, UnitSize, 0, 16);
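// For intuition (a hedged example derived from the index arithmetic above,
// not from this listing): with UnitSize == 4, isVMerge(N, 4, 0, 16) accepts
// the vmrghw byte pattern {0,1,2,3, 16,17,18,19, 4,5,6,7, 20,21,22,23}.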
796 "PPC only supports shuffles by bytes!");
802 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
805 if (i == 16)
return -1;
810 if (ShiftAmt < i)
return -1;
815 for (++i; i != 16; ++i)
820 for (++i; i != 16; ++i)
832 (EltSize == 1 || EltSize == 2 || EltSize == 4));
  if (ElementBase >= 16)
    return false;
  // Check that the indices within the first unit are consecutive.
  for (unsigned i = 1; i != EltSize; ++i)
    // ...
  // Check that every other EltSize-byte unit repeats the first one.
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    // ...
    for (unsigned j = 0; j != EltSize; ++j)
      // ...
  }
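// Hedged example: for EltSize == 4, a v16i8 mask splatting word 1 looks like
// {4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7} -- ElementBase is 4 and every 4-byte
// unit must repeat bytes 4..7.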
// Splat-of-(-0.0) check:
  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;
  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    // ...
      return CFP->getValueAPF().isNegZero();
// get_VSPLTI_elt: if the element size is smaller than the requested splat
// size, check whether the values repeat enough to widen the element.
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;  // BV entries per splatted value.
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
    // ...
      if (UniquedVals[i&(Multiple-1)].getNode() == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);  // First time seen.
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        // ... (no match)
    // ...
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].getNode() == 0) continue;  // Must have been undefs.
      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        // ... (splat of an undef: any value works)
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      // ...
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        // ...
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      // ...
    }
  }
// ...
  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 &&
           "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, we can't splat.
  if (ValSizeInBytes < ByteSize)
    return SDValue();

  // If the element value is larger than the splat value, check to see if it
  // consists of a repeated bit pattern of size ByteSize.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }
  // ...
  if (MaskVal == 0)
    return SDValue();
  // Finally, if this value fits in a 5-bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
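// The vspltis* family takes a 5-bit signed immediate, so the test above
// accepts exactly -16..15. A hedged equivalent of the check:
static bool fitsInFiveSignedBitsSketch(int V) {   // hypothetical helper
  return SignExtend32<5>(V) == V;                 // i.e. -16 <= V <= 15
}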
// isIntS16Immediate - Return true if the node's value can be accurately
// represented as a sign extension from a 16-bit value.
  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();

// SelectAddressRegReg: an OR can be selected like an ADD if no corresponding
// bits can be set on both sides.
  APInt LHSKnownZero, LHSKnownOne;
  APInt RHSKnownZero, RHSKnownOne;
  DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
  DAG.ComputeMaskedBits(N.getOperand(1), RHSKnownZero, RHSKnownOne);
  if (~(LHSKnownZero | RHSKnownZero) == 0) {
    // Every bit is known zero on at least one side, so OR behaves like ADD.
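// Hedged illustration of the test above: if for every bit position at least
// one operand is known zero, no carries can occur, hence a | b == a + b.
static bool orActsLikeAddSketch(uint64_t KnownZeroA, uint64_t KnownZeroB) {
  return ~(KnownZeroA | KnownZeroB) == 0;         // hypothetical helper
}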
// SelectAddressRegImm fragments (Aligned forces a multiple-of-4 offset, as
// the DS instruction form requires):
                                            bool Aligned) const {
  // ...
      (!Aligned || (imm & 3) == 0)) {
  // ...
         && "Cannot handle constant offsets yet!");
  // ...
      (!Aligned || (imm & 3) == 0)) {
    // ...
    APInt LHSKnownZero, LHSKnownOne;
    // ...
    if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
  // ...
                       CN->getValueType(0));
  // Handle 32-bit sext immediates with a direct materialization:
  if ((CN->getValueType(0) == MVT::i32 ||
       (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
      (!Aligned || (CN->getZExtValue() & 3) == 0)) {
    int Addr = (int)CN->getZExtValue();
// getPreIndexedAddressParts:
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
  }
  // ...
  if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
    return false;
  // ...
  SDValue Val = cast<StoreSDNode>(N)->getValue();
  // ...
      isa<ConstantSDNode>(Offset))
// GetLabelAccessInfo:
  if (GV->hasHiddenVisibility()) {
    // ...
  }

// LowerConstantPool:
  unsigned MOHiFlag, MOLoFlag;
  // ...
// LowerJumpTable:
  unsigned MOHiFlag, MOLoFlag;
  // ...
// LowerBlockAddress:
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  unsigned MOHiFlag, MOLoFlag;
  // ...

// LowerGlobalTLSAddress:
  bool is64bit = PPCSubTarget.isPPC64();
  // ... PtrVT, GOTReg, TGA);
  // ... PtrVT, TGA, TPOffsetHi);
  // ...
  Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry);
  // ... PtrVT, ParmReg, TGA);
  // ...
  Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry);
  // ... PtrVT, ParmReg, TGA);
  // ... Chain, ParmReg, TGA);

// LowerGlobalAddress:
  unsigned MOHiFlag, MOLoFlag;
  // ... false, false, false, 0);
// LowerSETCC:
  if (VT.bitsLT(MVT::i32)) {
    // ...
  if (C->isAllOnesValue() || C->isNullValue())
    // ...

// LowerVAARG:
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  // ...
  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
  // ...
  SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
                                     /* ... */);
  InChain = OverflowArea.getValue(1);
  SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
                                    /* ... */);
  // ... MVT::i32, false, false, 0);
  // ... false, false, false, 0);

// LowerVACOPY:
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
// LowerINIT_TRAMPOLINE - lowered to a runtime call that fills in the
// trampoline:
  bool isPPC64 = (PtrVT == MVT::i64);
  // ...
  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);
  // ... (trampoline size)
  Args.push_back(Entry);
  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);
  // ... false, false, false, false, 0,
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
// LowerVASTART (32-bit SVR4 va_list: gpr count, fpr count, overflow area,
// register save area):
  uint64_t FPROffset = 1;
  // ...
  uint64_t nextOffset = FPROffset;
  // ...
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
  // Store the overflow (stack) argument area pointer.
  DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, /* ... */);
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
  // Store the register save area pointer.
  return DAG.getStore(thirdStore, dl, FR, nextPtr, /* ... */);
#include "PPCGenCallingConv.inc"

CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
  return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
}
// CC_PPC32_SVR4_Custom_AlignArgRegs: 64-bit values must start in an
// even/odd GPR pair, so skip one register when needed.
  static const uint16_t ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  // ...
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    // ...

// CC_PPC32_SVR4_Custom_AlignFPArgRegs:
  static const uint16_t ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    // ...
  };
  // ...
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    // ...

static const uint16_t FPR[] = {
  PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
  PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
};

// CalculateStackSlotSize - round the arg size up to a pointer-size multiple:
                           unsigned PtrByteSize) {
  // ...
  ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
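// Hedged note: the two round-up forms used throughout this file agree for
// power-of-two alignments:
//   ((Size + A - 1) / A) * A  ==  (Size + (A - 1)) & ~(A - 1)
// e.g. Size = 5, A = 4 yields 8 either way.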
SDValue
PPCTargetLowering::LowerFormalArguments(SDValue Chain, /* ... */) {
  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64())
    return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  if (PPCSubTarget.isSVR4ABI())
    return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
                                     dl, DAG, InVals);
}

SDValue
PPCTargetLowering::LowerFormalArguments_32SVR4(/* ... */) {
  // ...
  unsigned PtrByteSize = 4;
  // ...
  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    // Select the register class for the incoming value:
        RC = &PPC::GPRCRegClass;     // i32
        RC = &PPC::F4RCRegClass;     // f32
        RC = &PPC::F8RCRegClass;     // f64
        RC = &PPC::VRRCRegClass;     // vectors
    // ...
    // ... false, false, false, 0));
  }

  // Aggregates passed by value are stored in the local variable space of the
  // caller's stack frame, right above the parameter list area.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
  // ...
  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
  // ...
  MinReservedArea =
    std::max(MinReservedArea, /* ... minimum call frame size */);
  unsigned TargetAlign = DAG.getMachineFunction().getTarget().
    getFrameLowering()->getStackAlignment();
  unsigned AlignMask = TargetAlign-1;
  MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
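// Hedged sketch of the reserved-area rule applied above: the callee reserves
// at least max(its incoming parameter area, the minimum call frame), rounded
// up to the target stack alignment (names below are assumptions):
static unsigned minReservedAreaSketch(unsigned ParamArea, unsigned MinFrame,
                                      unsigned StackAlign) {
  unsigned Area = ParamArea > MinFrame ? ParamArea : MinFrame;
  return (Area + StackAlign - 1) & ~(StackAlign - 1);
}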
  // If the function takes a variable number of arguments, save the registers
  // that may hold varargs so llvm.va_start can find them.
  static const uint16_t GPArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  // ...
  static const uint16_t FPArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    // ...
  };
  // ...
  int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
              // ...
  // ... CCInfo.getNextStackOffset(), true));

  // The fixed integer arguments of a variadic function are stored to the
  // VarArgsFrameIndex on the stack.
  for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
    // ...
    VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
    // ...
  }
  // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
  // is set.
  for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
    // ...
    VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
    // ...
  }

  if (!MemOps.empty())
    // ... (chain the spill stores together)

// PPCTargetLowering::setMinReservedArea:
                               unsigned nAltivecParamsAtEnd,
                               unsigned MinReservedArea,
                               bool isPPC64) const {
  // Add Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }
  MinReservedArea =
    std::max(MinReservedArea, /* ... minimum call frame size */);
  unsigned TargetAlign
    = DAG.getMachineFunction().getTarget().getFrameLowering()->
        getStackAlignment();
  unsigned AlignMask = TargetAlign-1;
  MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
SDValue
PPCTargetLowering::LowerFormalArguments_64SVR4(/* ... */) {
  // ...
  unsigned PtrByteSize = 8;
  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const uint16_t GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const uint16_t *FPR = GetFPR();
  static const uint16_t VR[] = {
    // ...
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR);
  const unsigned Num_FPR_Regs = 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  // ...
  unsigned nAltivecParamsAtEnd = 0;
  // ...
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    // ...
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    // ...
    unsigned ArgSize = ObjSize;
    // ...
    std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
    CurArgIdx = Ins[ArgNo].OrigArgIndex;
    // ...
    unsigned CurArgOffset = ArgOffset;

    // Altivec parameters are padded to a 16-byte boundary.
      MinReservedArea = ((MinReservedArea+15)/16)*16;
      // ...
      nAltivecParamsAtEnd++;
    // ...
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

    // byval aggregates:
      ArgOffset = ((ArgOffset+BVAlign-1)/BVAlign)*BVAlign;
      CurArgOffset = ArgOffset;
      // Objects smaller than a doubleword are right-justified in their slot.
      if (ObjSize < PtrByteSize)
        CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize);
      // ...
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        // ...
        if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
          EVT ObjType = (ObjSize == 1 ? MVT::i8 :
                         /* ... */);
          // ... ObjType, false, false, 0);
        }
      }
      // ...
      ArgOffset += PtrByteSize;
      // ...
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // ...
        if (GPR_idx != Num_GPR_Regs) {
          // ...
          VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          // ...
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - j;
          break;
        }
      }

    // integer arguments:
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        // ...
        if (ObjectVT == MVT::i32)
          // PPC64 passes i8, i16, and i32 values in i64 registers.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }

    // floating-point arguments (each consumes one GPR slot as well):
      if (GPR_idx != Num_GPR_Regs) {
        // ...
      }
      if (FPR_idx != Num_FPR_Regs) {
        // ...
        VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);  // f32
        // or:
        VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);  // f64
        // ...
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }

    // vector arguments:
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        // ...
        while ((ArgOffset % 16) != 0) {
          ArgOffset += PtrByteSize;
          if (GPR_idx != Num_GPR_Regs)
            GPR_idx++;
        }
        // ...
        GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs);
      } else {
        // Vectors are aligned.
        ArgOffset = ((ArgOffset+15)/16)*16;
        CurArgOffset = ArgOffset;
        // ...
      }

    // If we ran out of registers, materialize the argument from its stack
    // slot (right-justified when smaller than a doubleword):
    //   ... CurArgOffset + (ArgSize - ObjSize),
    //   ... false, false, false, 0);
  }

  // Set the size that is at least reserved in caller of this function.
  setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, true);

  // For varargs, spill the remaining unused GPR argument registers to their
  // home slots.
  int Depth = ArgOffset;
  // ...
  for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
    unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
    // ...
  }

  if (!MemOps.empty())
    // ... (chain the spill stores together)
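// Hedged picture of the 64-bit SVR4 layout handled above: the caller always
// allocates a parameter save area whose first eight doubleword slots shadow
// GPRs X3..X10; FPRs F1..F13 and VRs are consumed in parallel with those
// slots, and anything left over is re-loaded from its shadow slot
// (the needsLoad path).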
SDValue
PPCTargetLowering::LowerFormalArguments_Darwin(/* ... */) {
  // ...
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const uint16_t GPR_32[] = {
    // ...
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const uint16_t GPR_64[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const uint16_t *FPR = GetFPR();
  static const uint16_t VR[] = {
    // ...
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_FPR_Regs = 13;
  // ...
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  // ...
  const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;

  // Pre-compute where vector arguments land when they overflow to memory.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         /* ... */) {
      EVT ObjectVT = Ins[ArgNo].VT;
      // ...
        ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      VecArgOffset += ArgSize;
      // ...
    }
  }
  // ...
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;
  // ...
  unsigned nAltivecParamsAtEnd = 0;
  // ...
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    // ...
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    // ...
    unsigned ArgSize = ObjSize;
    // ...
    std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
    CurArgIdx = Ins[ArgNo].OrigArgIndex;
    // ...
    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64-bit Altivec parameters are padded to 16 bytes.
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        // ...
      } else
        nAltivecParamsAtEnd++;
    // ...
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Small objects are right-justified within a word.
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
      // ...
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          // ...
          VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);  // 64-bit
          // or:
          VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);  // 32-bit
          // ... ObjType, false, false, 0);
          // ...
          ArgOffset += PtrByteSize;
        }
      }
      // ...
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // ...
        if (GPR_idx != Num_GPR_Regs) {
          // ...
          VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);  // 64-bit
          // or:
          VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);  // 32-bit
          // ...
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          break;
        }
      }

    // i32:
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
        // ...
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      // ...
      ArgOffset += PtrByteSize;

    // i64:
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        // ...
        if (ObjectVT == MVT::i32)
          // PPC64 passes i8, i16, and i32 values in i64 registers.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }

    // f32/f64 (each FP arg also consumes GPR slots):
      if (GPR_idx != Num_GPR_Regs) {
        // ...
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          // ... (an f64 consumes two 32-bit GPR slots)
      }
      if (FPR_idx != Num_FPR_Regs) {
        // ...
        VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);  // f32
        // or:
        VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);  // f64
        // ...
      }
      // ...
      ArgOffset += isPPC64 ? 8 : ObjSize;

    // vectors:
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        // ...
        while ((ArgOffset % 16) != 0) {
          ArgOffset += PtrByteSize;
          if (GPR_idx != Num_GPR_Regs)
            GPR_idx++;
        }
        // ...
        GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs);
      } else {
        if (!isVarArg && !isPPC64) {
          // Vectors go to the overflow area computed above.
          CurArgOffset = VecArgOffset;
          // ...
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          // ...
        }
      }

    // Materialize arguments that ran out of registers from the stack:
    //   ... CurArgOffset + (ArgSize - ObjSize),
    //   ... false, false, false, 0);
  }

  // Set the size that is at least reserved in caller of this function.
  setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, isPPC64);

  // For varargs, spill the remaining unused GPR argument registers.
  int Depth = ArgOffset;
  // ...
  for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
    // ...
    VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);  // 64-bit
    // or:
    VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);  // 32-bit
    // ...
  }

  if (!MemOps.empty())
    // ... (chain the spill stores together)
// CalculateParameterAndLinkageAreaSize:
                                     unsigned &nAltivecParamsAtEnd) {
  // ...
  unsigned NumOps = Outs.size();
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  // ...
  nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    // ...
    EVT ArgVT = Outs[i].VT;
    // ...
    if (!isVarArg && !isPPC64) {
      // ...
      nAltivecParamsAtEnd++;
    } else
      // Varargs and 64-bit Altivec parameters are padded to 16 bytes.
      NumBytes = ((NumBytes+15)/16)*16;
    // ...
  }
  // ...
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }
  // ...
  NumBytes = std::max(NumBytes,
                      /* ... minimum call frame size */);
  // ...
  unsigned TargetAlign = /* ... */getFrameLowering()->getStackAlignment();
  unsigned AlignMask = TargetAlign-1;
  NumBytes = (NumBytes + AlignMask) & ~AlignMask;
// CalculateTailCallSPDiff:
                                 unsigned ParamSize) {
  // ...
  if (!isTailCall)
    return 0;
  // ...
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // ...
  if (SPDiff < FI->getTailCallSPDelta())
    // ... (remember the largest stack adjustment seen so far)

bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     /* ... */) {
  // ...
  for (unsigned i = 0; i != Ins.size(); i++) {
    // ...
    if (Flags.isByVal())
      return false;
  }
  // ...
  return G->getGlobal()->hasHiddenVisibility()
      || G->getGlobal()->hasProtectedVisibility();
}

// isBLACompatibleAddress: the absolute branch target must be word-aligned
// and fit the instruction's signed 26-bit displacement field:
  if ((Addr & 3) != 0 ||
      SignExtend32<26>(Addr) != Addr)
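// Hedged helper mirroring the test above: a PPC absolute branch ('bla') can
// reach Addr only under exactly these two conditions.
static bool fitsInBLASketch(int Addr) {           // hypothetical helper
  return (Addr & 3) == 0 && SignExtend32<26>(Addr) == Addr;
}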
struct TailCallArgumentInfo {
  // ...
  TailCallArgumentInfo() : FrameIdx(0) {}
};

// StoreTailCallArgumentsToStackSlot:
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
    // ...
  }

// EmitTailCallStoreFPAndRetAddr:
  int SlotSize = isPPC64 ? 8 : 4;
  // ... NewRetAddrLoc, true);
  // ...
  Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
                       /* ... */);
  // ...
  Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
                       /* ... */);

// CalculateTailCallArgDest:
                        SDValue Arg, int SPDiff, unsigned ArgOffset,
  // ...
  int Offset = ArgOffset + SPDiff;
  // ...
  TailCallArgumentInfo Info;
  // ...
  Info.FrameIdxOp = FIN;

// EmitTailCallLoadFPAndRetAddr:
  LROpOut = getReturnAddrFrameIndex(DAG);
  // ... false, false, false, 0);
  // ...
  FPOpOut = getFramePointerFrameIndex(DAG);
  // ... false, false, false, 0);
// LowerMemOpCallTo:
                             unsigned ArgOffset, bool isPPC64,
                             bool isTailCall, /* ... */

// PrepareTailCall:
                            SDLoc dl, bool isPPC64, int SPDiff,
                            unsigned NumBytes, /* ... */
  // ...
  if (!MemOpChains2.empty())
    // ... &MemOpChains2[0], MemOpChains2.size());
  // ... isPPC64, isDarwinABI, dl);
// PrepareCall:
  bool isPPC64 = PPCSubTarget.isPPC64();
  bool isSVR4ABI = PPCSubTarget.isSVR4ABI();
  // ...
  bool needIndirectCall = true;
  // ...
    needIndirectCall = false;
  // ...
  unsigned OpFlags = 0;
  // ...
      (G->getGlobal()->isDeclaration() ||
       G->getGlobal()->isWeakForLinker())) {
    // ...
  }
  // ...
  needIndirectCall = false;
  // ...
  unsigned char OpFlags = 0;
  // ...
  needIndirectCall = false;
  // ...
  if (needIndirectCall) {
    // ...
    SDValue MTCTROps[] = {Chain, Callee, InFlag};
    // ...
    if (isSVR4ABI && isPPC64) {
      // ... (load the entry point, TOC and environment pointer through the
      //      function descriptor, then move the entry point to CTR)
      MTCTROps[0] = Chain;
      MTCTROps[1] = LoadFuncPtr;
      MTCTROps[2] = InFlag;
    }
    // ...
  }
  // ...
  if (isSVR4ABI && isPPC64)
    // ...
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    // ... RegsToPass[i].second.getValueType()));

// isLocalCall:
  return !G->getGlobal()->isDeclaration() &&
         !G->getGlobal()->isWeakForLinker();
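// Hedged background for the indirect-call path above: under 64-bit SVR4
// (ELFv1) a function pointer names a three-doubleword descriptor -- entry
// point, TOC pointer, environment -- so the sequence loads the real entry
// into CTR and the callee's TOC before branching via BCTRL.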
SDValue
PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   /* ... */) {
  // ...
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // ...
    assert(VA.isRegLoc() && "Can only return in registers!");
    // ...
  }
// FinishCall:
                          bool isTailCall, bool isVarArg,
                          /* ... */
                          int SPDiff, unsigned NumBytes,
                          /* ... */
  std::vector<EVT> NodeTys;
  // ...
  unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
                                 isTailCall, RegsToPass, Ops, NodeTys,
                                 /* ... */);
  // ...
  int BytesCalleePops =
    // ...
  assert(Mask && "Missing call preserved mask for calling convention");
  // ...
          cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
         // ...
          isa<ConstantSDNode>(Callee)) &&
         "Expecting a global address, external symbol, absolute value or "
         "register");
  // ...
  bool needsTOCRestore = false;
  // ...
    needsTOCRestore = true;
  // ...
  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
  // ...
  if (needsTOCRestore) {
    // ...
  }
  // ...
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
// LowerCall dispatch:
  isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                 /* ... */);
  // ...
    return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
                            isTailCall, Outs, OutVals, Ins,
                            /* ... */);
  // ...
    return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
                            isTailCall, Outs, OutVals, Ins,
                            /* ... */);
  // ...
  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
                          isTailCall, Outs, OutVals, Ins,
                          /* ... */);
SDValue
PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
                                    /* ... */) {
  // ...
  unsigned PtrByteSize = 4;
  // ...
  unsigned NumArgs = Outs.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Outs[i].VT;
    // ...
    if (Outs[i].IsFixed) {
      // ...
    }
    // ...
      errs() << "Call operand #" << i << " has unhandled type "
             // ...
  }
  // ...
  CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);

  // Aggregates passed by value land above the parameter list area.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
  // ...
  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the outgoing parameter area.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();
  // ...
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
                                       /* ... */);
  // ...
  bool seenFloatArg = false;
  // ...
  for (unsigned i = 0, j = 0, e = ArgLocs.size();
       /* ... */) {
    // ...
    assert((j < ByValArgLocs.size()) && "Index out of bounds!");
    // ...
    SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                                                   /* ... */);
    DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                           NewCallSeqStart.getNode());
    Chain = CallSeqStart = NewCallSeqStart;
    // ...
    SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
    // ...
    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                       /* ... */));
  }

  if (!MemOpChains.empty())
    // ... &MemOpChains[0], MemOpChains.size());

  // Copy register arguments into place, gluing the copies together.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    // ...
  }
  // ...
  SDValue Ops[] = { Chain, InFlag };
  // ... dl, VTs, Ops, InFlag.getNode() ? 2 : 1);
  // ...
  PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
                  false, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
                    /* ... */);
SDValue
PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                              /* ... */) {
  // ...
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                                                 /* ... */);
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}
SDValue
PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
                                    /* ... */) {
  // ...
  unsigned NumOps = Outs.size();
  // ...
  unsigned PtrByteSize = 8;
  // ...
  unsigned nAltivecParamsAtEnd = 0;
  // ... Outs, OutVals, nAltivecParamsAtEnd);
  // ...
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
                                       /* ... */);
  // ...
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const uint16_t GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const uint16_t *FPR = GetFPR();
  static const uint16_t VR[] = {
    // ...
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  const unsigned NumFPRs = 13;
  // ...
  for (unsigned i = 0; i != NumOps; ++i) {
    // byval aggregates:
      if (BVAlign % PtrByteSize != 0)
        // ... "ByVal alignment is not a multiple of the pointer size");
      ArgOffset = ((ArgOffset+BVAlign-1)/BVAlign)*BVAlign;
      // ...
      if (Size==1 || Size==2 || Size==4) {
        // ...
        if (GPR_idx != NumGPRs) {
          // ...
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          // ...
          ArgOffset += PtrByteSize;
          continue;
        }
      }
      if (GPR_idx == NumGPRs && Size < 8) {
        // ...
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          /* ... */);
        // ...
        ArgOffset += PtrByteSize;
        continue;
      }
      // ...
      Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                        /* ... */);
      // ...
      if (Size < 8 && GPR_idx != NumGPRs) {
        // ...
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          /* ... */);
        // ... false, false, false, 0);
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        // ...
        ArgOffset += PtrByteSize;
        continue;
      }
      // ...
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        // ...
        if (GPR_idx != NumGPRs) {
          // ... false, false, false, 0);
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }

    // integer arguments:
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }
      ArgOffset += PtrByteSize;

    // floating-point arguments:
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
        // ...
        if (GPR_idx != NumGPRs) {
          // ... (also pass the value in the shadowing GPR for varargs)
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
      } else if (GPR_idx != NumGPRs)
        // ...
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);

    // vector arguments:
      while (ArgOffset % 16 !=0) {
        ArgOffset += PtrByteSize;
        if (GPR_idx != NumGPRs)
          GPR_idx++;
      }
      // ...
      if (VR_idx != NumVRs) {
        // ... false, false, false, 0);
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
      }
      // ...
      for (unsigned i=0; i<16; i+=PtrByteSize) {
        if (GPR_idx == NumGPRs)
          break;
        // ... false, false, false, 0);
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
      }
      // ...
      if (VR_idx != NumVRs) {
        // ...
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
      }
  }

  if (!MemOpChains.empty())
    // ... &MemOpChains[0], MemOpChains.size());

  // For indirect calls, pass the callee in X12 as well:
  if (/* ... */ !dyn_cast<GlobalAddressSDNode>(Callee) &&
      /* ... */)
    RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));

  // ...
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    // ...
  }
  // ...
  PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
                  FPOp, true, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
                    /* ... */);
SDValue
PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
                                    /* ... */) {
  // ...
  unsigned NumOps = Outs.size();
  // ...
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  // ...
  unsigned nAltivecParamsAtEnd = 0;
  // ... nAltivecParamsAtEnd);
  // ...
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
                                       /* ... */);
  // ...
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const uint16_t GPR_32[] = {
    // ...
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const uint16_t GPR_64[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const uint16_t *FPR = GetFPR();
  static const uint16_t VR[] = {
    // ...
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  const unsigned NumFPRs = 13;
  // ...
  const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;

  for (unsigned i = 0; i != NumOps; ++i) {
    // byval aggregates:
      if (Size==1 || Size==2) {
        // ...
        if (GPR_idx != NumGPRs) {
          // ...
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          // ...
          ArgOffset += PtrByteSize;
        } else {
          // ...
          Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                            /* ... */);
          // ...
          ArgOffset += PtrByteSize;
        }
        continue;
      }
      // ...
      Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                        /* ... */);
      // ...
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        // ...
        if (GPR_idx != NumGPRs) {
          // ... false, false, false, 0);
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }

    // integer arguments:
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }
      ArgOffset += PtrByteSize;

    // floating-point arguments:
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
        // ...
        if (GPR_idx != NumGPRs) {
          // ...
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          // ... false, false, false, 0);
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        // ...
        if (GPR_idx != NumGPRs)
          // ...
      } else {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }

    // vector arguments:
      while (ArgOffset % 16 !=0) {
        ArgOffset += PtrByteSize;
        if (GPR_idx != NumGPRs)
          GPR_idx++;
      }
      // ...
      if (VR_idx != NumVRs) {
        // ... false, false, false, 0);
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
      }
      // ...
      for (unsigned i=0; i<16; i+=PtrByteSize) {
        if (GPR_idx == NumGPRs)
          break;
        // ... false, false, false, 0);
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
      }
      // ...
      if (VR_idx != NumVRs) {
        // ...
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else if (nAltivecParamsAtEnd==0) {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
      }
  }

  // Handle Altivec parameters that overflowed the VRs at the end.
  if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
    // ...
    ArgOffset = ((ArgOffset+15)/16)*16;
    // ...
    for (unsigned i = 0; i != NumOps; ++i) {
      // ...
      EVT ArgType = Outs[i].VT;
      // ...
      LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                       isPPC64, isTailCall, true, MemOpChains,
                       TailCallArguments, dl);
    }
  }

  if (!MemOpChains.empty())
    // ... &MemOpChains[0], MemOpChains.size());

  // For indirect calls, pass the callee in R12/X12 as well:
  if (/* ... */ !dyn_cast<GlobalAddressSDNode>(Callee) &&
      /* ... */)
    RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
                                                   PPC::R12), Callee));

  // ...
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    // ...
  }
  // ...
  PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
                  FPOp, true, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
                    /* ... */);
// CanLowerReturn:
  return CCInfo.CheckReturn(Outs, RetCC_PPC);

SDValue
PPCTargetLowering::LowerReturn(SDValue Chain, /* ... */) {
  // ...
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    // ...
    assert(VA.isRegLoc() && "Can only return in registers!");
    // ...
  }
  // ...
  RetOps.push_back(Flag);
  // ... &RetOps[0], RetOps.size());
}

// Stack save/restore lowering:
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  // ... false, false, false, 0);
SDValue
PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
  // ...
  bool isPPC64 = PPCSubTarget.isPPC64();
  // ...
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
  // ...
  bool isPPC64 = PPCSubTarget.isPPC64();
  // ...
}

// LowerDYNAMIC_STACKALLOC:
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // ...
  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
// LowerFP_TO_INT:
  if (Src.getValueType() == MVT::f32)
    // ... (extend to f64 first)
  // ... "i64 FP_TO_UINT is supported only with FPCVT");
  // ...
  int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
  // ... MPI, false, false, 0);
  // ... false, false, false, 0);

// LowerINT_TO_FP:
  // ... "UINT_TO_FP is supported only with FPCVT");
  // ... "Unhandled INT_TO_FP type in custom expander!");
  // ...
  assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
         "Expected an i32 store");
  // ...
  assert(PPCSubTarget.isPPC64() &&
         "i32->FP without LFIWAX supported only on PPC64");
  // ...
  Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx,
                   /* ... */ false, false, false, 0);
  // ...
  // ... false, false, false, 0);
// LowerSHL_PARTS / LowerSRL_PARTS / LowerSRA_PARTS each produce a lo/hi pair:
  SDValue OutOps[] = { OutLo, OutHi };
  // ...
  SDValue OutOps[] = { OutLo, OutHi };
  // ...
  SDValue OutOps[] = { OutLo, OutHi };

// BuildSplatI - build a canonical splat of Val with an element size of
// SplatSize.
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
  static const EVT VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };
  // ...
  EVT CanonicalVT = VTys[SplatSize-1];
  // ... &Ops[0], Ops.size());
// LowerBUILD_VECTOR:
  for (unsigned i = 0; i != 16; ++i)
    // ...
  // ...
  assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");

  // Check if this is a splat of a constant value.
  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, 0, true) || SplatBitSize > 32)
    return SDValue();
  // ...
  unsigned SplatSize = SplatBitSize / 8;

  // First: are we looking at a splat of zero?
  if (SplatBits == 0) {
    // ...
  }

  // If the sign-extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
  if (SextVal >= -16 && SextVal <= 15)
    // ... (single vspltis[bhw])
  // ...
  // Two-instruction sequences, e.g. VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2):
  if (SextVal >= -32 && SextVal <= 31) {
    // ...
  }
  // ...
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // ...
  }
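// Hedged worked example of the sign extension above: SplatBits = 0xE0 with
// SplatBitSize = 8 gives (int32_t)(0xE0 << 24) >> 24 == -32, i.e. an 8-bit
// splat of 0xE0 is the value -32 for the vspltisb range checks.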
  // Magic splat constants, checked in order.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous.
    int i = SplatCsts[idx];
    // ...
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        // ...
      };
      // ...
    }
    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      static const unsigned IIDs[] = {
        // ...
      };
      // ...
    }
    // vsplti + sra self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      static const unsigned IIDs[] = {
        // ...
      };
      // ...
    }
    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      static const unsigned IIDs[] = {
        // ...
      };
      // ...
    }
    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      // ...
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      // ...
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      // ...
    }
  }
// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table,
// emit the instructions to build the shuffle.
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
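// Hedged view of the packed perfect-shuffle entry decoded above:
//   bits 31..30  cost, bits 29..26  opcode, bits 25..13  LHS id,
//   bits 12..0   RHS id, where the ids are base-9 encodings of the four
//   selected elements (undef counts as a ninth state).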
  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }
  // ...
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  // Check to see if this is a shuffle of 4-byte values.  If so, we can use
  // the perfect shuffle table to emit an optimal matching sequence.
  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {   // Intra-element byte.
      if (PermMask[i*4+j] < 0)
        continue;   // Undef, ignore it.

      unsigned ByteSource = PermMask[i*4+j];
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle table to decide if it is cost-effective.
  if (isFourElementShuffle) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    // ...
    unsigned Cost = (PFEntry >> 30);
  // Fall back to a VPERM: compute the byte-level permute control vector.
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
    // ...
    for (unsigned j = 0; j != BytesPerElement; ++j)
      // ... (push SrcElt*BytesPerElement+j)
  }
  // ... &ResultMask[0], ResultMask.size());
// getAltivecCompareInfo - Given an Altivec comparison intrinsic, return
// false if it is not a comparison; otherwise set CompareOpc and isDot.
  unsigned IntrinsicID =
    cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  // ...
  switch (IntrinsicID) {
  default: return false;
  // ... (vcmp* predicate and non-predicate forms)
  }

// LowerINTRINSIC_WO_CHAIN - unpack which CR6 bit the predicate reads:
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  // ...
    BitNo = 0; InvertBit = false;   // value of the EQ bit of CR6
    // ...
    BitNo = 0; InvertBit = true;    // inverted EQ bit of CR6
    // ...
    BitNo = 2; InvertBit = false;   // value of the LT bit of CR6
    // ...
    BitNo = 2; InvertBit = true;    // inverted LT bit of CR6
  }
// LowerSCALAR_TO_VECTOR tail:
  // ... false, false, false, 0);

// LowerMUL (v8i16): a single multiply-add intrinsic against zero:
  // ... LHS, RHS, Zero, DAG, dl);

// LowerMUL (v16i8): multiply even/odd halves, then interleave the results:
  for (unsigned i = 0; i != 8; ++i) {
    // ...
    Ops[i*2+1] = 2*i+1+16;
  }
// LowerOperation dispatch:
  case ISD::VASTART:
    return LowerVASTART(Op, DAG, PPCSubTarget);
  case ISD::VAARG:
    return LowerVAARG(Op, DAG, PPCSubTarget);
  case ISD::VACOPY:
    return LowerVACOPY(Op, DAG, PPCSubTarget);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
  // ...
  case ISD::MUL:                return LowerMUL(Op, DAG);

// ReplaceNodeResults:
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  // ...
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        /* ... */)
      break;
    // ... "Unexpected result type for CTR decrement intrinsic");
MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                    bool is64bit, unsigned BinOpcode) const {
  // ...
  unsigned TmpReg = (!BinOpcode) ? incr :
    /* ... a fresh virtual register */;
  // ...
  BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
    .addReg(ptrA).addReg(ptrB);
  // ... (apply BinOpcode to dest and incr, producing TmpReg)
  BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
    /* ... */;
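// Hedged shape of the loop emitted above (illustrative assembly only):
//   loop:
//     l[wd]arx  dest, ptrA, ptrB     ; load and reserve
//     <binop>   tmp, incr, dest
//     st[wd]cx. tmp, ptrA, ptrB      ; store-conditional
//     bne-      loop                 ; lost the reservation: retry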
MachineBasicBlock *
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool is8bit,
                                            unsigned BinOpcode) const {
  // ...
  bool is64bit = PPCSubTarget.isPPC64();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
  // ...
  if (ptrA != ZeroReg) {
    // ...
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
      .addReg(ptrA).addReg(ptrB);
  }
  // ...
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
    /* ... */;
  BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
    .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
  // ...
  BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
    /* ... */;
  // ...
  BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
    /* ... */;
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
    .addReg(incr).addReg(ShiftReg);
  // ...
  BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg).addReg(Mask3Reg).addImm(65535);
  // ...
    .addReg(Mask2Reg).addReg(ShiftReg);
  // ...
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
    .addReg(ZeroReg).addReg(PtrReg);
  // ...
    .addReg(Incr2Reg).addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
    .addReg(TmpDestReg).addReg(MaskReg);
  // ...
    .addReg(TmpReg).addReg(MaskReg);
  // ...
    .addReg(Tmp3Reg).addReg(Tmp2Reg);
  // ...
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
    /* ... */;
// emitEHSjLjSetJmp:
  assert(RC->hasType(MVT::i32) && "Invalid destination!");
  // ...
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // ...
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
    /* ... */;
  // Base register depends on whether a base pointer is needed:
  BaseReg = PPCSubTarget.isPPC64() ? PPC::X1 : PPC::R1;
  // ... or:
  BaseReg = PPCSubTarget.isPPC64() ? PPC::BP8 : PPC::BP;
  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(PPCSubTarget.isPPC64() ? PPC::STD : PPC::STW))
    /* ... */;
  // ...
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  // ...
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
    /* ... */;
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
  // ...
  BuildMI(mainMBB, DL,
          TII->get(PPCSubTarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
  // Store the label for the landing pad:
  MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
    /* ... */;      // 64-bit
  // ... or:
  MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
    /* ... */;      // 32-bit
  // ...
  //   .addReg(mainDstReg).addMBB(mainMBB)
  // ...

// emitEHSjLjLongJmp:
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  // ...
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1  : PPC::R1;
  unsigned BP = (PVT == MVT::i64) ? PPC::X30 : PPC::R30;
  // ...
  // Reload FP, the landing-pad IP, SP and BP (LD on 64-bit, LWZ on 32-bit):
  MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
    /* ... */;
  // ... .addImm(LabelOffset)
  MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
    .addImm(LabelOffset) /* ... */;
  // ...
  MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
    /* ... */;
  // ...
  MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
    /* ... */;
// EmitInstrWithCustomInserter:
  if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 ||
      MI->getOpcode() == PPC::EH_SjLj_SetJmp64) {
    return emitEHSjLjSetJmp(MI, BB);
  } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 ||
             MI->getOpcode() == PPC::EH_SjLj_LongJmp64) {
    return emitEHSjLjLongJmp(MI, BB);
  }
  // ...
  // (select_cc pseudo-instructions)
      MI->getOpcode() == PPC::SELECT_CC_I8)) {
    // ...
  } else if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
             // ...
             MI->getOpcode() == PPC::SELECT_CC_VRRC) {
  // Atomic pseudo expansion (bodies reconstructed from the surrounding
  // pattern: each case dispatches to EmitAtomicBinary or
  // EmitPartwordAtomicBinary with the matching PPC opcode):
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8);

  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::AND8);

  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::OR8);

  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8);

  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::NAND);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::NAND8);

  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8);

  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, false, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, true, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) {
    bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
    // ...
    // loopMBB:
    BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
      .addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
      .addReg(oldval).addReg(dest);
    // ... (branch to midMBB if the comparison failed)
    // loop2MBB:
    BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
      /* ... */;
    // ... (retry on reservation loss)
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    // midMBB: store the loaded value back to release the reservation.
    BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
      /* ... */;
    // ...
  }
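// Hedged shape of the compare-and-swap loop emitted above:
//   loop:  l[wd]arx  dest, ptrA, ptrB
//          cmp[wd]   CR0, oldval, dest   ; bne mid (value mismatch)
//          st[wd]cx. newval, ptrA, ptrB  ; bne loop (reservation lost)
//          b exit
//   mid:   st[wd]cx. dest, ptrA, ptrB    ; clear the reservation
//   exit: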
  else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
           MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
    // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them.
    bool is64bit = PPCSubTarget.isPPC64();
    bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
    // ...
    unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
    // ...
    if (ptrA != ZeroReg) {
      // ...
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA).addReg(ptrB);
    }
    // ...
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
      /* ... */;
    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
      .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
    // ...
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
      /* ... */;
    // ...
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
      /* ... */;
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
      .addReg(newval).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
      .addReg(oldval).addReg(ShiftReg);
    // ...
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
      .addReg(Mask3Reg).addImm(65535);
    // ...
      .addReg(Mask2Reg).addReg(ShiftReg);
    // ...
      .addReg(NewVal2Reg).addReg(MaskReg);
    // ...
      .addReg(OldVal2Reg).addReg(MaskReg);
    // loop1MBB:
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
      .addReg(ZeroReg).addReg(PtrReg);
    // ...
      .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
      .addReg(TmpReg).addReg(OldVal3Reg);
    // ...
    // loop2MBB:
    // ...
      .addReg(TmpDestReg).addReg(MaskReg);
    // ...
      .addReg(Tmp2Reg).addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
      /* ... */;
    // ...
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    // midMBB:
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
      /* ... */;
    // ...
  }
  else if (MI->getOpcode() == PPC::FADDrtz) {
    // FADDrtz performs an FADD with rounding mode temporarily forced to
    // round-to-zero (FPSCR RN bits 30:31 set to 0b01), since the FPSCR is
    // not modeled at the SelectionDAG level.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
    // ... (perform the FADD, then restore the rounding mode)
// DAGCombineFastRecip:
                                               DAGCombinerInfo &DCI) const {
  if (DCI.isAfterLegalizeVectorOps())
    return SDValue();
  // ...
      (VT == MVT::f64 && PPCSubTarget.hasFRE()) ||
  // ...
         "Unknown vector type");
  // ... FPOne, FPOne, FPOne, FPOne);
  // ...
  DCI.AddToWorklist(Est.getNode());

  // Newton iterations: Est = Est + Est (1 - Arg * Est)
  for (int i = 0; i < Iterations; ++i) {
    SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Op, Est);
    DCI.AddToWorklist(NewEst.getNode());
    NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPOne, NewEst);
    DCI.AddToWorklist(NewEst.getNode());
    NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst);
    DCI.AddToWorklist(NewEst.getNode());
    Est = DAG.getNode(ISD::FADD, dl, VT, Est, NewEst);
    DCI.AddToWorklist(Est.getNode());
  }

// DAGCombineFastRecipFSQRT:
                                                    DAGCombinerInfo &DCI) const {
  if (DCI.isAfterLegalizeVectorOps())
    return SDValue();
  // ...
      (VT == MVT::f64 && PPCSubTarget.hasFRSQRTE()) ||
  // ...
         "Unknown vector type");
  // ... FPThreeHalves, FPThreeHalves,
  //     FPThreeHalves, FPThreeHalves);
  // ...
  DCI.AddToWorklist(Est.getNode());

  // We now need 0.5 * Arg, which we can write as (1.5 * Arg - Arg) so that
  // the whole sequence needs only the one FP constant.
  SDValue HalfArg = DAG.getNode(ISD::FMUL, dl, VT, FPThreeHalves, Op);
  DCI.AddToWorklist(HalfArg.getNode());
  HalfArg = DAG.getNode(ISD::FSUB, dl, VT, HalfArg, Op);
  DCI.AddToWorklist(HalfArg.getNode());

  // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
  for (int i = 0; i < Iterations; ++i) {
    SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, Est);
    DCI.AddToWorklist(NewEst.getNode());
    NewEst = DAG.getNode(ISD::FMUL, dl, VT, HalfArg, NewEst);
    DCI.AddToWorklist(NewEst.getNode());
    NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPThreeHalves, NewEst);
    DCI.AddToWorklist(NewEst.getNode());
    Est = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst);
    DCI.AddToWorklist(Est.getNode());
  }
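// Hedged summary of the refinements implemented above:
//   reciprocal:  e' = e + e*(1 - x*e)          -> converges to 1/x
//   rsqrt:       e' = e * (1.5 - (x/2)*e*e)    -> converges to 1/sqrt(x)
// Each Newton iteration roughly doubles the number of correct bits of the
// hardware estimate (fre/frsqrte).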
// isConsecutiveLS - Return true if the node is a load/store at distance Dist
// (in units of Bytes) from Base.
                            unsigned Bytes, int Dist,
  // ...
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    // ...
    if (FS != BFS || FS != (int)Bytes)
      return false;
    // ...
      cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
  // ...
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  // ...
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);

// findConsecutiveLoad - walk the token chain looking for related loads.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext))
      continue;

    if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(ChainNext)) {
      // ...
      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (/* token factor */) {
      for (SDNode::op_iterator O = ChainNext->op_begin(),
           OE = ChainNext->op_end(); O != OE; ++O)
        if (!Visited.count(O->getNode()))
          Queue.push_back(O->getNode());
    } else
      LoadRoots.insert(ChainNext);
  }
  // ...
  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);
    // ...
  }
  while (!Queue.empty()) {
    SDNode *LoadRoot = Queue.pop_back_val();
    if (!Visited.insert(LoadRoot))
      continue;

    if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(LoadRoot))
      // ... (check consecutiveness against the original load)
    for (SDNode::use_iterator UI = LoadRoot->use_begin(),
         UE = LoadRoot->use_end(); UI != UE; ++UI)
      if (((isa<LoadSDNode>(*UI) &&
            cast<LoadSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
           /* ... */) && !Visited.count(*UI))
        Queue.push_back(*UI);
  }
  // Shift combines: shifting zero yields zero.
  if (C->isNullValue())
    // ...
  if (C->isNullValue())
    // ...
  // sra: 0 and -1 are unchanged by an arithmetic shift.
  if (C->isNullValue() ||
      C->isAllOnesValue())
    // ...
  // FDIV / FSQRT reciprocal-estimate combines:
  // ... "Reciprocal estimates require UnsafeFPMath");
  // ...
  // ... "Reciprocal estimates require UnsafeFPMath");
  // ...
  RV = DAGCombineFastRecip(RV, DCI);
// PerformDAGCombine, ISD::STORE: turn STORE (BSWAP) into a byte-reversed
// store.
  // ... !cast<StoreSDNode>(N)->isTruncatingStore() &&
  // ...
  // ... cast<StoreSDNode>(N)->getMemoryVT(),
  //     cast<StoreSDNode>(N)->getMemOperand());
  // Second store combine:
  if (cast<StoreSDNode>(N)->isUnindexed() &&
      // ...
  // ... cast<StoreSDNode>(N)->getMemoryVT(),
  //     cast<StoreSDNode>(N)->getMemOperand());

// ISD::LOAD: combine an unaligned Altivec load into a permuted load pair.
  int IncOffset = VT.getSizeInBits() / 8;
  int IncValue = IncOffset;
  // ...
  // ... DAG.getLoad(VT, dl, Chain, Ptr, /* ... */);
  // ... BaseLoad, ExtraLoad, PermCntl, DAG, dl);
  // (scan the operand list of each user)
  for (SDNode::op_iterator O = User->op_begin(),
       OE = User->op_end(); O != OE; ++O) {
    // ...
  }
  // ...
  if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() ==
      /* ... */)
    // ...
  for (SDNode::use_iterator UI = BasePtr->use_begin(),
       UE = BasePtr->use_end(); UI != UE; ++UI) {
    if (/* ... */
        cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
        /* ... */)
      // ...
  }
  // ...
  // PPCISD::VCMPo: find the single user of the flag result.
  for (SDNode::use_iterator UI = VCMPoNode->use_begin();
       FlagUser == 0; ++UI) {
    assert(UI != VCMPoNode->use_end() && "Didn't find user!");
    SDNode *User = *UI;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
      if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
        FlagUser = User;
        break;
      }
    }
  }

  // ISD::BR_CC on the CTR-decrement intrinsic:
  // ... !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()->
  //       isZero())
  // ...
  if (/* ... */
      cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
        /* ... */ &&
      isa<ConstantSDNode>(RHS)) {
    assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
           "Counter decrement comparison is not EQ or NE");
    unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
    // ...
    // ... "Counter decrement has more than one use");
  }

  // Branch on an Altivec comparison result:
  assert(isDot && "Can't compare against a vector result!");
  // Comparisons against anything other than 0 or 1 need normalizing first.
  unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
  if (Val != 0 && Val != 1) {
    // ...
  }
  // ...
  bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
  // ...
  switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
// computeMaskedBitsForTargetNode:
                                       unsigned Depth) const {
  // ...
  // lhbrx is known to have the top bits cleared out:
    KnownZero = 0xFFFF0000;
  // ...
  switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
// getConstraintType:
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    // ...
    }
  }

// getSingleConstraintMatchWeight:
  if (CallOperandVal == NULL)
    // ...
  switch (*constraint) {
  // ...
  }

std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 constraint letters:
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &PPC::F8RCRegClass);
      break;
    case 'v':
      return std::make_pair(0U, &PPC::VRRCRegClass);
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  }

  std::pair<unsigned, const TargetRegisterClass*> R =
    TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // On PPC64, promote an 'r' match to the matching 64-bit super-register:
  if (/* ... */
      PPC::GPRCRegClass.contains(R.first)) {
    // ...
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);
  }
// LowerAsmOperandForConstraint:
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     /* ... */
  // Only support length-1 constraints for now.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  // ...
    if ((short)Value == (int)Value)        // signed 16-bit constant
      // ...
    if ((short)Value == 0)                 // only the high 16 bits set
      // ...
    if ((Value >> 16) == 0)                // unsigned 16-bit constant
      // ...
    if ((short)-Value == (int)-Value)      // negation is a signed 16-bit value
      // ...
  // ...
  Ops.push_back(Result);
// LowerRETURNADDR:
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  // ...
  bool isPPC64 = PPCSubTarget.isPPC64();
  // ...
  SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
  // ...
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  // ...

// LowerFRAMEADDR:
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  // ...
  FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  // ... or, when a frame pointer is in use:
  FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
  // ...

// getOptimalMemOpType:
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           /* ... */
  if (this->PPCSubTarget.isPPC64()) {
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
void setFrameAddressIsTaken(bool T)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
void push_back(const T &Elt)
const MachineFunction * getParent() const
The memory access reads data.
SDValue getConstant(uint64_t Val, EVT VT, bool isTarget=false)
SDValue getValue(unsigned R) const
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
The memory access writes data.
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
void setVarArgsNumGPR(unsigned Num)
bool use64BitRegs() const
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
LLVMContext * getContext() const
SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N)
uint64_t getZExtValue() const
Get zero extended value.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, SDLoc DL)
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, unsigned DstReg, const SmallVectorImpl< MachineOperand > &Cond, unsigned TrueReg, unsigned FalseReg) const
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize)
static const uint16_t * GetFPR()
static cl::opt< bool > DisablePPCUnaligned("disable-ppc-unaligned", cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden)
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, SelectionDAG &DAG)
int isVSLDOIShuffleMask(SDNode *N, bool isUnary)
Reloc::Model getRelocationModel() const
static void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, SDValue LROp, SDValue FPOp, bool isDarwinABI, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
Return with a flag operand, matched by 'blr'.
LocInfo getLocInfo() const
void setSupportJumpTables(bool Val)
Indicate whether the target can generate code for jump tables.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast=0) const
bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
MachineBasicBlock * EmitPartwordAtomicBinary(MachineInstr *MI, MachineBasicBlock *MBB, bool is8bit, unsigned Opcode) const
const TargetMachine & getTargetMachine() const
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, SDLoc dl, EVT DestVT=MVT::Other)
enable_if_c<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
void setLRStoreRequired()
const TargetMachine & getTarget() const
virtual const uint32_t * getCallPreservedMask(CallingConv::ID) const
virtual FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f, uint64_t s, unsigned base_alignment, const MDNode *TBAAInfo=0, const MDNode *Ranges=0)
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
virtual ConstraintType getConstraintType(const std::string &Constraint) const
Given a constraint, return the type of constraint it is for this target.
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue OldRetAddr, SDValue OldFP, int SPDiff, bool isPPC64, bool isDarwinABI, SDLoc dl)
MO_LO, MO_HA - lo16(symbol) and ha16(symbol)
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG)
const GlobalValue * getGlobal() const
bool enableMachineScheduler() const
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
unsigned getOpcode() const
SDValue getSelectCC(SDLoc DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
void setFramePointerSaveIndex(int Idx)
Type * getTypeForEVT(LLVMContext &Context) const
unsigned getSizeInBits() const
iterator insert(iterator I, const T &Elt)
bool isDoubleTy() const
isDoubleTy - Return true if this is 'double', a 64-bit IEEE fp type.
unsigned getByValSize() const
unsigned getNumOperands() const
bool isDarwin() const
isDarwin - True if this is any darwin platform.
void setBooleanVectorContents(BooleanContent Ty)
unsigned getNumOperands() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB)
const SDValue & getOperand(unsigned Num) const
const Function * getFunction() const
unsigned getVarArgsNumGPR() const
static bool isLocalCall(const SDValue &Callee)
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
void ComputeMaskedBits(SDValue Op, APInt &KnownZero, APInt &KnownOne, unsigned Depth=0) const
bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
unsigned getValNo() const
const SDValue & getBasePtr() const
void setVarArgsNumFPR(unsigned Num)
int getReturnAddrSaveIndex() const
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
LoopInfoBase< BlockT, LoopT > * LI
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
SDValue getExternalSymbol(const char *Sym, EVT VT)
CallingConv::ID getCallingConv() const
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
static MachinePointerInfo getFixedStack(int FI, int64_t offset=0)
bool hasLazyResolverStub(const GlobalValue *GV, const TargetMachine &TM) const
virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, bool isUnary)
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, bool isInvariant, unsigned Alignment, const MDNode *TBAAInfo=0, const MDNode *Ranges=0)
bool isVector() const
isVector - Return true if this is a vector value type.
void setVarArgsStackOffset(int Offset)
static error_code advance(T &it, size_t Val)
std::string getEVTString() const
getEVTString - This function returns value type as a string, e.g. "i32".
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, unsigned Alignment, const MDNode *TBAAInfo=0)
int64_t getOffset() const
const HexagonInstrInfo * TII
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr *MI, MachineBasicBlock *MBB) const
static cl::opt< bool > DisablePPCPreinc("disable-ppc-preinc", cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden)
#define llvm_unreachable(msg)
EVT getValueType(unsigned ResNo) const
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, SelectionDAG &DAG, SDLoc dl)
MachineFunction & getMachineFunction() const
virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const
unsigned getMinReservedArea() const
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign, unsigned MaxMaxAlign)
SDValue getTargetGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
bool isJITCodeModel() const
const TargetRegisterClass * getRegClass(unsigned Reg) const
int getVarArgsFrameIndex() const
void assign(unsigned NumElts, const T &Elt)
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
MachinePointerInfo getWithOffset(int64_t O) const
EVT getScalarType() const
Abstract Stack Frame Information.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
int getMaskElt(unsigned Idx) const
SDVTList getVTList(EVT VT)
unsigned getStoreSize() const
virtual MVT getPointerTy(uint32_t=0) const
ID
LLVM Calling Convention Representation.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary)
const MachineInstrBuilder & addImm(int64_t Val) const
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
bool getBoolValue() const
Convert APInt to a boolean value.
SDValue getConstantFP(double Val, EVT VT, bool isTarget=false)
SmallVector< ISD::InputArg, 32 > Ins
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT)
MachineBasicBlock * EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *MBB, bool is64Bit, unsigned BinOpcode) const
EVT getVectorElementType() const
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, SDLoc DL)
bool isMacOSXVersionLT(unsigned Major, unsigned Minor=0, unsigned Micro=0) const
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
unsigned getLocReg() const
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned LHSStart, unsigned RHSStart)
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
size_t array_lengthof(T(&)[N])
Find the length of an array.
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
SDValue getRegisterMask(const uint32_t *RegMask)
static unsigned getLinkageSize(bool isPPC64, bool isDarwinABI)
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue CombineTo(SDNode *N, const std::vector< SDValue > &To, bool AddTo=true)
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const TargetRegisterClass *RC) const
SDValue getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, SDValue N2, const int *MaskElts)
SmallVector< ISD::OutputArg, 32 > Outs
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
Reciprocal estimate instructions (unary FP ops).
EVT getMemoryVT() const
getMemoryVT - Return the type of the in-memory value.
int getFramePointerSaveIndex() const
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is 0.0 or -0.0.
bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, bool &isDot)
const BasicBlock * getBasicBlock() const
UNDEF - An undefined node.
uint32_t FloatToBits(float Float)
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false)
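A minimal sketch of the usual call pattern (BVN is assumed to be a BuildVectorSDNode*):

// Sketch only: test whether a BUILD_VECTOR is a uniform constant splat.
APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs) &&
    SplatBitSize <= 32) {
  uint64_t Bits = SplatBits.getZExtValue(); // the replicated element value
}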
void setTailCallSPDelta(int size)
mmo_iterator memoperands_end() const
SDNode * getNode() const
Get the SDNode which holds the desired result.
PPCTargetLowering(PPCTargetMachine &TM)
bundle_iterator< MachineInstr, instr_iterator > iterator
unsigned int getFlags() const
getFlags - Return the raw flags of the source value.
const MachineInstrBuilder & setMemRefs(MachineInstr::mmo_iterator b, MachineInstr::mmo_iterator e) const
virtual void computeMaskedBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth=0) const
Control flow instructions. These all have token chains.
static void LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVectorImpl< SDValue > &MemOpChains, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments, SDLoc dl)
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, bool isUnary)
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=0)
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, SDLoc dl)
LLVM Basic Block Representation.
const Triple & getTargetTriple() const
const SDValue & getOperand(unsigned i) const
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Simple binary floating point operators.
void setTargetDAGCombine(ISD::NodeType NT)
void setReturnAddrSaveIndex(int idx)
bool isNonTemporal() const
LLVM Constant Representation.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags, unsigned &LoOpFlags, const GlobalValue *GV=0)
const Constant * getConstVal() const
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
const MachineOperand & getOperand(unsigned i) const
unsigned getVarArgsNumFPR() const
static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall, unsigned ParamSize)
bool isFloatTy() const
isFloatTy - Return true if this is 'float', a 32-bit IEEE fp type.
unsigned getLiveInVirtReg(unsigned PReg) const
void setBooleanContents(BooleanContent Ty)
void AddToWorklist(SDNode *N)
static Type * getVoidTy(LLVMContext &C)
static void CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, SDValue Arg, int SPDiff, unsigned ArgOffset, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
ItTy next(ItTy it, Dist n)
SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT)
static bool isIntS16Immediate(SDNode *N, short &Imm)
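A hedged sketch of how such a predicate is commonly consumed when forming a reg+imm16 address; Disp, N, and DAG are assumptions, and this is not the exact selection logic:

// Sketch only: fold a node's constant operand into a signed 16-bit displacement.
short Imm;
if (isIntS16Immediate(N.getOperand(1).getNode(), Imm))
  Disp = DAG.getTargetConstant((int)Imm, N.getValueType());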
const DataLayout * getDataLayout() const
unsigned getBitWidth() const
Return the number of bits in the APInt.
void setVarArgsFrameIndex(int Index)
unsigned getOpcode() const
TRAP - Trapping instruction.
static bool isConsecutiveLS(LSBaseSDNode *LS, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
unsigned GuaranteedTailCallOpt
void setPrefFunctionAlignment(unsigned Align)
bool has64BitSupport() const
static const unsigned NumArgRegs
static void StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, SDValue Chain, const SmallVectorImpl< TailCallArgumentInfo > &TailCallArgs, SmallVectorImpl< SDValue > &MemOpChains, SDLoc dl)
StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
bool isAllNegativeZeroVector(SDNode *N)
use_iterator use_begin() const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
Bit counting operators with an undefined result for zero inputs.
MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL, const MCInstrDesc &MCID)
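The builder composes with the addReg/addImm helpers listed elsewhere in this index. A minimal sketch (MBB, MI, dl, TII, and the virtual registers are assumptions; ADDI is chosen purely for illustration):

// Sketch only: emit DestReg = SrcReg + 16 at the MI insertion point.
BuildMI(*MBB, MI, dl, TII->get(PPC::ADDI), DestReg)
    .addReg(SrcReg)
    .addImm(16);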
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Like a regular LOAD but additionally taking/producing a flag.
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
virtual const TargetFrameLowering * getFrameLowering() const
static unsigned getMinCallFrameSize(bool isPPC64, bool isDarwinABI)
std::vector< ArgListEntry > ArgListTy
void setMinReservedArea(unsigned size)
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, SelectionDAG &DAG, SDLoc dl)
virtual const PPCSubtarget * getSubtargetImpl() const
unsigned getFirstUnallocated(const uint16_t *Regs, unsigned NumRegs) const
void setUseUnderscoreLongJmp(bool Val)
const MCInstrDesc & get(unsigned Opcode) const
SDValue CreateStackTemporary(EVT VT, unsigned minAlign=1)
bool hasRecipPrec() const
bool isEXTLoad(const SDNode *N)
const MachinePointerInfo & getPointerInfo() const
int64_t getObjectOffset(int ObjectIdx) const
SDValue getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
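This is the workhorse behind by-value argument copies. A sketch under stated assumptions (Flags is an ISD::ArgFlagsTy for a byval parameter; Dst, Src, Chain, and dl are in scope):

// Sketch only: copy a byval argument with an inline-expandable memcpy.
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
Chain = DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                      /*isVol=*/false, /*AlwaysInline=*/false,
                      MachinePointerInfo(), MachinePointerInfo());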
unsigned getByValAlign() const
void setLoadExtAction(unsigned ExtType, MVT VT, LegalizeAction Action)
ArrayRef< int > getMask() const
static bool isZero(Value *V, DataLayout *DL)
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
virtual const char * getTargetNodeName(unsigned Opcode) const
virtual const TargetInstrInfo * getInstrInfo() const
int getVarArgsStackOffset() const
const uint32_t * getNoPreservedMask() const
unsigned getABITypeAlignment(Type *Ty) const
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary)
const STC & getSubtarget() const
void setNode(SDNode *N)
Set the SDNode.
static unsigned getFramePointerSaveOffset(bool isPPC64, bool isDarwinABI)
bool isBaseWithConstantOffset(SDValue Op) const
void setExceptionPointerRegister(unsigned R)
bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
bool isPredecessorOf(const SDNode *N) const
unsigned getObjectAlignment(int ObjectIdx) const
getObjectAlignment - Return the alignment of the specified stack object.
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, bool isVolatile, bool isNonTemporal, unsigned Alignment, const MDNode *TBAAInfo=0)
CCValAssign - Represent assignment of one arg/retval to a location.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
PPC::Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
SDValue getIntPtrConstant(uint64_t Val, bool isTarget=false)
const SDValue & getChain() const
virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const
getSetCCResultType - Return the ISD::SETCC ValueType.
virtual EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const
Byte Swap and Counting operators.
MachineMemOperand * getMemOperand() const
static SDNode * isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG)
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
MachineFrameInfo * getFrameInfo()
static unsigned CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, bool isPPC64, bool isVarArg, unsigned CC, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, unsigned &nAltivecParamsAtEnd)
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr *MI, MachineBasicBlock *MBB) const
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
unsigned Log2_32(uint32_t Value)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AttributeSet getAttributes() const
Return the attribute list for this Function.
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Class for arbitrary precision integers.
SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, const EVT *VTs, unsigned NumVTs, const SDValue *Ops, unsigned NumOps, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, bool Vol=false, bool ReadMem=true, bool WriteMem=true)
void computeRegisterProperties()
void setExceptionSelectorRegister(unsigned R)
ConstraintType getConstraintType(const std::string &Constraint) const
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes)).
op_iterator op_begin() const
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
static use_iterator use_end()
ZERO_EXTEND - Used for integer types, zeroing the new bits.
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT)
ANY_EXTEND - Used for integer types. The high bits are undefined.
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
static unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall, SmallVectorImpl< std::pair< unsigned, SDValue > > &RegsToPass, SmallVectorImpl< SDValue > &Ops, std::vector< EVT > &NodeTys, const PPCSubtarget &PPCSubTarget)
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS, bool MayNeedSP=false, const AllocaInst *Alloca=0)
SDValue getStackArgumentTokenFactor(SDValue Chain)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
static const unsigned PerfectShuffleTable[6561+1]
SmallVector< SDValue, 32 > OutVals
Bitwise operators - logical and, logical or, logical xor.
pointer data()
data - Return a pointer to the vector's buffer, even if empty().
static TargetLoweringObjectFile * CreateTLOF(const PPCTargetMachine &TM)
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
MachineRegisterInfo & getRegInfo()
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
int32_t SignExtend32(uint32_t x)
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
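A minimal sketch of the standard analysis preamble; CallConv, isVarArg, and Outs are assumptions, and RetCC_PPC is named as the likely assignment function per this backend's conventions:

// Sketch only: map return values onto registers/stack per the calling convention.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
               getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Outs, RetCC_PPC);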
unsigned MaxStoresPerMemmoveOptSize
unsigned MaxStoresPerMemcpyOptSize
void setStackPointerRegisterToSaveRestore(unsigned R)
SDValue getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT TVT, bool isNonTemporal, bool isVolatile, unsigned Alignment, const MDNode *TBAAInfo=0)
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
op_iterator op_end() const
const TargetMachine & getTarget() const
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
MachineSDNode * getMachineNode(unsigned Opcode, SDLoc dl, EVT VT)
virtual const TargetRegisterInfo * getRegisterInfo() const
bool isNON_EXTLoad(const SDNode *N)
FSINCOS - Compute both fsin and fcos as a single operation.
bool hasType(EVT vt) const
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
static bool isConstantOrUndef(int Op, int Val)
TLSModel::Model getTLSModel(const GlobalValue *GV) const
unsigned getReg() const
getReg - Returns the register number.
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setReturnAddressIsTaken(bool s)
unsigned getAlignment() const
void setMinStackArgumentAlignment(unsigned Align)
Set the minimum stack alignment of an argument (in log2(bytes)).
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, bool Aligned) const
LLVM Value Representation.
SDValue getRegister(unsigned Reg, EVT VT)
void setUseUnderscoreSetJmp(bool Val)
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
void setInsertFencesForAtomic(bool fence)
SDValue getValueType(EVT)
static cl::opt< bool > DisableILPPref("disable-ppc-ilp-pref", cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden)
unsigned getDarwinDirective() const
BasicBlockListType::iterator iterator
const TargetLowering & getTargetLoweringInfo() const
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
bool isPowerOf2_32(uint32_t Value)
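These MathExtras helpers appear together throughout the lowering code. A self-contained sketch:

// Sketch only: power-of-two checks, log2, and narrow-field sign extension.
unsigned Alignment = 16;
if (isPowerOf2_32(Alignment)) {
  unsigned Shift = Log2_32(Alignment); // 4
  (void)Shift;
}
int32_t Wide = SignExtend32<5>(0x1F);  // 5-bit field 0b11111 -> -1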
APInt LLVM_ATTRIBUTE_UNUSED_RESULT zext(unsigned width) const
Zero extend to a new width.
SDValue getMergeValues(const SDValue *Ops, unsigned NumOps, SDLoc dl)
getMergeValues - Create a MERGE_VALUES node from the given operands.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
const MCRegisterInfo & MRI
SDValue getTargetConstant(uint64_t Val, EVT VT)
SDValue getSetCC(SDLoc DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
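The condition type should come from getSetCCResultType rather than being hard-coded, so vector comparisons yield a vector of booleans. A minimal sketch (LHS, RHS, and dl assumed):

// Sketch only: build an equality comparison with the target's SETCC type.
EVT CCVT = getSetCCResultType(*DAG.getContext(), LHS.getValueType());
SDValue Cmp = DAG.getSetCC(dl, CCVT, LHS, RHS, ISD::SETEQ);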
unsigned getLocMemOffset() const
SDValue getEntryNode() const
void setPow2DivIsCheap(bool isCheap=true)
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable)
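Fixed objects describe incoming-argument slots at known SP offsets, while CreateStackObject allocates locals; either way the index is wrapped for the DAG with getFrameIndex. A minimal sketch (the 24-byte offset and PtrVT are illustrative assumptions):

// Sketch only: materialize a frame index for an 8-byte incoming stack argument.
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
int FI = MFI->CreateFixedObject(/*Size=*/8, /*SPOffset=*/24, /*Immutable=*/true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);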
TRUNCATE - Completely drop the high bits.
unsigned AllocateReg(unsigned Reg)
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
unsigned getByValTypeAlignment(Type *Ty) const
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const
unsigned AllocateStack(unsigned Size, unsigned Align)
void addSuccessor(MachineBasicBlock *succ, uint32_t weight=0)
int64_t getObjectSize(int ObjectIdx) const
EVT changeVectorElementTypeToInteger() const
unsigned getResNo() const
getResNo - Convenience function for get().getResNo().
static unsigned getReturnSaveOffset(bool isPPC64, bool isDarwinABI)
DebugLoc getDebugLoc() const
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
void refineAlignment(const MachineMemOperand *NewMMO)
uint64_t getZExtValue() const
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
unsigned getVectorNumElements() const