void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
  DEBUG(dbgs() << "Scalarize node result " << ResNo << ": ";
        N->dump(&DAG); dbgs() << "\n");
  // ...
    dbgs() << "ScalarizeVectorResult #" << ResNo << ": ";
  // ...
  case ISD::BITCAST: R = ScalarizeVecRes_BITCAST(N); break;
  // ...
  case ISD::FPOWI: R = ScalarizeVecRes_FPOWI(N); break;
  // ...
  case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(N)); break;
  // ...
  case ISD::VSELECT: R = ScalarizeVecRes_VSELECT(N); break;
  case ISD::SELECT: R = ScalarizeVecRes_SELECT(N); break;
  // ...
  case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break;
  case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break;
  // ...
    R = ScalarizeVecRes_UnaryOp(N);
  // ...
    R = ScalarizeVecRes_BinOp(N);
  // ...
    R = ScalarizeVecRes_TernaryOp(N);
  // ...
  SetScalarizedVector(SDValue(N, ResNo), R);
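
// Result scalarization: a one-element vector type (<1 x T>) is illegal for the
// target, so each ScalarizeVecRes_* helper below in effect rebuilds the node
// on the scalar element type T instead (for example, an add of <1 x i32>
// becomes an add of i32) and registers the scalar as the node's legalized
// result.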

SDValue DAGTypeLegalizer::ScalarizeVecRes_TernaryOp(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_MERGE_VALUES(SDNode *N,
                                                       unsigned ResNo) {
  SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
  return GetScalarizedVector(Op);
}

SDValue DAGTypeLegalizer::ScalarizeVecRes_BITCAST(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N) {
  // ...
                              cast<CvtRndSatSDNode>(N)->getCvtCode());

SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_ROUND(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) {
  assert(N->isUnindexed() && "Indexed vector load?");
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_InregOp(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_VSELECT(SDNode *N) {
  // ...
  if (ScalarBool != VecBool) {
    // ...
    switch (ScalarBool) {
    // ...
  // ...
                       LHS.getValueType(), Cond, LHS,
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT_CC(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_SETCC(SDNode *N) {
  // ...
         "Scalar/Vector type mismatch");
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
  // ...
  unsigned Op = !cast<ConstantSDNode>(Arg)->isNullValue();
  return GetScalarizedVector(N->getOperand(Op));
}

SDValue DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) {
  // ...
         "Operand types must be vectors");
  // ...
  return DAG.getNode(ExtendCode, DL, NVT, Res);
}
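
// Operand scalarization: the node's result type is legal, but one of its
// operands is an illegal one-element vector. ScalarizeVectorOperand rebuilds
// the node to use the scalarized operand (e.g. a store of <1 x f32> becomes a
// store of f32); the boolean result tells the caller whether N was updated in
// place and therefore needs to be re-analyzed.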

bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
  DEBUG(dbgs() << "Scalarize node operand " << OpNo << ": ";
        N->dump(&DAG); dbgs() << "\n");
  if (Res.getNode() == 0) {
    // ...
      dbgs() << "ScalarizeVectorOperand Op #" << OpNo << ": ";
    // ...
      Res = ScalarizeVecOp_BITCAST(N);
    // ...
      Res = ScalarizeVecOp_UnaryOp(N);
    // ...
      Res = ScalarizeVecOp_CONCAT_VECTORS(N);
    // ...
      Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N);
    // ...
      Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo);
    // ...

  if (!Res.getNode())
    return false;
  // ...
  if (Res.getNode() == N)
    // ...
         "Invalid operand expansion");
  // ...
  ReplaceValueWith(SDValue(N, 0), Res);

SDValue DAGTypeLegalizer::ScalarizeVecOp_BITCAST(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp(SDNode *N) {
  // ...
         "Unexpected vector type!");
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(SDNode *N) {
  // ...
    Ops[i] = GetScalarizedVector(N->getOperand(i));
  // ...
                     &Ops[0], Ops.size());

SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
  assert(N->isUnindexed() && "Indexed store of one-element vector?");
  assert(OpNo == 1 && "Do not know how to scalarize this operand!");
  // ...

void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
  // ...
    dbgs() << "SplitVectorResult #" << ResNo << ": ";
  // ...
  case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
  // ...
  case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
  case ISD::BITCAST: SplitVecRes_BITCAST(N, Lo, Hi); break;
  // ...
  case ISD::FPOWI: SplitVecRes_FPOWI(N, Lo, Hi); break;
  // ...
    SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
  // ...
    SplitVecRes_SETCC(N, Lo, Hi);
  // ...
    SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi);
  // ...
    SplitVecRes_UnaryOp(N, Lo, Hi);
  // ...
    SplitVecRes_ExtendOp(N, Lo, Hi);
  // ...
    SplitVecRes_BinOp(N, Lo, Hi);
  // ...
    SplitVecRes_TernaryOp(N, Lo, Hi);
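
// Binary and ternary ops split cleanly: fetch the already-split halves of
// each operand with GetSplitVector and re-issue the same opcode on the Lo
// halves and on the Hi halves.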

void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo,
                                         SDValue &Hi) {
  // ...
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  // ...
  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
  // ...

void DAGTypeLegalizer::SplitVecRes_TernaryOp(SDNode *N, SDValue &Lo,
                                             SDValue &Hi) {
  // ...
  GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
  // ...
  GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
  // ...
  GetSplitVector(N->getOperand(2), Op2Lo, Op2Hi);
  // ...
                   Op0Lo, Op1Lo, Op2Lo);
  // ...
                   Op0Hi, Op1Hi, Op2Hi);

void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
                                           SDValue &Hi) {
  // ...
  switch (getTypeAction(InVT)) {
  // ...
    GetExpandedOp(InOp, Lo, Hi);
    // ...
    GetSplitVector(InOp, Lo, Hi);
    // ...
  SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi);
  // ...

void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
                                                SDValue &Hi) {
  // ...

void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo,
                                                  SDValue &Hi) {
  assert(!(N->getNumOperands() & 1) && "Unsupported CONCAT_VECTORS");
  // ...
  if (NumSubvectors == 1) {
    // ...

void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
                                                     SDValue &Hi) {
  // ...
  uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  // ...

void DAGTypeLegalizer::SplitVecRes_FPOWI(SDNode *N, SDValue &Lo,
                                         SDValue &Hi) {
  // ...

void DAGTypeLegalizer::SplitVecRes_InregOp(SDNode *N, SDValue &Lo,
                                           SDValue &Hi) {
  // ...
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  // ...

void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
                                                     SDValue &Hi) {
  // ...
    GetSplitVector(Vec, Lo, Hi);
    // ...
    unsigned IdxVal = CIdx->getZExtValue();
    // ...
    if (IdxVal < LoNumElts)
      // ...
  SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
  // ...
                    false, false, false, 0);
  // ...
  StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                         DAG.getConstant(IncrementSize, StackPtr.getValueType()));
  // ...
                 false, false, false, MinAlign(Alignment, IncrementSize));

void DAGTypeLegalizer::SplitVecRes_SCALAR_TO_VECTOR(SDNode *N, SDValue &Lo,
                                                    SDValue &Hi) {
  // ...

void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
                                        SDValue &Hi) {
  // ...
  EVT LoMemVT, HiMemVT;
  // ...
                   isInvariant, Alignment, TBAAInfo);
  // ...
                   HiMemVT, isVolatile, isNonTemporal, isInvariant, Alignment,
  // ...
  ReplaceValueWith(SDValue(LD, 1), Ch);

void DAGTypeLegalizer::SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi) {
  // ...
         "Operand types must be vectors");
  // ...

void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo,
                                           SDValue &Hi) {
  // ...
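
// SplitVecRes_ExtendOp: rather than splitting an illegal extend directly, the
// legalizer may extend incrementally (first to a still-legal vector with
// double-width elements, then split that result); when the preconditions are
// not met it falls back to the generic SplitVecRes_UnaryOp path.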

void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
                                            SDValue &Hi) {
  // ...
  if ((NumElements & 1) == 0 &&
      // ...
    EVT SplitLoVT, SplitHiVT;
    // ...
    DEBUG(dbgs() << "Split vector extend via incremental extend:";
          N->dump(&DAG); dbgs() << "\n");
    // ...
  SplitVecRes_UnaryOp(N, Lo, Hi);

void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
                                                  SDValue &Lo, SDValue &Hi) {
  // ...
  GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]);
  GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]);
  // ...
  for (unsigned High = 0; High < 2; ++High) {
    // ...
    unsigned InputUsed[2] = { -1U, -1U };
    unsigned FirstMaskIdx = High * NewElts;
    bool useBuildVector = false;
    for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
      // ...
      int Idx = N->getMaskElt(FirstMaskIdx + MaskOffset);
      // ...
      unsigned Input = (unsigned)Idx / NewElts;
      // ...
      Idx -= Input * NewElts;
      // ...
      if (InputUsed[OpNo] == Input) {
        // ...
      } else if (InputUsed[OpNo] == -1U) {
        InputUsed[OpNo] = Input;
        // ...
      useBuildVector = true;
      // ...
    if (useBuildVector) {
      // ...
      for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
        // ...
        int Idx = N->getMaskElt(FirstMaskIdx + MaskOffset);
        // ...
        unsigned Input = (unsigned)Idx / NewElts;
        // ...
        Idx -= Input * NewElts;
        // ...
    } else if (InputUsed[0] == -1U) {
      // ...
      SDValue Op0 = Inputs[InputUsed[0]];
      // ...
      SDValue Op1 = InputUsed[1] == -1U ?
                    DAG.getUNDEF(NewVT) : Inputs[InputUsed[1]];
      // ...
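
// Operand splitting: the node's result type is legal, but one operand is a
// vector that has to be split. Each SplitVecOp_* routine rebuilds the node
// from the two halves of that operand.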

bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
  // ...
  if (Res.getNode() == 0) {
    // ...
      dbgs() << "SplitVectorOperand Op #" << OpNo << ": ";
    // ...
    case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break;
    // ...
      Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
      // ...
      Res = SplitVecOp_VSELECT(N, OpNo);
      // ...
      Res = SplitVecOp_UnaryOp(N);
      // ...

  if (!Res.getNode())
    return false;
  // ...
  if (Res.getNode() == N)
    // ...
         "Invalid operand expansion");
  // ...
  ReplaceValueWith(SDValue(N, 0), Res);

SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(SDNode *N, unsigned OpNo) {
  // ...
  assert(OpNo == 0 && "Illegal operand must be mask");
  // ...
  assert(Lo.getValueType() == Hi.getValueType() &&
         "Lo and Hi have differing types");
  // ...
  assert(LoOpVT == HiOpVT && "Asymmetric vector split?");
  // ...
  SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
  // ...

SDValue DAGTypeLegalizer::SplitVecOp_BITCAST(SDNode *N) {
  // ...
  Lo = BitConvertToInteger(Lo);
  Hi = BitConvertToInteger(Hi);
  // ...
                     JoinIntegers(Lo, Hi));

SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
  // ...
  uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  // ...
  if (IdxVal < LoElts) {
    // ...
           "Extracted subvector crosses vector split!");
    // ...

SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  // ...
  if (isa<ConstantSDNode>(Idx)) {
    uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    // ...
    GetSplitVector(Vec, Lo, Hi);
    // ...
    uint64_t LoElts = Lo.getValueType().getVectorNumElements();
    // ...
    if (IdxVal < LoElts)
      // ...
  StackPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
  // ...

SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
  assert(N->isUnindexed() && "Indexed store of vector?");
  assert(OpNo == 1 && "Can only split the stored value");
  // ...
  EVT LoMemVT, HiMemVT;
  // ...
  unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
  // ...
                            LoMemVT, isVol, isNT, Alignment, TBAAInfo);
  // ...
                      isVol, isNT, Alignment, TBAAInfo);
  // ...
                            HiMemVT, isVol, isNT, Alignment, TBAAInfo);
  // ...
                      isVol, isNT, Alignment, TBAAInfo);
  // ...

SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
  // ...
                     &Elts[0], Elts.size());

SDValue DAGTypeLegalizer::SplitVecOp_TRUNCATE(SDNode *N) {
  // ...
  assert(!(NumElements & 1) && "Splitting vector, but not in half!");
  // ...
  if (InElementSize <= OutElementSize * 2)
    return SplitVecOp_UnaryOp(N);
  // ...

SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) {
  // ...
         "Operand types must be vectors");
  // ...
  SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
  // ...
  unsigned PartElements = Lo0.getValueType().getVectorNumElements();
  // ...
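
// Vector widening: the vector type is promoted to one with more elements (for
// example, <3 x float> to <4 x float>). The WidenVecRes_* helpers fill the
// extra lanes with undef and take care that those lanes never trap or change
// an observable result.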

void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
  DEBUG(dbgs() << "Widen node result " << ResNo << ": ";
        N->dump(&DAG); dbgs() << "\n");
  // ...
    dbgs() << "WidenVectorResult #" << ResNo << ": ";
  // ...
  case ISD::BITCAST: Res = WidenVecRes_BITCAST(N); break;
  // ...
  case ISD::LOAD: Res = WidenVecRes_LOAD(N); break;
  // ...
  case ISD::SELECT: Res = WidenVecRes_SELECT(N); break;
  // ...
  case ISD::SETCC: Res = WidenVecRes_SETCC(N); break;
  case ISD::UNDEF: Res = WidenVecRes_UNDEF(N); break;
  // ...
    Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N));
  // ...
    Res = WidenVecRes_Binary(N);
  // ...
    Res = WidenVecRes_BinaryCanTrap(N);
  // ...
    Res = WidenVecRes_POWI(N);
  // ...
    Res = WidenVecRes_Shift(N);
  // ...
    Res = WidenVecRes_Convert(N);
  // ...
    Res = WidenVecRes_Unary(N);
  // ...
    Res = WidenVecRes_Ternary(N);
  // ...
  SetWidenedVector(SDValue(N, ResNo), Res);
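
// WidenVecRes_BinaryCanTrap handles ops that can trap (e.g. integer divide):
// operating on the widened vector's undef lanes could fault, so the work is
// done on progressively smaller legal sub-vectors (and, if necessary,
// individual elements) that cover only the original elements, and the pieces
// are then concatenated back into the widened type.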

SDValue DAGTypeLegalizer::WidenVecRes_BinaryCanTrap(SDNode *N) {
  // ...
    NumElts = NumElts / 2;
  // ...
  unsigned ConcatEnd = 0;
  // ...
  while (CurNumElts != 0) {
    while (CurNumElts >= NumElts) {
      // ...
      ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2);
      // ...
      CurNumElts -= NumElts;
    // ...
      NumElts = NumElts / 2;
    // ...
      for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
        // ...
        ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
        // ...
  if (ConcatEnd == 1) {
    VT = ConcatOps[0].getValueType();
    // ...
      return ConcatOps[0];
  // ...
  while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
    Idx = ConcatEnd - 1;
    VT = ConcatOps[Idx--].getValueType();
    while (Idx >= 0 && ConcatOps[Idx].getValueType() == VT)
      // ...
    unsigned NumToInsert = ConcatEnd - Idx - 1;
    for (unsigned i = 0, OpIdx = Idx+1; i < NumToInsert; i++, OpIdx++) {
      // ...
    ConcatOps[Idx+1] = VecOp;
    ConcatEnd = Idx + 2;
    // ...
    unsigned RealVals = ConcatEnd - Idx - 1;
    unsigned SubConcatEnd = 0;
    unsigned SubConcatIdx = Idx + 1;
    while (SubConcatEnd < RealVals)
      SubConcatOps[SubConcatEnd++] = ConcatOps[++Idx];
    while (SubConcatEnd < OpsToConcat)
      SubConcatOps[SubConcatEnd++] = undefVec;
    // ...
                                          NextVT, &SubConcatOps[0],
    // ...
    ConcatEnd = SubConcatIdx + 1;
  // ...
  if (ConcatEnd == 1) {
    VT = ConcatOps[0].getValueType();
    // ...
      return ConcatOps[0];
  // ...
  if (NumOps != ConcatEnd) {
    // ...
    for (unsigned j = ConcatEnd; j < NumOps; ++j)
      ConcatOps[j] = UndefVal;
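
// WidenVecRes_Convert (and the CONVERT_RNDSAT variant further below) try,
// roughly, three strategies: if the input can be widened to the same element
// count, convert the widened input directly; if the element counts divide
// evenly, build a matching input from concatenated or extracted legal pieces;
// otherwise convert element by element and pad the extra widened lanes with
// undef.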

SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
  // ...
    InVTNumElts = InVT.getVectorNumElements();
    if (InVTNumElts == WidenNumElts) {
      // ...
      return DAG.getNode(Opcode, DL, WidenVT, InOp);
      // ...
    if (WidenNumElts % InVTNumElts == 0) {
      // ...
      unsigned NumConcat = WidenNumElts/InVTNumElts;
      // ...
      for (unsigned i = 1; i != NumConcat; ++i)
        // ...
                                  &Ops[0], NumConcat);
      // ...
      return DAG.getNode(Opcode, DL, WidenVT, InVec);
      // ...
    if (InVTNumElts % WidenNumElts == 0) {
      // ...
      return DAG.getNode(Opcode, DL, WidenVT, InVal);
      // ...
  unsigned MinElts = std::min(InVTNumElts, WidenNumElts);
  // ...
  for (i = 0; i < MinElts; ++i) {
    // ...
    Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val);
  // ...
  for (; i < WidenNumElts; ++i)
    // ...

SDValue DAGTypeLegalizer::WidenVecRes_Shift(SDNode *N) {
  // ...
  EVT ShVT = ShOp.getValueType();
  // ...
    ShOp = GetWidenedVector(ShOp);
    ShVT = ShOp.getValueType();
  // ...
  if (ShVT != ShWidenVT)
    ShOp = ModifyToType(ShOp, ShWidenVT);
  // ...
                                 .getVectorElementType(),
  // ...

SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(SDNode *N, unsigned ResNo) {
  SDValue WidenVec = DisintegrateMERGE_VALUES(N, ResNo);
  return GetWidenedVector(WidenVec);
}

SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) {
  // ...
  switch (getTypeAction(InVT)) {
  // ...
    InOp = GetPromotedInteger(InOp);
    // ...
    if (WidenVT.bitsEq(InVT))
      // ...
    InOp = GetWidenedVector(InOp);
    // ...
    if (WidenVT.bitsEq(InVT))
      // ...
  if (WidenSize % InSize == 0 && InVT != MVT::x86mmx) {
    // ...
    unsigned NewNumElts = WidenSize / InSize;
    // ...
      for (unsigned i = 1; i < NewNumElts; ++i)
        // ...
                         NewInVT, &Ops[0], NewNumElts);
      // ...
                         NewInVT, &Ops[0], NewNumElts);
    // ...
  return CreateStackStoreLoad(InOp, WidenVT);
}

SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
  // ...
  assert(WidenNumElts >= NumElts && "Shrinking vector instead of widening!");
  NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
  // ...

SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
  // ...
  bool InputWidened = false;
  // ...
      for (unsigned i = 0; i < NumOperands; ++i)
        // ...
      for (unsigned i = NumOperands; i != NumConcat; ++i)
        // ...
      InputWidened = true;
      // ...
      for (i = 1; i < NumOperands; ++i)
        // ...
      if (i == NumOperands)
        // ...
      if (NumOperands == 2) {
        // ...
        for (unsigned i = 0; i < NumInElts; ++i) {
          // ...
          MaskOps[i + NumInElts] = i + WidenNumElts;
        // ...
  for (unsigned i = 0; i < NumOperands; ++i) {
    // ...
      InOp = GetWidenedVector(InOp);
    // ...
    for (unsigned j = 0; j < NumInElts; ++j)
      // ...
  for (; Idx < WidenNumElts; ++Idx)
    Ops[Idx] = UndefVal;
  // ...

SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
  // ...
  ISD::CvtCode CvtCode = cast<CvtRndSatSDNode>(N)->getCvtCode();
  // ...
    InOp = GetWidenedVector(InOp);
    // ...
    if (InVTNumElts == WidenNumElts)
      // ...
    if (WidenNumElts % InVTNumElts == 0) {
      // ...
      unsigned NumConcat = WidenNumElts/InVTNumElts;
      // ...
      for (unsigned i = 1; i != NumConcat; ++i)
        // ...
    if (InVTNumElts % WidenNumElts == 0) {
      // ...
  unsigned MinElts = std::min(InVTNumElts, WidenNumElts);
  // ...
  for (i = 0; i < MinElts; ++i) {
    // ...
  for (; i < WidenNumElts; ++i)
    // ...

SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
  // ...
    InOp = GetWidenedVector(InOp);
  // ...
  uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  if (IdxVal == 0 && InVT == WidenVT)
    // ...
  if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
    // ...
  for (i = 0; i < NumElts; ++i)
    // ...
  for (; i < WidenNumElts; ++i)
    // ...

SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
  // ...
    Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
  // ...
    Result = GenWidenVectorLoads(LdChain, LD);
  // ...
  if (LdChain.size() == 1)
    NewChain = LdChain[0];
  // ...
                           &LdChain[0], LdChain.size());
  // ...
  ReplaceValueWith(SDValue(N, 1), NewChain);
  // ...

SDValue DAGTypeLegalizer::WidenVecRes_SCALAR_TO_VECTOR(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
  // ...
                                     CondEltVT, WidenNumElts);
    // ...
      Cond1 = GetWidenedVector(Cond1);
    // ...
      Cond1 = ModifyToType(Cond1, CondWidenVT);
  // ...
  assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT);
  // ...
                     WidenVT, Cond1, InOp1, InOp2);

SDValue DAGTypeLegalizer::WidenVecRes_SELECT_CC(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::WidenVecRes_SETCC(SDNode *N) {
  // ...
         "Scalar/Vector type mismatch");
  // ...

SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N) {
  // ...
  for (unsigned i = 0; i != NumElts; ++i) {
    // ...
    if (Idx < (int)NumElts)
      NewMask.push_back(Idx);
    else
      NewMask.push_back(Idx - NumElts + WidenNumElts);
  }
  for (unsigned i = NumElts; i != WidenNumElts; ++i)
    NewMask.push_back(-1);
  // ...

SDValue DAGTypeLegalizer::WidenVecRes_VSETCC(SDNode *N) {
  // ...
         "Operands must be vectors");
  // ...
  unsigned WidenNumElts = WidenVT.getVectorNumElements();
  // ...
  assert(InVT.isVector() && "can not widen non vector type");
  // ...
  InOp1 = GetWidenedVector(InOp1);
  // ...
  assert(InOp1.getValueType() == WidenInVT &&
         // ...
         "Input not widened to expected type!");
  // ...

bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
  DEBUG(dbgs() << "Widen node operand " << OpNo << ": ";
        N->dump(&DAG); dbgs() << "\n");
  // ...
    dbgs() << "WidenVectorOperand op #" << OpNo << ": ";
  // ...
  case ISD::STORE: Res = WidenVecOp_STORE(N); break;
  case ISD::SETCC: Res = WidenVecOp_SETCC(N); break;
  // ...
    Res = WidenVecOp_Convert(N);
  // ...
  if (!Res.getNode())
    return false;
  // ...
  if (Res.getNode() == N)
    // ...
         "Invalid operand expansion");
  // ...
  ReplaceValueWith(SDValue(N, 0), Res);

SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
  // ...
  InOp = GetWidenedVector(InOp);
  // ...
  for (unsigned i = 0; i < NumElts; ++i)
    Ops[i] = DAG.getNode(Opcode, dl, EltVT,
    // ...

SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) {
  // ...
  unsigned InWidenSize = InWidenVT.getSizeInBits();
  // ...
    unsigned NewNumElts = InWidenSize / Size;
    // ...
  return CreateStackStoreLoad(InOp, VT);
}

SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
  // ...
  for (unsigned i = 0; i < NumOperands; ++i) {
    // ...
      InOp = GetWidenedVector(InOp);
    // ...
    for (unsigned j = 0; j < NumInElts; ++j)
      // ...

SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  // ...

SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
  // ...
    GenWidenVectorTruncStores(StChain, ST);
  // ...
    GenWidenVectorStores(StChain, ST);
  // ...
  if (StChain.size() == 1)
    // ...
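
// Vector widening memory helpers. Loads and stores of a widened vector must
// not touch memory beyond the original vector's size, so the helpers below
// break each access into the largest legal chunks that still fit, finishing
// with smaller pieces or individual elements when necessary.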

static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
                       unsigned Width, EVT WidenVT,
                       unsigned Align = 0, unsigned WidenEx = 0) {
  // ...
  unsigned AlignInBits = Align*8;
  // ...
  EVT RetVT = WidenEltVT;
  if (Width == WidenEltWidth)
    // ...
    if (TLI.isTypeLegal(MemVT) && (WidenWidth % MemVTWidth) == 0 &&
        // ...
        (MemVTWidth <= Width ||
         (Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
      // ...
        (WidenWidth % MemVTWidth) == 0 &&
        // ...
        (MemVTWidth <= Width ||
         (Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
      // ...

static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
                                     SmallVectorImpl<SDValue> &LdOps,
                                     unsigned Start, unsigned End) {
  // ...
  SDLoc dl(LdOps[Start]);
  EVT LdTy = LdOps[Start].getValueType();
  // ...
  for (unsigned i = Start + 1; i != End; ++i) {
    EVT NewLdTy = LdOps[i].getValueType();
    if (NewLdTy != LdTy) {
      // ...
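
// GenWidenVectorLoads: load the memory the original vector occupies using a
// sequence of types chosen by FindMemType, then stitch the pieces together
// (concatenating vectors, or building a vector from scalar loads) into the
// widened type. The chain of every partial load is collected in LdChain so
// the caller can token-factor them.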

SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
                                              LoadSDNode *LD) {
  // ...
  int WidthDiff = WidenWidth - LdWidth;
  unsigned LdAlign = (isVolatile) ? 0 : Align;
  // ...
  EVT NewVT = FindMemType(DAG, TLI, LdWidth, WidenVT, LdAlign, WidthDiff);
  // ...
                             isVolatile, isNonTemporal, isInvariant, Align,
  // ...
  if (LdWidth <= NewVTWidth) {
    // ...
      unsigned NumElts = WidenWidth / NewVTWidth;
      // ...
    if (NewVT == WidenVT)
      // ...
    assert(WidenWidth % NewVTWidth == 0);
    unsigned NumConcat = WidenWidth / NewVTWidth;
    // ...
    ConcatOps[0] = LdOp;
    for (unsigned i = 1; i != NumConcat; ++i)
      ConcatOps[i] = UndefVal;
    // ...
  LdWidth -= NewVTWidth;
  unsigned Offset = 0;

  while (LdWidth > 0) {
    unsigned Increment = NewVTWidth / 8;
    Offset += Increment;
    // ...
    if (LdWidth < NewVTWidth) {
      // ...
      NewVT = FindMemType(DAG, TLI, LdWidth, WidenVT, LdAlign, WidthDiff);
      // ...
        L = DAG.getLoad(NewVT, dl, Chain, BasePtr,
                        // ...
                        isNonTemporal, isInvariant, MinAlign(Align, Increment),
                        // ...
        while (size < LdOp->getValueSizeInBits(0)) {
          // ...
                        &Loads[0], Loads.size());
        // ...
        L = DAG.getLoad(NewVT, dl, Chain, BasePtr,
                        // ...
                        isNonTemporal, isInvariant, MinAlign(Align, Increment),
                        // ...
    LdWidth -= NewVTWidth;
  // ...
  unsigned End = LdOps.size();
  if (!LdOps[0].getValueType().isVector())
    // ...
  EVT LdTy = LdOps[i].getValueType();
  // ...
  for (--i; i >= 0; --i) {
    LdTy = LdOps[i].getValueType();
    // ...
  ConcatOps[--Idx] = LdOps[i];
  for (--i; i >= 0; --i) {
    EVT NewLdTy = LdOps[i].getValueType();
    if (NewLdTy != LdTy) {
      // ...
                           &ConcatOps[Idx], End - Idx);
      // ...
    ConcatOps[--Idx] = LdOps[i];
  // ...
                     &ConcatOps[Idx], End - Idx);
  // ...
  for (; i != End-Idx; ++i)
    WidenOps[i] = ConcatOps[Idx+i];
  for (; i != NumOps; ++i)
    WidenOps[i] = UndefVal;
  // ...
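
// GenWidenVectorExtLoads: extending loads are widened by loading each element
// individually with getExtLoad and filling the remaining widened lanes with
// undef, rather than by loading larger chunks.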

SDValue DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
                                                 LoadSDNode *LD,
                                                 ISD::LoadExtType ExtType) {
  // ...
  Ops[0] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
                          // ...
                          LdEltVT, isVolatile, isNonTemporal, Align, TBAAInfo);
  // ...
  unsigned i = 0, Offset = Increment;
  for (i = 1; i < NumElts; ++i, Offset += Increment) {
    // ...
    Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
                            // ...
                            isVolatile, isNonTemporal, Align, TBAAInfo);
    // ...
  for (; i != WidenNumElts; ++i)
    // ...
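
// GenWidenVectorStores / GenWidenVectorTruncStores mirror the load path: only
// the original vector's StWidth bits are written, using the largest legal
// store type at each step, or element-by-element truncating stores in the
// trunc-store case; every partial store's chain is pushed onto StChain.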

void DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
                                            StoreSDNode *ST) {
  // ...
  unsigned Offset = 0;
  while (StWidth != 0) {
    // ...
    unsigned Increment = NewVTWidth / 8;
    // ...
                                      isVolatile, isNonTemporal,
                                      MinAlign(Align, Offset), TBAAInfo));
      StWidth -= NewVTWidth;
      Offset += Increment;
      // ...
    } while (StWidth != 0 && StWidth >= NewVTWidth);
    // ...
      unsigned NumElts = ValWidth / NewVTWidth;
      // ...
      Idx = Idx * ValEltWidth / NewVTWidth;
      // ...
                                      isVolatile, isNonTemporal,
                                      MinAlign(Align, Offset), TBAAInfo));
        StWidth -= NewVTWidth;
        Offset += Increment;
        // ...
      } while (StWidth != 0 && StWidth >= NewVTWidth);
      // ...
      Idx = Idx * NewVTWidth / ValEltWidth;
    // ...

void DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
                                                 StoreSDNode *ST) {
  // ...
                                     isVolatile, isNonTemporal, Align,
  // ...
  unsigned Offset = Increment;
  for (unsigned i = 1; i < NumElts; ++i, Offset += Increment) {
    // ...
                                     StEltVT, isVolatile, isNonTemporal,
                                     MinAlign(Align, Offset), TBAAInfo));
    // ...
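
// ModifyToType forces a vector value to a given vector type with the same
// element type: either by concatenating with undef / extracting a subvector
// when the element counts divide evenly, or by moving the elements one at a
// time and padding the rest with undef.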
2886 "input and widen element type must match");
2895 if (WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0) {
2896 unsigned NumConcat = WidenNumElts / InNumElts;
2900 for (
unsigned i = 1; i != NumConcat; ++i)
2906 if (WidenNumElts < InNumElts && InNumElts % WidenNumElts)
2913 unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
2915 for (Idx = 0; Idx < MinNumElts; ++Idx)
2920 for ( ; Idx < WidenNumElts; ++Idx)
2921 Ops[Idx] = UndefVal;