class X86FastISel : public FastISel {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();

  virtual bool TargetSelectInstruction(const Instruction *I);

  virtual bool FastLowerArguments();

#include "X86GenFastISel.inc"

  bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);
  bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM,
                        bool Aligned = false);
  bool X86FastEmitStore(EVT VT, unsigned ValReg, const X86AddressMode &AM,
                        bool Aligned = false);
  bool DoSelectCall(const Instruction *I, const char *MemIntName);

    return getTargetMachine()->getInstrInfo();

  unsigned TargetMaterializeConstant(const Constant *C);

  unsigned TargetMaterializeAlloca(const AllocaInst *C);

  unsigned TargetMaterializeFloatZero(const ConstantFP *CF);

  bool isScalarFPTypeInSSEReg(EVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) ||
           (VT == MVT::f32 && X86ScalarSSEf32);

  bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);

  bool IsMemcpySmall(uint64_t Len);
bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
  EVT evt = TLI.getValueType(Ty, true);
  if (VT == MVT::f64 && !X86ScalarSSEf64)
    return false;
  if (VT == MVT::f32 && !X86ScalarSSEf32)
    return false;

  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
#include "X86GenCallingConv.inc"
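
// X86FastEmitLoad - Emit a load of type VT from the address described by AM
// into ResultReg; the opcode and register class depend on the value type and
// on the subtarget's SSE/AVX support.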
bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
                                  unsigned &ResultReg) {
  default: return false;
    RC  = &X86::GR8RegClass;
    RC  = &X86::GR16RegClass;
    RC  = &X86::GR32RegClass;
    RC  = &X86::GR64RegClass;
    if (X86ScalarSSEf32) {
      Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
      RC  = &X86::FR32RegClass;
      RC  = &X86::RFP32RegClass;
    if (X86ScalarSSEf64) {
      Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
      RC  = &X86::FR64RegClass;
      RC  = &X86::RFP64RegClass;

  ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                         TII.get(Opc), ResultReg), AM);
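
// X86FastEmitStore - Emit a store of the value held in ValReg (of type VT) to
// the address described by AM; i1 values are masked down to a single bit
// before being stored.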
bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg,
                                   const X86AddressMode &AM, bool Aligned) {
  default: return false;
    unsigned AndResult = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(X86::AND8ri), AndResult).addReg(ValReg).addImm(1);
  case MVT::i8:  Opc = X86::MOV8mr;  break;
  case MVT::i16: Opc = X86::MOV16mr; break;
  case MVT::i32: Opc = X86::MOV32mr; break;
  case MVT::i64: Opc = X86::MOV64mr; break;
    Opc = X86ScalarSSEf32 ?
          (Subtarget->hasAVX() ? X86::VMOVSSmr : X86::MOVSSmr) : X86::ST_Fp32m;
    Opc = X86ScalarSSEf64 ?
          (Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
      Opc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
      Opc = Subtarget->hasAVX() ? X86::VMOVUPSmr : X86::MOVUPSmr;
      Opc = Subtarget->hasAVX() ? X86::VMOVAPDmr : X86::MOVAPDmr;
      Opc = Subtarget->hasAVX() ? X86::VMOVUPDmr : X86::MOVUPDmr;
      Opc = Subtarget->hasAVX() ? X86::VMOVDQAmr : X86::MOVDQAmr;
      Opc = Subtarget->hasAVX() ? X86::VMOVDQUmr : X86::MOVDQUmr;
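
// X86FastEmitStore - Store the IR value Val to the address in AM; null
// pointers and small constant integers are folded into immediate stores,
// anything else is materialized into a register first.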
bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
  if (isa<ConstantPointerNull>(Val))

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    case MVT::i8:  Opc = X86::MOV8mi;  break;
    case MVT::i16: Opc = X86::MOV16mi; break;
    case MVT::i32: Opc = X86::MOV32mi; break;
      Opc = X86::MOV64mi32;
      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             TII.get(Opc)), AM)
        .addImm(Signed ? (uint64_t) CI->getSExtValue() :
  unsigned ValReg = getRegForValue(Val);
  return X86FastEmitStore(VT, ValReg, AM, Aligned);
                                    unsigned Src, EVT SrcVT,
                                    unsigned &ResultReg) {
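
// handleConstantAddresses - Fold a constant (typically a GlobalValue) into
// the address mode, honoring the PIC style in use and rejecting thread-local
// globals.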
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    if (GVar->isThreadLocal())
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
          dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false)))
        if (GVar->isThreadLocal())
    if (!Subtarget->isPICStyleRIPRel() ||
      unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);
        AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
      if (Subtarget->isPICStyleRIPRel()) {
      if (I != LocalValueMap.end() && I->second != 0) {
      SavePoint SaveInsertPt = enterLocalValueArea();
      if (TLI.getPointerTy() == MVT::i64) {
        RC = &X86::GR64RegClass;
        if (Subtarget->isPICStyleRIPRel())
        RC = &X86::GR32RegClass;
      LoadReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
      leaveLocalValueArea(SaveInsertPt);
      LocalValueMap[V] = LoadReg;

  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    AM.Base.Reg = getRegForValue(V);
    assert(AM.Scale == 1 && "Scale with no index!");
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();

    if (Ty->getAddressSpace() > 255)

  case Instruction::BitCast:
    return X86SelectAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
      return X86SelectAddress(U->getOperand(0), AM);

  case Instruction::PtrToInt:
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM);

  case Instruction::Alloca: {
      FuncInfo.StaticAllocaMap.find(A);
    if (SI != FuncInfo.StaticAllocaMap.end()) {

  case Instruction::Add: {
      uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
        AM.Disp = (uint32_t)Disp;
        return X86SelectAddress(U->getOperand(0), AM);

  case Instruction::GetElementPtr: {
    uint64_t Disp = (int32_t)AM.Disp;
    unsigned Scale = AM.Scale;
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
          Disp += CI->getSExtValue() * S;
          if (canFoldAddIntoGEP(U, Op)) {
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            Op = cast<AddOperator>(Op)->getOperand(0);
                   (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
                   (S == 1 || S == 2 || S == 4 || S == 8)) {
          IndexReg = getRegForGEPIndex(Op).first;
          goto unsupported_gep;
    AM.Disp = (uint32_t)Disp;
          dyn_cast<GetElementPtrInst>(U->getOperand(0))) {
    } else if (X86SelectAddress(U->getOperand(0), AM)) {
      if (handleConstantAddresses(*I, AM))

  return handleConstantAddresses(V, AM);
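
// X86SelectCallAddress - Compute an address mode for a call target; direct
// references to GlobalValues are handled specially and the usual PIC
// restrictions apply.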
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
    InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();

  case Instruction::BitCast:
      return X86SelectCallAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
      return X86SelectCallAddress(U->getOperand(0), AM);

  case Instruction::PtrToInt:
        TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return X86SelectCallAddress(U->getOperand(0), AM);

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    if (Subtarget->isPICStyleRIPRel() &&
    if (GV->hasDLLImportLinkage())
      if (GVar->isThreadLocal())
    if (Subtarget->isPICStyleRIPRel()) {
    } else if (Subtarget->isPICStyleStubPIC()) {
    } else if (Subtarget->isPICStyleGOT()) {

  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    AM.Base.Reg = getRegForValue(V);
    assert(AM.Scale == 1 && "Scale with no index!");
bool X86FastISel::X86SelectStore(const Instruction *I) {
  unsigned SABIAlignment =

  return X86FastEmitStore(VT, I->getOperand(0), AM, Aligned);
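
// X86SelectRet - Lower a return: copy the return value into the locations
// chosen by the calling convention (extending to i32 where required), handle
// the sret return register, and emit RET.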
bool X86FastISel::X86SelectRet(const Instruction *I) {
  if (!FuncInfo.CanLowerReturn)
  if (Subtarget->isCallingConvWin64(CC))

  GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
  CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,

    unsigned Reg = getRegForValue(RV);
    if (ValLocs.size() != 1)
    unsigned SrcReg = Reg + VA.getValNo();
    if (SrcVT != DstVT) {
      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
      assert(DstVT == MVT::i32 && "X86 should always ext to i32");
        if (Outs[0].Flags.isSExt())
        SrcReg = FastEmitZExtFromI1(MVT::i8, SrcReg, false);
            DstReg).addReg(SrcReg);

  if (F.hasStructRetAttr() &&
      (Subtarget->is64Bit() || Subtarget->isTargetWindows())) {
           "SRetReturnReg should have been set in LowerFormalArguments()!");
    unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
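
// X86SelectLoad - Select a load instruction: check that the loaded type is
// legal, compute the source address, and emit the load via X86FastEmitLoad.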
bool X86FastISel::X86SelectLoad(const Instruction *I) {
  if (!isTypeLegal(I->getType(), VT, true))

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, AM, ResultReg)) {
    UpdateValueMap(I, ResultReg);

  bool HasAVX = Subtarget->hasAVX();
  bool X86ScalarSSEf32 = Subtarget->hasSSE1();
  bool X86ScalarSSEf64 = Subtarget->hasSSE2();

  case MVT::i8:  return X86::CMP8rr;
    return X86ScalarSSEf32 ? (HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr) : 0;
    return X86ScalarSSEf64 ? (HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr) : 0;

  case MVT::i8:  return X86::CMP8ri;
    return X86::CMP64ri32;
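
// X86FastEmitCompare - Emit a CMP or UCOMIS* of Op0 against Op1, using the
// immediate form when the right-hand side is a constant integer.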
bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
  unsigned Op0Reg = getRegForValue(Op0);
  if (Op0Reg == 0) return false;

  if (isa<ConstantPointerNull>(Op1))

  if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
        .addImm(Op1C->getSExtValue());

  if (CompareOpc == 0) return false;

  unsigned Op1Reg = getRegForValue(Op1);
  if (Op1Reg == 0) return false;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
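
// X86SelectCmp - Lower an integer or FP compare into a SETcc producing a GR8
// result; FCMP_OEQ and FCMP_UNE each need two flag reads (SETE/SETNP,
// SETNE/SETP) combined with AND/OR.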
bool X86FastISel::X86SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(X86::SETNPr), NPReg);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    UpdateValueMap(I, ResultReg);

    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETNEr), NEReg);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETPr), PReg);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::OR8rr), ResultReg)
      .addReg(PReg).addReg(NEReg);
    UpdateValueMap(I, ResultReg);

  if (!X86FastEmitCompare(Op0, Op1, VT))

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
  UpdateValueMap(I, ResultReg);
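
// X86SelectZExt - Zero-extend a value; i1 sources are normalized to i8 first,
// and extension to i64 goes through a 32-bit zero-extending move whose result
// is then widened into a GR64 register.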
bool X86FastISel::X86SelectZExt(const Instruction *I) {
  if (!TLI.isTypeLegal(DstVT))

  unsigned ResultReg = getRegForValue(I->getOperand(0));
    ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, false);

    case MVT::i8:  MovInst = X86::MOVZX32rr8;  break;
    case MVT::i16: MovInst = X86::MOVZX32rr16; break;
    case MVT::i32: MovInst = X86::MOV32rr;     break;

    unsigned Result32 = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovInst), Result32)

    ResultReg = createResultReg(&X86::GR64RegClass);
  } else if (DstVT != MVT::i8) {

  UpdateValueMap(I, ResultReg);
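
// X86SelectBranch - Lower a conditional branch: a compare or truncate feeding
// the branch from the same block is fused into a CMP/TEST plus a conditional
// jump; otherwise the i1 condition is tested directly with TEST8ri.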
bool X86FastISel::X86SelectBranch(const Instruction *I) {
    if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
      switch (Predicate) {

      if (!X86FastEmitCompare(Op0, Op1, VT))

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))

      FastEmitBranch(FalseMBB, DL);
      FuncInfo.MBB->addSuccessor(TrueMBB);

      if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
          isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
        unsigned TestOpc = 0;
        case MVT::i8:  TestOpc = X86::TEST8ri;  break;
        case MVT::i16: TestOpc = X86::TEST16ri; break;
        case MVT::i32: TestOpc = X86::TEST32ri; break;
        case MVT::i64: TestOpc = X86::TEST64ri32; break;
          unsigned OpReg = getRegForValue(TI->getOperand(0));
          if (OpReg == 0) return false;
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TestOpc))
            .addReg(OpReg).addImm(1);

          unsigned JmpOpc = X86::JNE_4;
          if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {

          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(JmpOpc))
          FastEmitBranch(FalseMBB, DL);
          FuncInfo.MBB->addSuccessor(TrueMBB);

  if (OpReg == 0) return false;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8ri))
    .addReg(OpReg).addImm(1);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
  FastEmitBranch(FalseMBB, DL);
  FuncInfo.MBB->addSuccessor(TrueMBB);
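
// X86SelectShift - Lower a variable-amount shl/lshr/ashr: the shift amount is
// copied into CL and the matching *rCL shift opcode for the operand width is
// emitted.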
bool X86FastISel::X86SelectShift(const Instruction *I) {
  unsigned CReg = 0, OpReg = 0;
    RC = &X86::GR8RegClass;
    case Instruction::LShr: OpReg = X86::SHR8rCL; break;
    case Instruction::AShr: OpReg = X86::SAR8rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL8rCL; break;
    default: return false;
    RC = &X86::GR16RegClass;
    case Instruction::LShr: OpReg = X86::SHR16rCL; break;
    case Instruction::AShr: OpReg = X86::SAR16rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL16rCL; break;
    default: return false;
    RC = &X86::GR32RegClass;
    case Instruction::LShr: OpReg = X86::SHR32rCL; break;
    case Instruction::AShr: OpReg = X86::SAR32rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL32rCL; break;
    default: return false;
    RC = &X86::GR64RegClass;
    case Instruction::LShr: OpReg = X86::SHR64rCL; break;
    case Instruction::AShr: OpReg = X86::SAR64rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL64rCL; break;
    default: return false;

  if (!isTypeLegal(I->getType(), VT))

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;

  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
          CReg).addReg(Op1Reg);

  if (CReg != X86::CL)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,

  unsigned ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
  UpdateValueMap(I, ResultReg);
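
// X86SelectDivRem - Lower sdiv/srem/udiv/urem through DIV/IDIV. The table
// below records, per operand width and operation, the divide opcode, how the
// dividend is sign- or zero-extended, and which register holds the desired
// result.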
bool X86FastISel::X86SelectDivRem(const Instruction *I) {
  const static unsigned NumTypes = 4;
  const static unsigned NumOps = 4;
  const static bool S = true;
  const static bool U = false;

  const static struct DivRemEntry {
    struct DivRemResult {
      unsigned OpSignExtend;
      unsigned DivRemResultReg;
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
    { &X86::GR8RegClass,  X86::AX,  0, {
        { X86::IDIV8r,  0,            X86::MOVSX16rr8, X86::AL,  S },
        { X86::IDIV8r,  0,            X86::MOVSX16rr8, X86::AH,  S },
        { X86::DIV8r,   0,            X86::MOVZX16rr8, X86::AL,  U },
        { X86::DIV8r,   0,            X86::MOVZX16rr8, X86::AH,  U },
    { &X86::GR16RegClass, X86::AX,  X86::DX, {
        { X86::IDIV16r, X86::CWD,     Copy,            X86::AX,  S },
        { X86::IDIV16r, X86::CWD,     Copy,            X86::DX,  S },
        { X86::DIV16r,  X86::MOV32r0, Copy,            X86::AX,  U },
        { X86::DIV16r,  X86::MOV32r0, Copy,            X86::DX,  U },
        { X86::IDIV32r, X86::CDQ,     Copy,            X86::EAX, S },
        { X86::IDIV32r, X86::CDQ,     Copy,            X86::EDX, S },
        { X86::DIV32r,  X86::MOV32r0, Copy,            X86::EAX, U },
        { X86::DIV32r,  X86::MOV32r0, Copy,            X86::EDX, U },
    { &X86::GR64RegClass, X86::RAX, X86::RDX, {
        { X86::IDIV64r, X86::CQO,     Copy,            X86::RAX, S },
        { X86::IDIV64r, X86::CQO,     Copy,            X86::RDX, S },
        { X86::DIV64r,  X86::MOV32r0, Copy,            X86::RAX, U },
        { X86::DIV64r,  X86::MOV32r0, Copy,            X86::RDX, U },

  if (!isTypeLegal(I->getType(), VT))

  unsigned TypeIndex, OpIndex;
  default: return false;
  case MVT::i8:  TypeIndex = 0; break;
  case MVT::i16: TypeIndex = 1; break;
  case MVT::i32: TypeIndex = 2; break;
    if (!Subtarget->is64Bit())

  case Instruction::SDiv: OpIndex = 0; break;
  case Instruction::SRem: OpIndex = 1; break;
  case Instruction::UDiv: OpIndex = 2; break;
  case Instruction::URem: OpIndex = 3; break;

  const DivRemEntry &TypeEntry = OpTable[TypeIndex];
  const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  unsigned Op1Reg = getRegForValue(I->getOperand(1));

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
          TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(OpEntry.OpSignExtend));
      unsigned Zero32 = createResultReg(&X86::GR32RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(X86::MOV32r0), Zero32);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                TII.get(Copy), TypeEntry.HighInReg)
          .addReg(Zero32, 0, X86::sub_16bit);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                TII.get(Copy), TypeEntry.HighInReg)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
          TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);

  unsigned ResultReg = 0;
  if ((I->getOpcode() == Instruction::SRem ||
      OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
    unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);
    unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(Copy), SourceSuperReg).addReg(X86::AX);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SHR16ri),
            ResultSuperReg).addReg(SourceSuperReg).addImm(8);
    ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
                                           true, X86::sub_8bit);
    ResultReg = createResultReg(TypeEntry.RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Copy), ResultReg)
      .addReg(OpEntry.DivRemResultReg);
  UpdateValueMap(I, ResultReg);
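
// X86SelectSelect - Lower a select on subtargets with CMOV: test the i1
// condition with TEST8rr and emit a CMOVE of the appropriate width.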
bool X86FastISel::X86SelectSelect(const Instruction *I) {
  if (!isTypeLegal(I->getType(), VT))

  if (!Subtarget->hasCMov()) return false;
    Opc = X86::CMOVE16rr;
    RC = &X86::GR16RegClass;
    Opc = X86::CMOVE32rr;
    RC = &X86::GR32RegClass;
    Opc = X86::CMOVE64rr;
    RC = &X86::GR64RegClass;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
    .addReg(Op0Reg).addReg(Op0Reg);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
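
// X86SelectFPExt / X86SelectFPTrunc - Convert between f32 and f64 with
// CVTSS2SD / CVTSD2SS when the scalar SSE units handle the types involved.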
bool X86FastISel::X86SelectFPExt(const Instruction *I) {
  if (X86ScalarSSEf64 &&
      unsigned OpReg = getRegForValue(V);
      if (OpReg == 0) return false;
      unsigned ResultReg = createResultReg(&X86::FR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(X86::CVTSS2SDrr), ResultReg)
      UpdateValueMap(I, ResultReg);

bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
  if (X86ScalarSSEf64) {
      unsigned OpReg = getRegForValue(V);
      if (OpReg == 0) return false;
      unsigned ResultReg = createResultReg(&X86::FR32RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(X86::CVTSD2SSrr), ResultReg)
      UpdateValueMap(I, ResultReg);
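
// X86SelectTrunc - Truncate an integer value; truncating to i8 on 32-bit
// targets first copies the input into a register class from which the
// sub_8bit subregister can be extracted.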
bool X86FastISel::X86SelectTrunc(const Instruction *I) {
  if (!TLI.isTypeLegal(SrcVT))

  unsigned InputReg = getRegForValue(I->getOperand(0));
    UpdateValueMap(I, InputReg);

  if (!Subtarget->is64Bit()) {
    unsigned CopyReg = createResultReg(CopyRC);
            CopyReg).addReg(InputReg);

  unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
  UpdateValueMap(I, ResultReg);
bool X86FastISel::IsMemcpySmall(uint64_t Len) {
  return Len <= (Subtarget->is64Bit() ? 32 : 16);
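
// TryEmitSmallMemcpy - Expand a small, constant-length memcpy inline as a
// sequence of loads and stores, using the widest legal chunk at each step.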
  if (!IsMemcpySmall(Len))

  bool i64Legal = Subtarget->is64Bit();
    if (Len >= 8 && i64Legal)

    bool RV = X86FastEmitLoad(VT, SrcAM, Reg);
    RV &= X86FastEmitStore(VT, Reg, DestAM);
    assert(RV && "Failed to emit load or store??");

    DestAM.Disp += Size;
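
// X86VisitIntrinsicCall - Handle the intrinsic calls fast-isel lowers
// directly: small fixed-length memcpys are expanded inline, larger
// memcpy/memset calls are forwarded to DoSelectCall as library calls, and the
// remaining cases emit a store, a debug annotation, or the arithmetic plus a
// SETcc of the overflow flag.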
bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
  default: return false;

    if (isa<ConstantInt>(MCI.getLength())) {
      uint64_t Len = cast<ConstantInt>(MCI.getLength())->getZExtValue();
      if (IsMemcpySmall(Len)) {
        if (!X86SelectAddress(MCI.getRawDest(), DestAM) ||
        TryEmitSmallMemcpy(DestAM, SrcAM, Len);

    unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
    return DoSelectCall(&I, "memcpy");

    unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
    return DoSelectCall(&I, "memset");

    EVT PtrTy = TLI.getPointerTy();
    if (!X86SelectAddress(Slot, AM)) return false;
    if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;

    assert(DI->getAddress() && "Null address should be checked earlier!");

      cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
    if (!isTypeLegal(RetTy, VT))

    unsigned Reg1 = getRegForValue(Op1);
    unsigned Reg2 = getRegForValue(Op2);
    if (Reg1 == 0 || Reg2 == 0)

    unsigned ResultReg = FuncInfo.CreateRegs(I.getType());
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
      .addReg(Reg1).addReg(Reg2);

    unsigned Opc = X86::SETBr;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg+1);
    UpdateValueMap(&I, ResultReg, 2);
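
// FastLowerArguments - Fast path for lowering formal arguments: only simple
// integer arguments that arrive in the standard GPR argument registers are
// handled; they are recorded as live-ins and copied into virtual registers.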
bool X86FastISel::FastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)
  if (Subtarget->isCallingConvWin64(CC))
  if (!Subtarget->is64Bit())

       I != E; ++I, ++Idx) {
    EVT ArgVT = TLI.getValueType(ArgTy);
    if (!ArgVT.isSimple()) return false;

  static const uint16_t GPR32ArgRegs[] = {
  static const uint16_t GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9

       I != E; ++I, ++Idx) {
    unsigned SrcReg = is32Bit ? GPR32ArgRegs[Idx] : GPR64ArgRegs[Idx];
    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
    unsigned ResultReg = createResultReg(RC);
    UpdateValueMap(I, ResultReg);
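
// X86SelectCall - Select a call: inline asm and tail calls are rejected,
// intrinsic calls are routed to X86VisitIntrinsicCall, and everything else
// goes through DoSelectCall.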
bool X86FastISel::X86SelectCall(const Instruction *I) {
  if (isa<InlineAsm>(Callee))
    return X86VisitIntrinsicCall(*II);
  if (cast<CallInst>(I)->isTailCall())

  return DoSelectCall(I, 0);
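
// DoSelectCall - The bulk of call lowering: classify the arguments with the
// calling convention, emit the argument copies and stores, adjust the stack,
// emit the (direct or indirect) call, and copy the results out of the return
// registers. MemIntName, when non-null, names the library routine for a
// lowered memory intrinsic.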
bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
  bool isWin64 = Subtarget->isCallingConvWin64(CC);
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  if (isVarArg && isWin64)
        TM.Options.GuaranteedTailCallOpt))

  bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
                                           *FuncInfo.MF, FTy->isVarArg(),
                                           Outs, FTy->getContext());
  if (!CanLowerReturn)

  if (!X86SelectCallAddress(Callee, CalleeAM))
  unsigned CalleeOp = 0;
  if (CalleeAM.GV != 0) {
  } else if (CalleeAM.Base.Reg != 0) {

  unsigned arg_size = CS.arg_size();
    if (MemIntName && e-i <= 2)
    unsigned AttrInd = i - CS.arg_begin() + 1;
      unsigned FrameSize = TD.getTypeAllocSize(ElementTy);
      unsigned FrameAlign = CS.getParamAlignment(AttrInd);
        FrameAlign = TLI.getByValTypeAlignment(ElementTy);
      if (!IsMemcpySmall(FrameSize))

    if (ConstantInt *CI = dyn_cast<ConstantInt>(ArgVal)) {
      if (CI->getBitWidth() == 1 || CI->getBitWidth() == 8 ||
          CI->getBitWidth() == 16) {
        cast<TruncInst>(ArgVal)->getParent() == I->getParent() &&
      ArgVal = cast<TruncInst>(ArgVal)->getOperand(0);
      ArgReg = getRegForValue(ArgVal);
      if (ArgReg == 0) return false;

      if (!isTypeLegal(ArgVal->getType(), ArgVT)) return false;
      ArgReg = FastEmit_ri(ArgVT, ArgVT, ISD::AND, ArgReg,
      ArgReg = getRegForValue(ArgVal);
    if (ArgReg == 0) return false;

    if (!isTypeLegal(ArgTy, ArgVT))
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);

  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs,
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86);

  unsigned NumBytes = CCInfo.getNextStackOffset();

  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    unsigned Arg = Args[VA.getValNo()];
             "Unexpected extend");
      assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
             "Unexpected extend");
      assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
             "Unexpected extend");
      assert(Emitted && "Failed to emit a aext!"); (void)Emitted;
      unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(),
      assert(BC != 0 && "Failed to emit a bitcast!");
        getTargetMachine()->getRegisterInfo());
      AM.Disp = LocMemOffset;
        bool Res = TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize());
        assert(Res && "memcpy length already checked!"); (void)Res;
      } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
        if (!X86FastEmitStore(ArgVT, ArgVal, AM))
        if (!X86FastEmitStore(ArgVT, Arg, AM))

  if (Subtarget->isPICStyleGOT()) {
    unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);

  if (Subtarget->is64Bit() && isVarArg && !isWin64) {
    static const uint16_t XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::MOV8ri),

    if (Subtarget->is64Bit())
      CallOpc = X86::CALL64r;
      CallOpc = X86::CALL32r;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
    assert(GV && "Not a direct call");
    if (Subtarget->is64Bit())
      CallOpc = X86::CALL64pcrel32;
      CallOpc = X86::CALLpcrel32;

    unsigned char OpFlags = 0;
    if (Subtarget->isTargetELF() &&
    } else if (Subtarget->isPICStyleStubAny() &&
               (!Subtarget->getTargetTriple().isMacOSX() ||
                Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc));

  MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));

  if (Subtarget->isPICStyleGOT())
  if (Subtarget->is64Bit() && isVarArg && !isWin64)

  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)

  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
    .addImm(NumBytes).addImm(NumBytesCallee);

  for (unsigned i = 0, e = RetTys.size(); i != e; ++i) {
    for (unsigned j = 0; j != NumRegs; ++j) {
      MyFlags.VT = RegisterVT;
      MyFlags.Used = !CS.getInstruction()->use_empty();

  CCState CCRetInfo(CC, false, *FuncInfo.MF, TM, RVLocs,
  unsigned ResultReg = FuncInfo.CreateRegs(I->getType());
  CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    EVT CopyVT = RVLocs[i].getValVT();
    unsigned CopyReg = ResultReg + i;

    if ((RVLocs[i].getLocReg() == X86::ST0 ||
         RVLocs[i].getLocReg() == X86::ST1)) {
      if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
        CopyReg = createResultReg(&X86::RFP80RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::FpPOP_RETVAL),
              CopyReg).addReg(RVLocs[i].getLocReg());
      UsedRegs.push_back(RVLocs[i].getLocReg());

    if (CopyVT != RVLocs[i].getValVT()) {
      EVT ResVT = RVLocs[i].getValVT();
      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
      int FI = MFI.CreateStackObject(MemSize, MemSize, false);
      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
                                TII.get(Opc), ResultReg + i), FI);

  UpdateValueMap(I, ResultReg, RVLocs.size());
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
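
// TargetSelectInstruction - Top-level dispatch that routes each IR
// instruction to the matching X86Select* routine; returning false makes
// fast-isel fall back to SelectionDAG for that instruction.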
bool X86FastISel::TargetSelectInstruction(const Instruction *I) {
    return X86SelectLoad(I);
    return X86SelectStore(I);
    return X86SelectRet(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
    return X86SelectCall(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return X86SelectDivRem(I);
    return X86SelectSelect(I);
  case Instruction::Trunc:
    return X86SelectTrunc(I);
  case Instruction::FPExt:
    return X86SelectFPExt(I);
  case Instruction::FPTrunc:
    return X86SelectFPTrunc(I);
  case Instruction::IntToPtr:
  case Instruction::PtrToInt: {
      return X86SelectZExt(I);
      return X86SelectTrunc(I);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
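
// TargetMaterializeConstant - Materialize a constant into a register: global
// addresses are formed with LEA, other constants are loaded from the constant
// pool using the PIC base register and operand flags the subtarget requires.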
unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
  if (!isTypeLegal(C->getType(), VT))

    RC  = &X86::GR8RegClass;
    RC  = &X86::GR16RegClass;
    RC  = &X86::GR32RegClass;
    RC  = &X86::GR64RegClass;
    if (X86ScalarSSEf32) {
      Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
      RC  = &X86::FR32RegClass;
      Opc = X86::LD_Fp32m;
      RC  = &X86::RFP32RegClass;
    if (X86ScalarSSEf64) {
      Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
      RC  = &X86::FR64RegClass;
      Opc = X86::LD_Fp64m;
      RC  = &X86::RFP64RegClass;

  if (isa<GlobalValue>(C)) {
    if (X86SelectAddress(C, AM)) {
      Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
      unsigned ResultReg = createResultReg(RC);
                             TII.get(Opc), ResultReg), AM);

    Align = TD.getTypeAllocSize(C->getType());

  unsigned PICBase = 0;
  unsigned char OpFlag = 0;
  if (Subtarget->isPICStyleStubPIC()) {
    PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
  } else if (Subtarget->isPICStyleGOT()) {
    PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
  } else if (Subtarget->isPICStyleRIPRel() &&

  unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
  unsigned ResultReg = createResultReg(RC);
                                   TII.get(Opc), ResultReg),
                          MCPOffset, PICBase, OpFlag);
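
// TargetMaterializeAlloca - Materialize the address of a static alloca with
// an LEA of its frame slot.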
unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
  if (!FuncInfo.StaticAllocaMap.count(C))
  if (!X86SelectAddress(C, AM))
  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  unsigned ResultReg = createResultReg(RC);
                         TII.get(Opc), ResultReg), AM);
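
// TargetMaterializeFloatZero - Materialize +0.0 with the FsFLD0SS/FsFLD0SD
// pseudos when scalar SSE is available, or the x87 load-zero pseudos
// otherwise.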
unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
  if (!isTypeLegal(CF->getType(), VT))

    if (X86ScalarSSEf32) {
      Opc = X86::FsFLD0SS;
      RC  = &X86::FR32RegClass;
      Opc = X86::LD_Fp032;
      RC  = &X86::RFP32RegClass;
    if (X86ScalarSSEf64) {
      Opc = X86::FsFLD0SD;
      RC  = &X86::FR64RegClass;
      Opc = X86::LD_Fp064;
      RC  = &X86::RFP64RegClass;

  unsigned ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
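
// tryToFoldLoadIntoMI - Try to fold the load LI into operand OpNo of MI; on
// success the folded instruction is inserted at the current insert point.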
bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
  if (!X86SelectAddress(LI->getOperand(0), AM))

  unsigned Size = TD.getTypeAllocSize(LI->getType());

  if (Result == 0) return false;

  FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);

  return new X86FastISel(funcInfo, libInfo);