#define DEBUG_TYPE "isel"
STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
          "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
          "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
void FastISel::flushLocalValueMap() {
  // ...
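// hasTrivialKill - Return true if V's only use is in the same basic block as
// its definition, so its vreg can be marked killed at that use. No-op casts
// and all-zero-index GEPs are looked through, since fast-isel coalesces them
// with their operand.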
bool FastISel::hasTrivialKill(const Value *V) const {
  // ...
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    // ...
        !hasTrivialKill(Cast->getOperand(0)))
  // ...
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
  // ...
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
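// getRegForValue - Instructions other than static allocas just get a vreg
// assigned up front; other values fall through to materializeRegForValue
// below.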
unsigned FastISel::getRegForValue(const Value *V) {
  // ...
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
  // ...
    Reg = materializeRegForValue(V, VT);
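// materializeRegForValue - Helper for getRegForValue: emit instructions to
// produce V in a register when it is not already available, handling constant
// ints, allocas, null pointers, FP constants (falling back to an integer
// constant plus a conversion), constant expressions, and undef.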
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  // ...
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      // ...
  } else if (isa<AllocaInst>(V)) {
    // ...
  } else if (isa<ConstantPointerNull>(V)) {
    // ...
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      // ...
    }
    // ...
      const APFloat &Flt = CF->getValueAPF();
      // ...
        unsigned IntegerReg =
          // ...
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!isa<Instruction>(Op) ||
        // ...
  } else if (isa<UndefValue>(V)) {
    // ...
  }

  if (!Reg && isa<Constant>(V))
    // ...
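// UpdateValueMap - Record that the value I is available in register Reg.
// Non-instruction values go into LocalValueMap; if the instruction already
// had a different register assigned, RegFixups entries are recorded so uses
// of the old register can be rewritten after selection.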
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    // ...
  if (AssignedReg == 0)
    // ...
  else if (Reg != AssignedReg) {
    // ...
    for (unsigned i = 0; i < NumRegs; i++)
      // ...
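// getRegForGEPIndex - Return the register holding a GEP index, extended or
// truncated to pointer width, together with a flag indicating whether the
// register is killed by this use.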
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  // ...
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // ...
  if (IdxVT.bitsLT(PtrVT)) {
    // ...
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    // ...
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
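// removeDeadCode - Remove all dead instructions between iterators I and E,
// i.e. instructions emitted speculatively by a selection attempt that then
// failed (counted by NumFastIselDead above).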
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!");
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  // ...
  if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
    // ...
    if (Op1 == 0) return false;

    bool Op1IsKill = hasTrivialKill(I->getOperand(1));
    // ...
                                      Op1IsKill, CI->getZExtValue(),
    // ...
    if (ResultReg == 0) return false;
  // ...
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
  // ...
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
    // ...
    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
    // ...
    if (ResultReg == 0) return false;
  // ...
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
  // ...
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
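// SelectGetElementPtr - Lower a GEP by folding constant struct and array
// offsets into a running TotalOffs (flushed whenever it exceeds MaxOffs) and
// emitting an explicit multiply/add for each variable index.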
bool FastISel::SelectGetElementPtr(const User *I) {
  // ...
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // ...
  uint64_t TotalOffs = 0;
  // ...
  uint64_t MaxOffs = 2048;
  // ...
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      // ...
        if (TotalOffs >= MaxOffs) {
          // ...
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();
      // ...
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        // ...
        if (TotalOffs >= MaxOffs) {
          // ...
      // ...
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      // ...
      if (ElementSize != 1) {
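// SelectCall - Handle CallInst and friends: simple inline asm (empty
// constraint string), the debug intrinsics llvm.dbg.declare and
// llvm.dbg.value, and a few other intrinsics. Arbitrary calls are left to the
// target selector, after flushing the local value map so locally materialized
// constants are not kept live across the call.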
bool FastISel::SelectCall(const User *I) {
  // ...
  if (!IA->getConstraintString().empty())
    // ...
  unsigned ExtraInfo = 0;
  if (IA->hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA->isAlignStack())
    ExtraInfo |= InlineAsm::Extra_IsAlignStack;
  // ...
      .addExternalSymbol(IA->getAsmString().c_str())
  // ...
  if (!F) return false;
  // ...
    assert((!DIVar || DIVar.isVariable()) &&
           "Variable in DbgDeclareInst should be either null or a DIVariable.");
    // ...
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    // ...
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      // ...
    if (const Argument *Arg = dyn_cast<Argument>(Address))
      // ...
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
      // ...
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    // ...
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        // ...
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      // ...
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    // ...
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
  // ...
  if (!isa<IntrinsicInst>(Call))
    flushLocalValueMap();
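// SelectCast - Select a cast (truncate/extend/FP conversion): legalize the
// source and destination types and emit a single FastEmit_r instruction.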
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  // ...
  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
  // ...
                                  InputReg, InputRegIsKill);
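// SelectBitCast - A bitcast between identical value types in the same
// register class is just a COPY; otherwise fall back to FastEmit_r with
// ISD::BITCAST.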
bool FastISel::SelectBitCast(const User *I) {
  // ...
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
  // ...
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    // ...
    if (SrcClass == DstClass) {
      // ...
              ResultReg).addReg(Op0);
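// SelectInstruction - Main per-instruction entry point: try the
// target-independent SelectOperator path first, then the target hook
// TargetSelectInstruction, updating the statistics above and removing any
// dead instructions emitted by a failed attempt.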
bool FastISel::SelectInstruction(const Instruction *I) {
  // ...
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      // ...
  if (const CallInst *Call = dyn_cast<CallInst>(I)) {
    // ...
  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    ++NumFastIselSuccessIndependent;
    // ...
  if (!isa<CallInst>(I)) {
    // ...
  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
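// SelectFNeg - Lower "fsub -0.0, X". If the target cannot emit an FNEG
// directly, the fallback XORs the sign bit in an integer register of the
// same width and bitcasts back.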
bool FastISel::SelectFNeg(const User *I) {
  // ...
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);
  // ...
  if (ResultReg != 0) {
    // ...
  if (IntResultReg == 0)
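// SelectExtractValue - extractvalue creates no code; it maps the result to
// the vreg range already computed for the aggregate, offset by the linearized
// index of the extracted member.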
bool FastISel::SelectExtractValue(const User *U) {
  // ...
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    // ...
  for (unsigned i = 0; i < VTIndex; i++)
    // ...
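// SelectOperator - Dispatch on the IR opcode to the Select* helpers above.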
bool FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    // ...
  }

  case Instruction::Unreachable:
    // ...

  case Instruction::Alloca:
    // ...

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    // ...
    if (Reg == 0) return false;
    // ...
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);
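// FastISel constructor - cache the commonly used target objects (register
// info, frame info, constant pool, instruction info, lowering info, data
// layout) from the MachineFunction.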
FastISel::FastISel(FunctionLoweringInfo &funcInfo,
                   const TargetLibraryInfo *libInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getDataLayout()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()),
    LibInfo(libInfo) {
}
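// FastEmit_ri_ - Wrapper around FastEmit_ri; if the target cannot fold the
// immediate, materialize it into a register and retry with FastEmit_rr.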
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // ...
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  // ...
  if (MaterialReg == 0) {
    // ...
    assert(MaterialReg != 0 && "Unable to materialize imm.");
    if (MaterialReg == 0) return 0;
  }
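// The FastEmitInst_* helpers below each create a result register from RC and
// build a MachineInstr of the requested opcode with the given register,
// immediate, and FP-immediate operands; kill flags are forwarded through
// RegState::Kill.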
unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  // ...
      .addReg(Op0, Op0IsKill * RegState::Kill);

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  // ...
      .addReg(Op1, Op1IsKill * RegState::Kill);
  // ...
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);

unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  // ...
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  // ...
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  // ...
      .addReg(Op0, Op0IsKill * RegState::Kill)

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  // ...
      .addReg(Op0, Op0IsKill * RegState::Kill)

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  // ...
      .addReg(Op0, Op0IsKill * RegState::Kill)

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  // ...
      .addReg(Op1, Op1IsKill * RegState::Kill)
  // ...
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)

unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill,
                                     uint64_t Imm1, uint64_t Imm2) {
  // ...
      .addReg(Op1, Op1IsKill * RegState::Kill)
  // ...
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstrOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  // ...

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  // ...
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  // ...
    if (!isa<PHINode>(SuccBB->begin())) continue;
    // ...
    if (!SuccsHandled.insert(SuccMBB)) continue;
    // ...
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();
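// tryToFoldLoad - Check whether LI can be folded into FoldInst: the load's
// single use must reach FoldInst through a short chain of users (bounded by
// MaxUsers), and the target hook tryToFoldLoadIntoMI must accept the fold.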
1520 "tryToFoldLoad expected a LoadInst with a single use");
1524 unsigned MaxUsers = 6;
1527 while (TheUser != FoldInst &&
1540 if (TheUser != FoldInst)
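// canFoldAddIntoGEP - Check whether Add is an add that can be safely folded
// into the GEP's addressing computation: same type width, same basic block,
// and a constant right-hand operand.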
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // ...
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      // ...
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}