cl::desc("Disable hazard detection during preRA scheduling"));
bool atInsnStart = true;
if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
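The two fragments above come from getInlineAsmLength(), which estimates the size of an inline-asm blob by counting statements and multiplying by the target's maximum instruction length. A minimal standalone sketch of that counting logic, simplified in that it only treats newlines as statement boundaries (the real implementation also honors the separator and comment strings from MCAsmInfo); MaxInstLength here stands in for MCAsmInfo::getMaxInstLength():

#include <cctype>

// Count statements in an inline-asm string and multiply by a fixed
// maximum instruction length.
unsigned estimateInlineAsmLength(const char *Str, unsigned MaxInstLength) {
  unsigned NumInsts = 0;
  bool AtInsnStart = true;
  for (; *Str; ++Str) {
    if (*Str == '\n') {
      AtInsnStart = true;   // next non-blank character starts a new statement
      continue;
    }
    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      ++NumInsts;
      AtInsnStart = false;
    }
  }
  return NumInsts * MaxInstLength;
}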
Tail->getDebugLoc());
Msg << "Don't know how to commute: " << *MI;
"This only knows how to commute register operands so far");
if (HasDef && Reg0 == Reg1 &&
} else if (HasDef && Reg0 == Reg2 &&
unsigned &SrcOpIdx2) const {
"TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
if (!MCID.isCommutable())
SrcOpIdx1 = MCID.getNumDefs();
SrcOpIdx2 = SrcOpIdx1 + 1;
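The last two lines show the default findCommutedOpIndices(): unless a target overrides it, the first two source operands (operands NumDefs and NumDefs + 1) are assumed to be the commutable pair. A sketch of what a target-specific override could look like; MyTargetInstrInfo and MyTarget::MLA are made-up names used only for illustration, and the usual backend scaffolding is assumed:

bool MyTargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                              unsigned &SrcOpIdx1,
                                              unsigned &SrcOpIdx2) const {
  // Hypothetical multiply-accumulate: the two multiplicands (operands 2 and 3)
  // may be swapped; everything else uses the generic convention.
  if (MI->getOpcode() == MyTarget::MLA) {
    SrcOpIdx1 = 2;
    SrcOpIdx2 = 3;
    return true;
  }
  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}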
bool MadeChange = false;
"TargetInstrInfo::PredicateInstruction() can't handle bundles");
for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (MCID.OpInfo[i].isPredicate()) {
} else if (MO.isImm()) {
MO.setImm(Pred[j].getImm());
} else if (MO.isMBB()) {
MO.setMBB(Pred[j].getMBB());
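This loop is the core of the default PredicateInstruction(): for every operand the MCInstrDesc marks as a predicate, the corresponding register, immediate, or basic-block value from the supplied Pred list is written into the instruction. A minimal standalone sketch of the same shape (plain C++; the setReg/setImm/setMBB dispatch is collapsed into a single copy):

#include <cstddef>
#include <vector>

struct Operand {
  enum Kind { Reg, Imm, MBB } K;   // what the value represents
  long long Val;                   // register number, immediate, or block id
};

bool predicateInstruction(std::vector<Operand> &Ops,
                          const std::vector<bool> &IsPredicateOp,
                          const std::vector<Operand> &Pred) {
  bool MadeChange = false;
  for (std::size_t J = 0, I = 0, E = Ops.size(); I != E; ++I) {
    if (!IsPredicateOp[I])
      continue;            // only predicate operands are rewritten
    Ops[I] = Pred[J++];    // copy the predicate value over
    MadeChange = true;
  }
  return MadeChange;
}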
if ((*o)->isLoad() && (*o)->getValue())
dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
FrameIndex = Value->getFrameIndex();
if ((*o)->isStore() && (*o)->getValue())
dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
FrameIndex = Value->getFrameIndex();
unsigned SubIdx, unsigned &Size,
if (BitOffset < 0 || BitOffset % 8)
assert(RC->getSize() >= (Offset + Size) && "bad subregister range");
Offset = RC->getSize() - (Offset + Size);
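The last two lines are from getStackSlotRange(): once the byte Size and little-endian Offset of a sub-register within its register class are known, the offset is mirrored when the target is big-endian. A tiny illustration of that mirroring (the example numbers in the comment are made up):

// A sub-register of SubBytes bytes at little-endian offset LEOffset inside a
// RegBytes-wide register class sits at the mirrored offset in big-endian layout.
unsigned bigEndianOffset(unsigned RegBytes, unsigned SubBytes, unsigned LEOffset) {
  return RegBytes - (LEOffset + SubBytes);
}
// e.g. bigEndianOffset(16, 4, 8) == 4: offset 8 of a 16-byte class becomes offset 4.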
331 "Instruction cannot be duplicated");
339 assert(MI->
isCopy() &&
"MI must be a COPY instruction");
342 assert(FoldIdx<2 &&
"FoldIdx refers no nonexistent operand");
350 unsigned FoldReg = FoldOp.
getReg();
351 unsigned LiveReg = LiveOp.
getReg();
354 "Cannot fold physregs");
386 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
387 if (MI->getOperand(Ops[i]).isDef())
393 assert(MBB &&
"foldMemoryOperand needs an inserted instruction");
398 NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
401 NewMI->mayStore()) &&
402 "Folded a def to a non-store!");
405 "Folded a use to a non-load!");
412 NewMI->addMemOperand(MF, MMO);
415 return MBB->
insert(MI, NewMI);
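At this point foldMemoryOperand() has obtained a folded instruction from the target's foldMemoryOperandImpl(), transferred or added memory operands, and inserted it before the original instruction. A hypothetical caller (not code from this file; TII, MI, OpIdx and FrameIndex are assumed to be set up by the surrounding spill code) might drive it like this:

SmallVector<unsigned, 4> Ops;
Ops.push_back(OpIdx);   // the operand to rewrite as a stack access
if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, Ops, FrameIndex)) {
  // The folded instruction is already in the block; the original is now dead.
  MI->eraseFromParent();
}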
if (!MI->isCopy() || Ops.size() != 1)
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
if (!NewMI) return 0;
NewMI = MBB.insert(MI, NewMI);
if (MI->memoperands_empty()) {
MI->memoperands_end());
bool TargetInstrInfo::
if (!MO.isReg()) continue;
if (MO.isDef() && Reg != DefReg)
SDNode *DefNode, unsigned DefIdx,
SDNode *UseNode, unsigned UseIdx) const {
if (!ItinData || ItinData->isEmpty())
if (!ItinData || ItinData->isEmpty())
if (!ItinData || ItinData->isEmpty())
unsigned *PredCost) const {
unsigned DefIdx) const {
if (!ItinData || ItinData->isEmpty())
return (DefCycle != -1 && DefCycle <= 1);
assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");
if (OperLatency >= 0)
InstrLatency = std::max(InstrLatency,
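These last fragments come from the latency helpers: computeOperandLatency() first asks computeDefOperandLatency(), then tries an itinerary-based per-operand latency, and finally falls back to the larger of the whole-instruction latency and the default def latency. A simplified standalone sketch of that final selection (names are illustrative):

#include <algorithm>

// Prefer an explicit per-operand latency; otherwise take the larger of the
// whole-instruction latency and the default def latency.
int resolveDefLatency(int OperLatency, int InstrLatency, int DefaultDefLatency) {
  if (OperLatency >= 0)
    return OperLatency;
  return std::max(InstrLatency, DefaultDefLatency);
}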
bool isConstantPhysReg(unsigned PhysReg, const MachineFunction &MF) const
virtual MachineInstr * duplicate(MachineInstr *Orig, MachineFunction &MF) const
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
The memory access reads data.
const MachineFunction * getParent() const
virtual const TargetLowering * getTargetLowering() const
instr_iterator erase(instr_iterator I)
The memory access writes data.
virtual bool hasStoreToStackSlot(const MachineInstr *MI, const MachineMemOperand *&MMO, int &FrameIndex) const
bool isBranch(QueryType Type=AnyInBundle) const
unsigned computeOperandLatency(const InstrItineraryData *ItinData, const MachineInstr *DefMI, unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions. Register definitions always occur...
virtual bool hasLoadFromStackSlot(const MachineInstr *MI, const MachineMemOperand *&MMO, int &FrameIndex) const
bool usePreRAHazardRecognizer() const
bool hasSubClassEq(const TargetRegisterClass *RC) const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f, uint64_t s, unsigned base_alignment, const MDNode *TBAAInfo=0, const MDNode *Ranges=0)
bool mayStore(QueryType Type=AnyInBundle) const
static bool isVirtualRegister(unsigned Reg)
bool isPredicable(QueryType Type=AllInBundle) const
bool readsVirtualRegister(unsigned Reg) const
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
const MCInstrDesc & getDesc() const
virtual bool isSchedulingBoundary(const MachineInstr *MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, unsigned SubIdx, const MachineInstr *Orig, const TargetRegisterInfo &TRI) const
const MCSchedModel * SchedModel
Basic machine properties.
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
MachineInstr * foldMemoryOperand(MachineBasicBlock::iterator MI, const SmallVectorImpl< unsigned > &Ops, int FrameIndex) const
bool isTerminator(QueryType Type=AnyInBundle) const
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const char *reason, bool gen_crash_diag=true)
static MachinePointerInfo getFixedStack(int FI, int64_t offset=0)
int NumMicroOps
Number of micro-ops; -1 means it's variable.
virtual unsigned getPredicationCost(const MachineInstr *MI) const
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
#define llvm_unreachable(msg)
const InstrItinerary * Itineraries
Array of itineraries selected.
const TargetRegisterClass * getRegClass(unsigned i) const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool mayLoad(QueryType Type=AnyInBundle) const
const TargetRegisterClass * getRegClass(unsigned Reg) const
Abstract Stack Frame Information.
unsigned getNumOperands() const
virtual MachineInstr * commuteInstruction(MachineInstr *MI, bool NewMI=false) const
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const TargetMachine *TM) const
bool isLittleEndian() const
Layout endianness...
bool isImmutableObjectIndex(int ObjectIdx) const
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
const MachineBasicBlock * getParent() const
mmo_iterator memoperands_end() const
bundle_iterator< MachineInstr, instr_iterator > iterator
virtual bool hasLowDefLatency(const InstrItineraryData *ItinData, const MachineInstr *DefMI, unsigned DefIdx) const
initializer< Ty > init(const Ty &Val)
virtual bool produceSameValue(const MachineInstr *MI0, const MachineInstr *MI1, const MachineRegisterInfo *MRI=0) const
const MachineOperand & getOperand(unsigned i) const
unsigned getStageLatency(unsigned ItinClassIndx) const
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
void setMBB(MachineBasicBlock *MBB)
void setImm(int64_t immVal)
bool hasUnmodeledSideEffects() const
bool isInvariantLoad(AliasAnalysis *AA) const
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
unsigned getStackPointerRegisterToSaveRestore() const
succ_iterator succ_begin()
void removeSuccessor(MachineBasicBlock *succ)
unsigned getSubReg() const
bool isNotDuplicable(QueryType Type=AnyInBundle) const
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set. Returns -1 if it is not set...
void setIsKill(bool Val=true)
int64_t getObjectOffset(int ObjectIdx) const
virtual const TargetInstrInfo * getInstrInfo() const
virtual bool PredicateInstruction(MachineInstr *MI, const SmallVectorImpl< MachineOperand > &Pred) const
void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
bool isIdenticalTo(const MachineInstr *Other, MICheckType Check=CheckDefs) const
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
virtual ~TargetInstrInfo()
unsigned getObjectAlignment(int ObjectIdx) const
getObjectAlignment - Return the alignment of the specified stack object.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr *MI) const
virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineFrameInfo * getFrameInfo()
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, const SmallVectorImpl< MachineOperand > &Cond, DebugLoc DL) const
size_t strlen(const char *s);
unsigned defaultDefLatency(const MCSchedModel *SchedModel, const MachineInstr *DefMI) const
Return the default expected latency for a def based on its opcode.
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr *MI, unsigned *PredCost=0) const
int getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetMachine *TM, const ScheduleDAG *DAG) const
static bool isPhysicalRegister(unsigned Reg)
unsigned getSchedClass() const
Return the scheduling class for this instruction. The scheduling class is an index into the InstrItin...
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI) const
MachineRegisterInfo & getRegInfo()
virtual unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, const SmallVectorImpl< unsigned > &Ops, int FrameIndex) const
const char * getSeparatorString() const
void setReg(unsigned Reg)
virtual const DataLayout * getDataLayout() const
void setSubReg(unsigned subReg)
virtual bool canFoldMemoryOperand(const MachineInstr *MI, const SmallVectorImpl< unsigned > &Ops) const
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
const TargetMachine & getTarget() const
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
bool isLookupPtrRegClass() const
Currently no other information.
instr_iterator insert(instr_iterator I, MachineInstr *M)
virtual const TargetRegisterInfo * getRegisterInfo() const
unsigned getSubRegIdxOffset(unsigned Idx) const
Get the offset of the bit range covered by a sub-register index. If an Offset doesn't make sense (the...
static const TargetRegisterClass * canFoldCopy(const MachineInstr *MI, unsigned FoldIdx)
unsigned getReg() const
getReg - Returns the register number.
LLVM Value Representation.
unsigned getMaxInstLength() const
int computeDefOperandLatency(const InstrItineraryData *ItinData, const MachineInstr *DefMI) const
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction. Note that variadic (isVari...
const MCOperandInfo * OpInfo
BasicBlockListType::iterator iterator
virtual bool isPredicated(const MachineInstr *MI) const
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
const MCRegisterInfo & MRI
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
int strncmp(const char *s1, const char *s2, size_t n);
unsigned getSubRegIdxSize(unsigned Idx) const
Get the size of the bit range covered by a sub-register index. If the index isn't continuous...
const char * getCommentString() const
void addSuccessor(MachineBasicBlock *succ, uint32_t weight=0)
virtual const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd)
int64_t getObjectSize(int ObjectIdx) const
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
bool isBarrier(QueryType Type=AnyInBundle) const
virtual bool isHighLatencyDef(int opc) const
bool isMachineOpcode() const
unsigned getMachineOpcode() const
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
bool contains(unsigned Reg) const