Excerpts from the SI DAG lowering implementation (SITargetLowering); elided
code is marked "// ...".

// SITargetLowering::allowsUnalignedMemoryAccesses (excerpt)
bool SITargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                     bool *IsFast) const {
// SITargetLowering::LowerParameter — load a kernel parameter from constant
// memory at the given byte Offset.
SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         SDLoc DL, SDValue Chain,
                                         unsigned Offset) const {
// SITargetLowering::LowerFormalArguments (excerpt)
SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  // ...
  uint32_t Skipped = 0;
  // First pass over the inputs: record which pixel-shader inputs are unused
  // and can be skipped.
  for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
    // ...
    assert((PSInputNum <= 15) && "Too many PS inputs!");
    // ...
    for (unsigned j = 0; j != NumElements; ++j) {
      // ...
    }
  }
  // ...
  CCInfo.AllocateReg(AMDGPU::VGPR0);
  CCInfo.AllocateReg(AMDGPU::VGPR1);
  // ...
  CCInfo.AllocateReg(AMDGPU::SGPR0);
  CCInfo.AllocateReg(AMDGPU::SGPR1);
  MF.addLiveIn(AMDGPU::SGPR0_SGPR1, &AMDGPU::SReg_64RegClass);
  // ...
  // Second pass: materialize each argument, consulting the Skipped mask.
  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    if (Skipped & (1 << i)) {
      // ...
    }
    // ...
    EVT MemVT = Splits[i].VT;
    // ...
    assert(VA.isRegLoc() && "Parameter must be in a register!");
    // ...
    Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
                                   &AMDGPU::SReg_64RegClass);
    Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass);
    // ...
    for (unsigned j = 1; j != NumElements; ++j) {
      Reg = ArgLocs[ArgIdx++].getLocReg();
      // ...
    }
    // ...
    for (unsigned j = 0; j != NumElements; ++j)
      // ...
  }
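The two-pass structure above first records unused pixel-shader inputs in the
Skipped bitmask, then consults it when materializing arguments. A minimal
standalone sketch of that bookkeeping (SkipMask, markSkipped, and isSkipped
are hypothetical names, not from the source):

#include <cassert>
#include <cstdint>

// Sketch: track up to 32 skippable arguments in one word, as Skipped does.
struct SkipMask {
  uint32_t Bits = 0;

  void markSkipped(unsigned i) {
    assert(i < 32 && "mask only covers the first 32 arguments");
    Bits |= 1u << i;
  }

  bool isSkipped(unsigned i) const {
    return (Bits & (1u << i)) != 0;
  }
};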
// SITargetLowering::EmitInstrWithCustomInserter (excerpt)
  switch (MI->getOpcode()) {
  // ...
  case AMDGPU::BRANCH:
    return BB;
  case AMDGPU::SI_ADDR64_RSRC: {
    // ...
        .addImm(AMDGPU::sub0_sub1)
        // ...
        .addImm(AMDGPU::sub2_sub3);
    // ...
  }
  case AMDGPU::V_SUB_F64: {
    // ...
  }
  case AMDGPU::SI_RegisterStorePseudo: {
    // ...
  }
// SITargetLowering::LowerOperation (excerpt)
  case ISD::ADD:
    return LowerADD(Op, DAG);
  // ...
    return LowerLOAD(Op, DAG);
// Lowering of intrinsics without chain (excerpt): implicit kernel parameters
// are read from constant memory at consecutive 4-byte offsets.
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  // ...
  unsigned NumUserSGPRs = 2;
  switch (IntrinsicID) {
  // ... (one intrinsic case label precedes each return below)
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 0);
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4);
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 8);
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 12);
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 16);
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 20);
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 24);
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 28);
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 32);
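Each parameter read above is a 4-byte value laid out back to back, which is
why the offsets step 0, 4, 8, ..., 32. A sketch of that layout rule
(paramOffset is a hypothetical name):

// Sketch: byte offset of the Nth consecutive 4-byte implicit parameter.
unsigned paramOffset(unsigned Index) {
  const unsigned ParamSize = 4; // each parameter above is a 32-bit value
  return Index * ParamSize;     // yields 0, 4, 8, ..., 32 for Index 0..8
}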
  // Three more cases return consecutive SGPRs following the user SGPRs; the
  // enclosing CreateLiveInRegister(...) call heads are elided here.
      AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 0), VT);
  // ...
      AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 1), VT);
  // ...
      AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 2), VT);
  // ...
  case AMDGPUIntrinsic::SI_load_const: {
    // ...
        ResourceDescriptorToi128(Op.getOperand(1), DAG),
    // ...
  }
  case AMDGPUIntrinsic::SI_sample:
    // ...
  case AMDGPUIntrinsic::SI_sampleb:
    // ...
  case AMDGPUIntrinsic::SI_sampled:
    // ...
  case AMDGPUIntrinsic::SI_samplel:
    // ...
  case AMDGPUIntrinsic::SI_vs_load_input:
    // ...
        ResourceDescriptorToi128(Op.getOperand(1), DAG),
// Lowering of intrinsics with chain (excerpt): SI_tbuffer_store builds a
// memory intrinsic node from a resource descriptor and an operand array.
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  // ...
  switch (IntrinsicID) {
  case AMDGPUIntrinsic::SI_tbuffer_store: {
    // ...
        ResourceDescriptorToi128(Op.getOperand(2), DAG),
    // ...
        sizeof(Ops)/sizeof(Ops[0]), VT, MMO);
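The sizeof(Ops)/sizeof(Ops[0]) expression passed above is the classic C idiom
for the element count of a fixed-size array. A type-safe C++ sketch of the
same computation (arrayLength is a hypothetical helper, not the LLVM one):

#include <cstddef>

// Sketch: deduce the element count of a C array at compile time.
template <typename T, std::size_t N>
constexpr std::size_t arrayLength(T (&)[N]) {
  return N;
}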
// findUser — helper for LowerBRCOND: find a user of Value with the given
// opcode.
    if (I.getUse().get() != Value)
      continue;
    if (I->getOpcode() == Opcode)
      return *I;
// SITargetLowering::LowerBRCOND (excerpt): rebuild the intrinsic's value
// list and operand list around the branch.
  for (unsigned i = 1, e = Intr->getNumValues(); i != e; ++i)
    // ...
  for (unsigned i = 1, e = Intr->getNumOperands(); i != e; ++i)
    // ...
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    // ...
  }
  // ...
      SDValue(Intr, Intr->getNumValues() - 1),
// SITargetLowering::LowerSampleIntrinsic (excerpt)
SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode, SDValue Op,
                                               SelectionDAG &DAG) const {
  // ...
      ResourceDescriptorToi128(Op.getOperand(3), DAG),
// Vector load/store splitting (excerpt): wide vectors are split into
// multiple individually loadable/storable parts.
  if (VT.isVector() && VT.getVectorNumElements() >= 8)
    // ...
  for (unsigned i = 0; i < 2; ++i) {
    // ...
  }
  for (unsigned i = 0; i < 2; ++i) {
    for (unsigned j = 0; j < 2; ++j) {
      // ...
    }
  }
  // ...
  for (unsigned i = 0; i < Values.size(); ++i) {
    // ...
        Chain, Values[i], PartPtr,
    // ...
  }
// SITargetLowering::PerformDAGCombine (excerpt)
  if ((True = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      /* ... */) {
// isVSrc / isSSrc — test whether RegClass is one of the VSrc/SSrc classes.
static bool isVSrc(unsigned RegClass) {
  return AMDGPU::VSrc_32RegClassID == RegClass ||
         AMDGPU::VSrc_64RegClassID == RegClass;
}

static bool isSSrc(unsigned RegClass) {
  return AMDGPU::SSrc_32RegClassID == RegClass ||
         AMDGPU::SSrc_64RegClassID == RegClass;
}
// SITargetLowering::analyzeImmediate (excerpt): classify an operand as not
// an immediate (-1), an inline constant (0), or a literal immediate.
  if (Node->getZExtValue() >> 32) {
    return -1; // does not fit into 32 bits
  }
  Imm.I = Node->getSExtValue();
  // ...
  Imm.F = Node->getValueAPF().convertToFloat();
  // ...
  if ((Imm.I >= -16 && Imm.I <= 64) ||
      Imm.F == 0.5f || Imm.F == -0.5f ||
      Imm.F == 1.0f || Imm.F == -1.0f ||
      Imm.F == 2.0f || Imm.F == -2.0f ||
      Imm.F == 4.0f || Imm.F == -4.0f)
    return 0; // encodable as an inline constant
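The test above encodes SI's inline-constant rule: small integers in [-16, 64]
and a handful of float constants can be encoded directly in the instruction,
costing no literal slot. A standalone sketch of the same check
(isInlineImmediate is a hypothetical name; memcpy replaces the union pun):

#include <cstdint>
#include <cstring>

// Sketch: would this 32-bit pattern qualify as an SI inline constant?
bool isInlineImmediate(int32_t I) {
  if (I >= -16 && I <= 64)
    return true;
  float F;
  std::memcpy(&F, &I, sizeof(F)); // reinterpret the same bits as a float
  return F == 0.5f || F == -0.5f || F == 1.0f || F == -1.0f ||
         F == 2.0f || F == -2.0f || F == 4.0f || F == -4.0f;
}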
// SITargetLowering::foldImm (excerpt): try to fold a constant into the
// single literal/scalar slot an instruction provides.
bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate,
                               bool &ScalarSlotUsed) const {
  // ...
  } else if (Value == 0) {
    // ...
  } else if (Value == Immediate) {
    // ...
  } else if (!ScalarSlotUsed && !Immediate) {
    // ...
    ScalarSlotUsed = true;
// SITargetLowering::getRegClassForNode (excerpt)
  default:
    return NULL;
  // ...
  if (OpClassID != -1) {
    return TRI.getRegClass(OpClassID);
  }
  // ...
  OpClassID = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
  // ...
  if (OpClassID == AMDGPU::VSrc_32RegClassID ||
      OpClassID == AMDGPU::VSrc_64RegClassID) {
    return getRegClassForNode(DAG, Op.getOperand(0));
  }
  return TRI.getRegClass(OpClassID);
  // ...
  int SubIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  // ...
  return TRI.getSubClassWithSubReg(SuperClass, SubIdx);
  // ...
  return TRI.getRegClass(
      cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue());
// SITargetLowering::fitsRegClass — does the operand fit into RegClass?
bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, SDValue &Op,
                                    unsigned RegClass) const {
// SITargetLowering::ensureSRegLimit (excerpt): allow at most one scalar
// operand; everything else must go through the vector classes.
void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
                                       unsigned RegClass,
                                       bool &ScalarSlotUsed) const {
  // Restrict the VSrc classes to their VGPR-only counterparts.
  if (RegClass == AMDGPU::VSrc_32RegClassID)
    RegClass = AMDGPU::VReg_32RegClassID;
  else if (RegClass == AMDGPU::VSrc_64RegClassID)
    RegClass = AMDGPU::VReg_64RegClassID;
  // ...
  // Nothing to do if the operand already fits.
  if (fitsRegClass(DAG, Operand, RegClass))
    return;
  // The one scalar slot is still free; claim it.
  if (!ScalarSlotUsed) {
    ScalarSlotUsed = true;
// SITargetLowering::foldOperands (excerpt): fold immediates and pick legal
// register classes for the operands of a selected machine node.
  const MCInstrDesc *DescRev = OpcodeRev == -1 ? 0 : &TII->get(OpcodeRev);
  assert(!DescRev || DescRev->getNumDefs() == NumDefs);
  // ...
  const MCInstrDesc *DescE64 = OpcodeE64 == -1 ? 0 : &TII->get(OpcodeE64);
  assert(!DescE64 || DescE64->getNumDefs() == NumDefs);
  // ...
  int32_t Immediate = Desc->getSize() == 4 ? 0 : -1;
  bool HaveVSrc = false, HaveSSrc = false;

  // First scan: note which source classes appear and whether a literal
  // immediate is already in use.
  for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
       i != e && Op < NumOps; ++i, ++Op) {
    // ...
    else if (isSSrc(RegClass))
      // ...
    if (Imm != -1 && Imm != 0) {
      // ...
    }
  }

  // If this instruction has neither a VSrc nor an SSrc operand, there is
  // nothing to fold.
  if (!HaveVSrc && !HaveSSrc)
    // ...

  // No scalar allowed when both VSrc and SSrc are already present.
  bool ScalarSlotUsed = HaveVSrc && HaveSSrc;
  // ...
  std::vector<SDValue> Ops;
  bool Promote2e64 = false;
  // Second scan: legalize each operand.
  for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
       i != e && Op < NumOps; ++i, ++Op) {
    // ...
    Ops.push_back(Operand);
    // ...
    if (isa<ConstantSDNode>(Operand.getNode()) ||
        isa<ConstantFPSDNode>(Operand.getNode()))
      // ...
    if (!foldImm(Ops[i], Immediate, ScalarSlotUsed)) {
      // ...
      ensureSRegLimit(DAG, Ops[i], RegClass, ScalarSlotUsed);
    }
    // ...
    // Try commuting the operands via the reversed opcode.
    if (i == 1 && DescRev && fitsRegClass(DAG, Ops[0], RegClass)) {
      // ...
      assert(isVSrc(OtherRegClass) || isSSrc(OtherRegClass));
      // ...
      if (foldImm(Ops[1], Immediate, ScalarSlotUsed) ||
          (!fitsRegClass(DAG, Ops[1], RegClass) &&
           fitsRegClass(DAG, Ops[1], OtherRegClass))) {
        // ...
      }
    }
    // ...
    // Fall back to the e64 encoding when no literal is in use.
    if (DescE64 && !Immediate) {
      // ...
      int32_t TmpImm = -1;
      if (foldImm(Ops[i], TmpImm, ScalarSlotUsed) ||
          (!fitsRegClass(DAG, Ops[i], RegClass) &&
           fitsRegClass(DAG, Ops[1], OtherRegClass))) {
        // ...
      }
    }
  }
  // ...
  for (unsigned i = 0; i < 4; ++i)
    // ...
  for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i)
    // ...
// SubIdx2Lane — helper for adjustWritemask: map a sub-register index to the
// lane it selects.
  case AMDGPU::sub0: return 0;
  case AMDGPU::sub1: return 1;
  case AMDGPU::sub2: return 2;
  case AMDGPU::sub3: return 3;
// SITargetLowering::adjustWritemask (excerpt): recompute the dmask of a MIMG
// node from the components its users actually read.
void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                       SelectionDAG &DAG) const {
  // ...
  unsigned NewDmask = 0;
  // ...
  if (!I->isMachineOpcode() ||
      // ...
  // ...
  // Work out which texture component corresponds to this lane.
  for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
    // ...
    Dmask &= ~(1 << Comp);
  }
  // ...
  NewDmask |= 1 << Comp;
  // ...
  // Nothing to do if the recomputed mask matches the original one.
  if (NewDmask == OldDmask)
    return;
  // ...
  std::vector<SDValue> Ops;
  // ...
  // If the writemask has a single bit set, the result fits in one register.
  if (NewDmask && (NewDmask & (NewDmask - 1)) == 0) {
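NewDmask & (NewDmask - 1) clears the lowest set bit, so the condition above
is the standard test for a mask with exactly one bit set. As a standalone
sketch (hasSingleBitSet is a hypothetical name):

// Sketch: true iff exactly one bit of Mask is set (i.e. a power of two).
bool hasSingleBitSet(unsigned Mask) {
  return Mask != 0 && (Mask & (Mask - 1)) == 0;
}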
  // ...
  // Update the users of the node with the new sub-register indices.
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
    // ...
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    // ...
  }
// SITargetLowering::PostISelFolding (excerpt): fold the instructions after
// selecting them.
  Node = AdjustRegClass(Node, DAG);
  // ... (for MIMG nodes, shrink the writemask first)
  adjustWritemask(Node, DAG);
  // ...
  return foldOperands(Node, DAG);
// SITargetLowering::AdjustInstrPostInstrSelection (excerpt): assign the
// register class depending on the number of bits set in the writemask.
  unsigned BitsSet = 0;
  for (unsigned i = 0; i < 4; ++i)
    BitsSet += Writemask & (1 << i) ? 1 : 0;
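The loop above is a 4-bit population count: one enabled writemask bit per
used result component. An equivalent standalone form (countWritemaskBits is
a hypothetical name):

// Sketch: count how many of the low four writemask bits are set.
unsigned countWritemaskBits(unsigned Writemask) {
  unsigned BitsSet = 0;
  for (unsigned i = 0; i < 4; ++i)
    BitsSet += (Writemask >> i) & 1;
  return BitsSet;
}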
  // Pick the smallest register class that holds BitsSet components.
  case 1: RC = &AMDGPU::VReg_32RegClass; break;
  case 2: RC = &AMDGPU::VReg_64RegClass; break;
  case 3: RC = &AMDGPU::VReg_96RegClass; break;
  // ...
  MI->setDesc(TII->get(NewOpcode));
// SITargetLowering::AdjustRegClass (excerpt): rewrite scalar (S_LOAD) loads
// into the equivalent buffer loads with 64-bit addressing.
  case AMDGPU::S_LOAD_DWORD_IMM:
    NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
    // ...
  case AMDGPU::S_LOAD_DWORDX2_SGPR:
    // ...
    NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
    // ...
  case AMDGPU::S_LOAD_DWORDX4_IMM:
  case AMDGPU::S_LOAD_DWORDX4_SGPR: {
    // ...
    NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
    // ...
    if (fitsRegClass(DAG, N->getOperand(0), AMDGPU::SReg_64RegClassID)) {
// SITargetLowering::CreateLiveInRegister (excerpt): add Reg to the live-in
// list of the DAG's MachineFunction and return a copy from it.
SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                               const TargetRegisterClass *RC,
                                               unsigned Reg, EVT VT) const {
  // ...
      cast<RegisterSDNode>(VReg)->getReg(), VT);
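The pattern behind CreateLiveInRegister is: register a physical register as a
function live-in, then read it through a CopyFromReg rooted at the entry
node. A hedged sketch against the LLVM 3.x SelectionDAG API (loadLiveIn is a
hypothetical name, not the method above):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Sketch: mark Reg live-in, then produce an SDValue that reads it.
static SDValue loadLiveIn(SelectionDAG &DAG, MachineFunction &MF,
                          const TargetRegisterClass *RC, unsigned Reg,
                          EVT VT) {
  unsigned VReg = MF.addLiveIn(Reg, RC); // physical -> virtual register
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(), VReg, VT);
}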