#define DEBUG_TYPE "nvptx-lower"
static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
                               SmallVectorImpl<EVT> &ValueVTs,
                               SmallVectorImpl<uint64_t> *Offsets = 0,
                               uint64_t StartingOffset = 0) {
  // ...
  for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
    uint64_t Off = TempOffsets[i];
    // ...
    if (Offsets)
      Offsets->push_back(Off);
    // ...
  }
}
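// ComputePTXValueVTs works like the generic ComputeValueVTs, but (as the
// per-element offset bookkeeping above suggests) it also breaks vector
// members apart into their element EVTs, so params and return values can be
// laid out one scalar slot at a time.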
const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case NVPTXISD::CALL:               return "NVPTXISD::CALL";
  case NVPTXISD::RET_FLAG:           return "NVPTXISD::RET_FLAG";
  case NVPTXISD::Wrapper:            return "NVPTXISD::Wrapper";
  case NVPTXISD::DeclareParam:       return "NVPTXISD::DeclareParam";
  case NVPTXISD::DeclareScalarParam: return "NVPTXISD::DeclareScalarParam";
  case NVPTXISD::DeclareRet:         return "NVPTXISD::DeclareRet";
  case NVPTXISD::DeclareRetParam:    return "NVPTXISD::DeclareRetParam";
  case NVPTXISD::PrintCall:          return "NVPTXISD::PrintCall";
  case NVPTXISD::LoadParam:          return "NVPTXISD::LoadParam";
  case NVPTXISD::LoadParamV2:        return "NVPTXISD::LoadParamV2";
  case NVPTXISD::LoadParamV4:        return "NVPTXISD::LoadParamV4";
  case NVPTXISD::StoreParam:         return "NVPTXISD::StoreParam";
  case NVPTXISD::StoreParamV2:       return "NVPTXISD::StoreParamV2";
  case NVPTXISD::StoreParamV4:       return "NVPTXISD::StoreParamV4";
  case NVPTXISD::StoreParamS32:      return "NVPTXISD::StoreParamS32";
  case NVPTXISD::StoreParamU32:      return "NVPTXISD::StoreParamU32";
  case NVPTXISD::CallArgBegin:       return "NVPTXISD::CallArgBegin";
  case NVPTXISD::CallArg:            return "NVPTXISD::CallArg";
  case NVPTXISD::LastCallArg:        return "NVPTXISD::LastCallArg";
  case NVPTXISD::CallArgEnd:         return "NVPTXISD::CallArgEnd";
  case NVPTXISD::CallVoid:           return "NVPTXISD::CallVoid";
  case NVPTXISD::CallVal:            return "NVPTXISD::CallVal";
  case NVPTXISD::CallSymbol:         return "NVPTXISD::CallSymbol";
  case NVPTXISD::Prototype:          return "NVPTXISD::Prototype";
  case NVPTXISD::MoveParam:          return "NVPTXISD::MoveParam";
  case NVPTXISD::StoreRetval:        return "NVPTXISD::StoreRetval";
  case NVPTXISD::StoreRetvalV2:      return "NVPTXISD::StoreRetvalV2";
  case NVPTXISD::StoreRetvalV4:      return "NVPTXISD::StoreRetvalV4";
  case NVPTXISD::PseudoUseParam:     return "NVPTXISD::PseudoUseParam";
  case NVPTXISD::RETURN:             return "NVPTXISD::RETURN";
  case NVPTXISD::CallSeqBegin:       return "NVPTXISD::CallSeqBegin";
  case NVPTXISD::CallSeqEnd:         return "NVPTXISD::CallSeqEnd";
  case NVPTXISD::CallPrototype:      return "NVPTXISD::CallPrototype";
  case NVPTXISD::LoadV2:             return "NVPTXISD::LoadV2";
  case NVPTXISD::LoadV4:             return "NVPTXISD::LoadV4";
  case NVPTXISD::LDGV2:              return "NVPTXISD::LDGV2";
  case NVPTXISD::LDGV4:              return "NVPTXISD::LDGV4";
  case NVPTXISD::LDUV2:              return "NVPTXISD::LDUV2";
  case NVPTXISD::LDUV4:              return "NVPTXISD::LDUV4";
  case NVPTXISD::StoreV2:            return "NVPTXISD::StoreV2";
  case NVPTXISD::StoreV4:            return "NVPTXISD::StoreV4";
  }
}
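// These NVPTXISD opcodes are pseudo-operations for the PTX calling
// convention: DeclareParam/StoreParam/LoadParam and friends print the .param
// declarations and st.param/ld.param accesses, while CallArgBegin, CallArg,
// CallArgEnd, CallVoid, and Prototype print the pieces of the call
// instruction itself. They are emitted by LowerCall/LowerFormalArguments
// below.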
SDValue
NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  // ...
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  // ...
}
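// LowerGlobalAddress re-emits the address as a TargetGlobalAddress wrapped in
// an NVPTXISD::Wrapper node (see the opcode names above).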
std::string
NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  unsigned retAlignment,
                                  const ImmutableCallSite *CS) const {
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");
  // ...

  // Return-value part of the prototype.
  if (retTy->isFloatingPointTy() || isa<IntegerType>(retTy)) {
    unsigned size = 0;
    if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
      size = ITy->getBitWidth();
      // ... (sub-32-bit integers are widened to 32)
    } else {
      assert(retTy->isFloatingPointTy() &&
             "Floating point type expected here");
      // ...
    }
    O << ".param .b" << size << " _";
  } else if (isa<PointerType>(retTy)) {
    // ...
  } else {
    // Aggregate or vector return: a .b8 array with the computed alignment.
    unsigned totalsz = 0;
    for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
      unsigned elems = 1;
      EVT elemtype = vtparts[i];
      if (vtparts[i].isVector()) {
        elems = vtparts[i].getVectorNumElements();
        elemtype = vtparts[i].getVectorElementType();
      }
      for (unsigned j = 0, je = elems; j != je; ++j) {
        // ... (totalsz += the element's store size)
      }
    }
    O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]";
    // ...
    assert(false && "Unknown return type");
  }

  // Argument part of the prototype.
  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
    Type *Ty = Args[i].Ty;
    // ...
    if (Outs[OIdx].Flags.isByVal() == false) {
      // Aggregates and vectors are printed as aligned .b8 arrays.
      // ...
      align = TD->getABITypeAlignment(Ty);
      unsigned sz = TD->getTypeAllocSize(Ty);
      O << ".param .align " << align << " .b8 ";
      // ...
      O << "[" << sz << "]";
      // ...
      // Keep OIdx in step with the number of flattened pieces:
      if (unsigned len = vtparts.size())
        OIdx += len - 1;
      continue;
      // ...
      // Scalars are printed as plain .b<size> params. (An assert checks the
      // SDAG value type here: "type mismatch between callee prototype and
      // arguments".)
      unsigned sz = 0;
      if (isa<IntegerType>(Ty)) {
        sz = cast<IntegerType>(Ty)->getBitWidth();
        // ... (widened to at least 32 bits)
      } else if (isa<PointerType>(Ty))
        sz = getPointerTy().getSizeInBits();
      // ...
      O << ".param .b" << sz << " ";
      // ...
    }

    // byval argument: the pointee is what travels in the .param space.
    const PointerType *PTy = dyn_cast<PointerType>(Ty);
    assert(PTy && "Param with byval attribute should be a pointer type");
    // ...
    unsigned align = Outs[OIdx].Flags.getByValAlign();
    // ...
    O << ".param .align " << align << " .b8 ";
    // ...
    O << "[" << sz << "]";
  }
  // ...
unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
                                                   const ImmutableCallSite *CS,
                                                   Type *Ty,
                                                   unsigned Idx) const {
  // ...
  assert(CalleeI && "Call target is not a function or derived value?");
  // ...
  if (isa<CallInst>(CalleeI)) {
    // ...
    // Look through bitcasts and other constant expressions to find the
    // callee.
    const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
    // ...
    while (isa<ConstantExpr>(CalleeV)) {
      // ...
      CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
    }
    // ...
    if (isa<Function>(CalleeV))
      DirectCallee = CalleeV;
  }
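// If the walk above reaches a direct callee, the alignment of parameter Idx
// can be read from its parameter attributes (llvm::getAlign); otherwise the
// natural fallback is the DataLayout's ABI alignment for Ty, presumably via
// TD->getABITypeAlignment(Ty).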
SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                       SmallVectorImpl<SDValue> &InVals) const {
  // ...
  assert(isABI && "Non-ABI compilation is not supported");
  // ...
  unsigned paramCount = 0;
  // ...
  // Declare and fill the .param space for each argument.
  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
    EVT VT = Outs[OIdx].VT;
    Type *Ty = Args[i].Ty;
    // ...
    if (Outs[OIdx].Flags.isByVal() == false) {
      if (Ty->isAggregateType()) {
        // Aggregate: an aligned .b8 array, filled element by element.
        unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
        // ...
        unsigned curOffset = 0;
        for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
          unsigned elems = 1;
          EVT elemtype = vtparts[j];
          if (vtparts[j].isVector()) {
            elems = vtparts[j].getVectorNumElements();
            elemtype = vtparts[j].getVectorElementType();
          }
          for (unsigned k = 0, ke = elems; k != ke; ++k) {
            // ...
            SDValue CopyParamOps[] = { Chain,
                                       // ... offset, value, glue operands ...
            };
            Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
                                            CopyParamVTs, &CopyParamOps[0], 5,
                                            elemtype, MachinePointerInfo());
            // ...
          }
        }
        if (vtparts.size() > 0)
          --OIdx; // OIdx advanced once per flattened element above
        ++paramCount;
        continue;
      }
      if (Ty->isVectorTy()) {
        // Vector: declare the param array, then write the elements with
        // StoreParam / StoreParamV2 / StoreParamV4.
        unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
        // ...
        // Elements narrower than 16 bits are extended before being stored.
        bool NeedExtend = false;
        // ...
        if (NumElts == 1) {
          // ...
          SDValue CopyParamOps[] = { Chain,
                                     // ... offset, the element, glue ...
          };
          Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
                                          CopyParamVTs, &CopyParamOps[0], 5,
                                          EltVT, MachinePointerInfo());
        } else if (NumElts == 2) {
          SDValue Elt0 = OutVals[OIdx++];
          SDValue Elt1 = OutVals[OIdx++];
          // ...
          SDValue CopyParamOps[] = { Chain,
                                     // ... offset, Elt0, Elt1, glue ...
          };
          Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
                                          CopyParamVTs, &CopyParamOps[0], 6,
                                          EltVT, MachinePointerInfo());
        } else {
          // Longer vectors are written in chunks of up to four elements.
          unsigned curOffset = 0;
          // ...
          unsigned VecSize = 4;
          // ...
          for (unsigned i = 0; i < NumElts; i += VecSize) {
            SDValue StoreVal;
            // ...
            StoreVal = OutVals[OIdx++];
            // ...
            if (i + 1 < NumElts) {
              StoreVal = OutVals[OIdx++];
              // ...
            }
            if (i + 2 < NumElts) {
              StoreVal = OutVals[OIdx++];
              // ...
            }
            if (i + 3 < NumElts) {
              StoreVal = OutVals[OIdx++];
              // ...
            }
            // ... (emit the chunk's StoreParamV2/StoreParamV4 node)
            curOffset += PerStoreOffset;
          }
        }
        ++paramCount;
        continue;
      }
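      // Reading the loop above: a v8f32 argument, for example, is written as
      // two four-element chunks, while a tail of fewer than four remaining
      // elements presumably falls back to the narrower StoreParam forms
      // (illustrative; the chunk-emission step is elided here).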
      // Plain scalar argument: declare a .param .b<size> and store into it.
      bool needExtend = false;
      // ...
      SDValue DeclareParamOps[] = { Chain,
                                    // ... size and param-index operands ...
      };
      // ...
      // Small values are extended first; the kind of extension follows the
      // parameter's sext attribute:
      if (Outs[OIdx].Flags.isSExt())
        // ...
      // ...
      // The store opcode records the extension so the printer can emit the
      // matching conversion:
      unsigned opcode = NVPTXISD::StoreParam;
      if (Outs[OIdx].Flags.isZExt())
        opcode = NVPTXISD::StoreParamU32;
      else if (Outs[OIdx].Flags.isSExt())
        opcode = NVPTXISD::StoreParamS32;
      // ...
      ++paramCount;
      continue;
    }

    // byval argument: the pointee itself is copied into the .param space.
    const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
    assert(PTy && "Type of a byval parameter should be pointer");
    // ...
    unsigned sz = Outs[OIdx].Flags.getByValSize();
    unsigned curOffset = 0;
    for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
      unsigned elems = 1;
      EVT elemtype = vtparts[j];
      if (vtparts[j].isVector()) {
        elems = vtparts[j].getVectorNumElements();
        elemtype = vtparts[j].getVectorElementType();
      }
      for (unsigned k = 0, ke = elems; k != ke; ++k) {
        // ... load the element from the caller's copy of the object, then:
        Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
                                        CopyParamOps, 5, elemtype,
                                        MachinePointerInfo());
        // ...
      }
    }
    ++paramCount;
  }
  // Declare the return param, print the prototype (needed for indirect
  // calls), then emit the call sequence itself.
  unsigned retAlignment = 0;
  // ...
  if (Ins.size() > 0) {
    // ...
    retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
    // ...
    SDValue DeclareRetOps[] = { Chain,
                                // ... alignment, size, glue operands ...
    };
    // ...
  }
  // ...
  std::string Proto = getPrototype(retTy, Args, Outs, retAlignment, CS);
  const char *ProtoStr =
      nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
  // ...
  Chain = DAG.getNode(NVPTXISD::PrintCall, dl, PrintCallVTs, PrintCallOps, 3);
  // ...
  SDValue CallVoidOps[] = { Chain, Callee, InFlag };
  // ...
  SDValue CallArgBeginOps[] = { Chain, InFlag };
  // ...
  for (unsigned i = 0, e = paramCount; i != e; ++i) {
    // One CallArg per param; the final one is a LastCallArg.
    // ...
    Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps, 4);
    // ...
  }
  // Generate loads from the return param.
  if (Ins.size() > 0) {
    unsigned resoffset = 0;
    // ...
    // Vector return value: mirror the three store shapes used above.
    // ...
      assert(getNumRegisters(F->getContext(),
                             ObjectVT) == NumElts &&
             "Vector was not scalarized");
      unsigned sz = EltVT.getSizeInBits();
      bool needTruncate = sz < 16; // i8 comes back widened to i16
      if (NumElts == 1) {
        std::vector<EVT> LoadRetVTs;
        // ...
        LoadRetVTs.push_back(EltVT);
        // ...
        std::vector<SDValue> LoadRetOps;
        LoadRetOps.push_back(Chain);
        // ...
        LoadRetOps.push_back(InFlag);
        SDValue retval = DAG.getMemIntrinsicNode(
            NVPTXISD::LoadParam, dl,
            DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
            LoadRetOps.size(), EltVT, MachinePointerInfo());
        // ...
      } else if (NumElts == 2) {
        std::vector<EVT> LoadRetVTs;
        // ...
        LoadRetVTs.push_back(EltVT);
        LoadRetVTs.push_back(EltVT);
        // ...
        std::vector<SDValue> LoadRetOps;
        LoadRetOps.push_back(Chain);
        // ...
        LoadRetOps.push_back(InFlag);
        SDValue retval = DAG.getMemIntrinsicNode(
            NVPTXISD::LoadParamV2, dl,
            DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
            LoadRetOps.size(), EltVT, MachinePointerInfo());
        // ...
      } else {
        // Chunks of up to four elements via LoadParamV4.
        unsigned VecSize = 4;
        // ...
        for (unsigned i = 0; i < NumElts; i += VecSize) {
          // ...
          for (unsigned j = 0; j < VecSize; ++j)
            // ... (collect the chunk's result types)
          for (unsigned j = 0; j < VecSize; ++j)
            // ... (collect the chunk's operands)
          // ...
          for (unsigned j = 0; j < VecSize; ++j) {
            if (i + j >= NumElts)
              break;
            // ... (record each loaded, possibly truncated, element)
          }
        }
      }
    // ...
    // Scalar or aggregate return value: one LoadParam per flattened piece.
    // ...
      assert(VTs.size() == Ins.size() && "Bad value decomposition");
      for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
        unsigned sz = VTs[i].getSizeInBits();
        bool needTruncate = sz < 8;
        if (VTs[i].isInteger() && (sz < 8))
          sz = 8;
        // ...
        EVT TheLoadType = VTs[i];
        // ...
        } else if (sz < 16) {
          // ld.param has no 8-bit form; load these as i16 instead.
          // ...
        }
        // ...
        SDValue retval = DAG.getMemIntrinsicNode(
            NVPTXISD::LoadParam, dl,
            DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
            LoadRetOps.size(), TheLoadType, MachinePointerInfo());
        // ...
        resoffset += sz / 8;
      }
    }
  }
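// End-to-end, the nodes emitted above print a PTX call sequence of the
// general shape (illustrative, for a call `i32 @f(i32)`):
//   .param .b32 param0;
//   st.param.b32  [param0+0], %r1;
//   .param .b32 retval0;
//   call.uni (retval0), f, (param0);
//   ld.param.b32  %r2, [retval0+0];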
SDValue
NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
  // CONCAT_VECTORS is lowered by gathering every element of every operand
  // and rebuilding the result as a single BUILD_VECTOR.
  // ...
  for (unsigned i = 0; i < NumOperands; ++i) {
    // ...
    for (unsigned j = 0; j < NumSubElem; ++j) {
      // ...
    }
  }
SDValue
NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  // ...
  case ISD::CONCAT_VECTORS:
    return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::STORE:
    return LowerSTORE(Op, DAG);
  case ISD::LOAD:
    return LowerLOAD(Op, DAG);
  // ...
  }
}

// LowerLOAD forwards i1 loads to a custom expansion:
    return LowerLOADi1(Op, DAG);
// ... and LowerLOADi1 begins:
  assert(Node->getValueType(0) == MVT::i1 &&
         "Custom lowering for i1 load only");
// ...
// LowerSTORE likewise dispatches on the stored type:
    return LowerSTOREi1(Op, DAG);
  // ...
    return LowerSTOREVector(Op, DAG);
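// PTX has no 1-bit loads or stores, so i1 is custom-lowered: LowerLOADi1
// widens the access to i8 and truncates the result, and LowerSTOREi1 (below)
// widens the stored value and emits a truncating i8 store.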
// LowerSTOREVector: rewrite a vector store as one StoreV2/StoreV4 node.
SDValue
NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
  // ...
  unsigned Opcode = 0;
  // ...
  // Elements narrower than 16 bits are widened before being stored.
  bool NeedExt = false;
  // ...
  // Collect (and, if NeedExt, extend) each element as an operand.
  for (unsigned i = 0; i < NumElts; ++i) {
    // ...
  }
// LowerSTOREi1: the i1 value is stored via a truncating i8 store.
  // ...
  SDValue Result =
      DAG.getTruncStore(ST->getChain(), dl, /* value, pointer, ptr-info, */
                        MVT::i8, isNonTemporal,
                        isVolatile, Alignment);
SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
                                        int idx, EVT v) const {
  std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
  std::stringstream suffix;
  suffix << idx;
  *name += suffix.str();
  return DAG.getTargetExternalSymbol(name->c_str(), v);
}

SDValue
NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
  std::string ParamSym;
  // ... (builds "<function name>_param_<idx>" into ParamSym)
  std::string *SavedStr =
      nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
  return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
}

SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
  return getExtSymb(DAG, ".HLPPARAM", idx);
}
// isImageOrSamplerVal - Matches OpenCL image / sampler arguments by the name
// of their opaque struct type.
static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
  static const char *const specialTypes[] = { "struct._image2d_t",
                                              "struct._image3d_t",
                                              "struct._sampler_t" };
  // ...
  const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
  // ...
  for (unsigned i = 0, e = array_lengthof(specialTypes); i != e; ++i)
    if (TypeName == specialTypes[i])
      return true;

  return false;
}
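// OpenCL image and sampler arguments arrive as pointers to these opaque
// struct types; LowerFormalArguments (below) asserts that only kernel
// functions receive them.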
SDValue NVPTXTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  // ...
  std::vector<SDValue> OutChains;
  // ...
  assert(isABI && "Non-ABI compilation is not supported");
  // ...
  std::vector<Type *> argTypes;
  std::vector<const Argument *> theArgs;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I) {
    theArgs.push_back(I);
    argTypes.push_back(I->getType());
  }
  // ...
  unsigned InsIdx = 0;
  // ...
  for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
    Type *Ty = argTypes[i];

    // Image/sampler arguments are matched by struct-type name and take
    // their own lowering path.
    // ...
      assert(isKernel && "Only kernels can have image/sampler params");
    // ...

    // Dead arguments still need UNDEF placeholders so InVals stays in step
    // with Ins.
    if (theArgs[i]->use_empty()) {
      // ...
      assert(vtparts.size() > 0 && "empty aggregate type not expected");
      for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
           ++parti, ++InsIdx) {
        EVT partVT = vtparts[parti];
        // ... (push an UNDEF of partVT)
      }
      if (vtparts.size() > 0)
        --InsIdx;
      continue;
      // ...
      // Dead vector argument: one UNDEF per register.
      for (unsigned parti = 0; parti < NumRegs; ++parti) {
        // ...
      }
      // ...
    }

    // Live aggregate: load each flattened piece from the param space.
    // ...
    assert(vtparts.size() > 0 && "empty aggregate type not expected");
    bool aggregateIsPacked = false;
    if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
      aggregateIsPacked = STy->isPacked();
    // ...
    for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
         ++parti, ++InsIdx) {
      EVT partVT = vtparts[parti];
      // ...
      unsigned partAlign =
          aggregateIsPacked
              ? 1
              : TD->getABITypeAlignment(
                    partVT.getTypeForEVT(F->getContext()));
      SDValue p;
      if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
        // Widen while loading:
        p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
                           // ... pointer info, memVT, flags ...
      } else {
        p = DAG.getLoad(partVT, dl, Root, srcAddr,
                        // ... pointer info, flags ...
      }
      // ...
    }
    if (vtparts.size() > 0)
      --InsIdx;
    continue;

    // Vector argument: mirror the LoadParam/LoadParamV2/LoadParamV4 shapes.
    // ...
      assert(getNumRegisters(F->getContext(), ObjectVT) == NumElts &&
             "Vector was not scalarized");
      if (NumElts == 1) {
        // ...
      } else if (NumElts == 2) {
        // ...
        if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
          // ... (extend each element to the expected register type)
        }
        // ...
      } else {
        unsigned VecSize = 4;
        // ...
        for (unsigned i = 0; i < NumElts; i += VecSize) {
          // ...
          for (unsigned j = 0; j < VecSize; ++j) {
            if (i + j >= NumElts)
              break;
            // ...
          }
        }
      }
    // ...

    // Plain scalar: one (possibly extending) load from the param symbol.
    SDValue p;
    if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
      // ...
      p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
                         // ... pointer info, memVT, flags ...
    } else {
      p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
                      // ... pointer info, flags ...
    }
    // ...
    assert(ObjectVT == Ins[InsIdx].VT &&
           "Ins type did not match function type");
    // ...
  }
  // Merge the chains of all emitted argument loads.
  if (!OutChains.empty())
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &OutChains[0],
                            OutChains.size()));
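// For a kernel, each live argument above turns into an ld.param from the
// kernel's parameter space, e.g. (illustrative):
//   ld.param.u32  %r1, [myKernel_param_0];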
SDValue
NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {
  // ...
  assert(isABI && "Non-ABI compilation is not supported");
  // ...
  if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) {
    // Vector return: StoreRetval / StoreRetvalV2 / StoreRetvalV4.
    // ...
    unsigned NumElts = VTy->getNumElements();
    assert(NumElts == Outs.size() && "Bad scalarization of return value");

    // i8 elements are stored as i16.
    bool NeedExtend = false;
    if (EltVT.getSizeInBits() < 16)
      NeedExtend = true;

    if (NumElts == 1) {
      // ...
      SDValue StoreVal = OutVals[0];
      // ...
    } else if (NumElts == 2) {
      // ...
      SDValue StoreVal0 = OutVals[0];
      SDValue StoreVal1 = OutVals[1];
      // ...
    } else {
      // Chunks of up to four elements; missing lanes become UNDEF.
      unsigned VecSize = 4;
      // ...
      unsigned Offset = 0;
      // ...
      unsigned PerStoreOffset =
          // ... (the byte span covered by one chunk)
      for (unsigned i = 0; i < NumElts; i += VecSize) {
        SDValue StoreVal;
        // ...
        StoreVal = OutVals[i];
        // ...
        if (i + 1 < NumElts) {
          StoreVal = OutVals[i + 1];
          // ...
        } else {
          StoreVal = DAG.getUNDEF(ExtendedVT);
        }
        if (i + 2 < NumElts) {
          StoreVal = OutVals[i + 2];
          // ...
        } else {
          StoreVal = DAG.getUNDEF(ExtendedVT);
        }
        if (i + 3 < NumElts) {
          StoreVal = OutVals[i + 3];
          // ...
        } else {
          StoreVal = DAG.getUNDEF(ExtendedVT);
        }
        // ... (emit the chunk's StoreRetvalV2/StoreRetvalV4 node)
        Offset += PerStoreOffset;
      }
    }
  } else {
    // Scalar/aggregate return: one StoreRetval per flattened piece.
    // ...
    assert(ValVTs.size() == OutVals.size() &&
           "Bad return value decomposition");

    unsigned SizeSoFar = 0;
    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
      // ...
      unsigned numElems = 1;
      // ...
      for (unsigned j = 0, je = numElems; j != je; ++j) {
        // ...
        EVT TheStoreType = ValVTs[i];
        // ...
        // Vector pieces advance by one element per store:
        SizeSoFar +=
            TheStoreType.getVectorElementType().getStoreSizeInBits() / 8;
        // ... otherwise by the piece's full store size:
        SizeSoFar += TheStoreType.getStoreSizeInBits() / 8;
        // ...
      }
    }
  }
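// Each StoreRetval node prints as a st.param into the implicit return
// buffer, e.g. (illustrative):
//   st.param.b32  [func_retval0+0], %r1;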
void NVPTXTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Multi-letter constraints get no special handling here.
  if (Constraint.length() > 1)
    return;
  // ...
}
bool NVPTXTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  // ... (one case per NVVM memory intrinsic)
  }
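// For the NVVM memory intrinsics (ldu/ldg and similar), this fills in Info
// with the in-memory value type, the base pointer operand, and the
// read/write flags, so the SelectionDAG can treat the intrinsic as a memory
// operation.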
NVPTXTargetLowering::ConstraintType
NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    // ...
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
std::pair<unsigned, const TargetRegisterClass *>
NVPTXTargetLowering::getRegForInlineAsmConstraint(
    const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'c':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'h':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'r':
      return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
    case 'l':
    case 'N':
      return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
    case 'f':
      return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
    case 'd':
      return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
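// The letters follow the PTX inline-asm convention: 'c'/'h' pick 16-bit
// integer registers, 'r' 32-bit, 'l' (and 'N') 64-bit, and 'f'/'d' the
// 32-/64-bit float register classes, matching the returns above.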
// ReplaceLoadVector - Convert vector loads into multi-output scalar loads
// (NVPTXISD::LoadV2 / LoadV4).
static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
                              SmallVectorImpl<SDValue> &Results) {
  EVT ResVT = N->getValueType(0);
  // ...
  assert(ResVT.isVector() && "Vector load must have vector type");
  // ...
  assert(ResVT.isSimple() && "Can only handle simple types");
  // ...
  // Elements narrower than 16 bits are loaded widened, then truncated back.
  bool NeedTrunc = false;
  // ...
  unsigned Opcode = 0;
  // ...
  // Collect (and, if NeedTrunc, truncate) each scalar result.
  for (unsigned i = 0; i < NumElts; ++i) {
    // ...
  }
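// On the PTX side these multi-output loads print as vector loads, e.g.
// (illustrative):
//   ld.global.v4.f32  {%f1, %f2, %f3, %f4}, [%rd1];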
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &Results) {
  SDValue Intrin = N->getOperand(1);
  // ...
  unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
  // ...
  bool NeedTrunc = false;
  // ...
  unsigned Opcode = 0;
  // ...
  SDValue NewLD = DAG.getMemIntrinsicNode(
      Opcode, DL, LdResVTs, &OtherOps[0], OtherOps.size(),
      MemSD->getMemoryVT(), MemSD->getMemOperand());
  // ...
  for (unsigned i = 0; i < NumElts; ++i) {
    // ...
  }
  // ...
  assert(N->getValueType(0) == MVT::i8 &&
         "Custom handling of non-i8 ldu/ldg?");
  // ...
  Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                                NewLD.getValue(0)));
  // ...
}
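// ldu/ldg of i8 is the awkward case: PTX registers start at 16 bits, so the
// value is loaded as i16 and the TRUNCATE above narrows it back to i8.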
void NVPTXTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  // Dispatches LOAD to ReplaceLoadVector and INTRINSIC_W_CHAIN to
  // ReplaceINTRINSIC_W_CHAIN above.
  // ...
void NVPTXSection::anchor() {}