LLVM API Documentation

 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
AMDGPUInstrInfo.cpp
Go to the documentation of this file.
1 //===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief Implementation of the TargetInstrInfo class that is common to all
12 /// AMD GPUs.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "AMDGPUInstrInfo.h"
17 #include "AMDGPURegisterInfo.h"
18 #include "AMDGPUTargetMachine.h"
22 
23 #define GET_INSTRINFO_CTOR_DTOR
24 #define GET_INSTRINFO_NAMED_OPS
25 #define GET_INSTRMAP_INFO
26 #include "AMDGPUGenInstrInfo.inc"
27 
28 using namespace llvm;
29 
30 
// Pin the vtable to this file.  Providing one out-of-line virtual method
// gives the compiler a single "home" translation unit in which to emit the
// class's vtable, instead of duplicating it in every TU that uses the class.
void AMDGPUInstrInfo::anchor() {}
33 
35  : AMDGPUGenInstrInfo(-1,-1), RI(tm), TM(tm) { }
36 
38  return RI;
39 }
40 
42  unsigned &SrcReg, unsigned &DstReg,
43  unsigned &SubIdx) const {
44 // TODO: Implement this function
45  return false;
46 }
47 
49  int &FrameIndex) const {
50 // TODO: Implement this function
51  return 0;
52 }
53 
55  int &FrameIndex) const {
56 // TODO: Implement this function
57  return 0;
58 }
59 
61  const MachineMemOperand *&MMO,
62  int &FrameIndex) const {
63 // TODO: Implement this function
64  return false;
65 }
67  int &FrameIndex) const {
68 // TODO: Implement this function
69  return 0;
70 }
72  int &FrameIndex) const {
73 // TODO: Implement this function
74  return 0;
75 }
77  const MachineMemOperand *&MMO,
78  int &FrameIndex) const {
79 // TODO: Implement this function
80  return false;
81 }
82 
86  LiveVariables *LV) const {
87 // TODO: Implement this function
88  return NULL;
89 }
90 bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
91  MachineBasicBlock &MBB) const {
92  while (iter != MBB.end()) {
93  switch (iter->getOpcode()) {
94  default:
95  break;
96  case AMDGPU::BRANCH_COND_i32:
97  case AMDGPU::BRANCH_COND_f32:
98  case AMDGPU::BRANCH:
99  return true;
100  };
101  ++iter;
102  }
103  return false;
104 }
105 
106 void
109  unsigned SrcReg, bool isKill,
110  int FrameIndex,
111  const TargetRegisterClass *RC,
112  const TargetRegisterInfo *TRI) const {
113  assert(!"Not Implemented");
114 }
115 
116 void
119  unsigned DestReg, int FrameIndex,
120  const TargetRegisterClass *RC,
121  const TargetRegisterInfo *TRI) const {
122  assert(!"Not Implemented");
123 }
124 
126  MachineBasicBlock *MBB = MI->getParent();
127  int OffsetOpIdx =
128  AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::addr);
129  // addr is a custom operand with multiple MI operands, and only the
130  // first MI operand is given a name.
131  int RegOpIdx = OffsetOpIdx + 1;
132  int ChanOpIdx =
133  AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::chan);
134 
135  if (isRegisterLoad(*MI)) {
136  int DstOpIdx =
137  AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
138  unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
139  unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
140  unsigned Address = calculateIndirectAddress(RegIndex, Channel);
141  unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
142  if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
143  buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
145  } else {
146  buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
147  Address, OffsetReg);
148  }
149  } else if (isRegisterStore(*MI)) {
150  int ValOpIdx =
151  AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::val);
152  AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
153  unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
154  unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
155  unsigned Address = calculateIndirectAddress(RegIndex, Channel);
156  unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
157  if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
158  buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
159  MI->getOperand(ValOpIdx).getReg());
160  } else {
161  buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
162  calculateIndirectAddress(RegIndex, Channel),
163  OffsetReg);
164  }
165  } else {
166  return false;
167  }
168 
169  MBB->erase(MI);
170  return true;
171 }
172 
173 
174 MachineInstr *
176  MachineInstr *MI,
177  const SmallVectorImpl<unsigned> &Ops,
178  int FrameIndex) const {
179 // TODO: Implement this function
180  return 0;
181 }
184  MachineInstr *MI,
185  const SmallVectorImpl<unsigned> &Ops,
186  MachineInstr *LoadMI) const {
187  // TODO: Implement this function
188  return 0;
189 }
190 bool
192  const SmallVectorImpl<unsigned> &Ops) const {
193  // TODO: Implement this function
194  return false;
195 }
196 bool
198  unsigned Reg, bool UnfoldLoad,
199  bool UnfoldStore,
200  SmallVectorImpl<MachineInstr*> &NewMIs) const {
201  // TODO: Implement this function
202  return false;
203 }
204 
205 bool
207  SmallVectorImpl<SDNode*> &NewNodes) const {
208  // TODO: Implement this function
209  return false;
210 }
211 
212 unsigned
214  bool UnfoldLoad, bool UnfoldStore,
215  unsigned *LoadRegIndex) const {
216  // TODO: Implement this function
217  return 0;
218 }
219 
221  int64_t Offset1, int64_t Offset2,
222  unsigned NumLoads) const {
223  assert(Offset2 > Offset1
224  && "Second offset should be larger than first offset!");
225  // If we have less than 16 loads in a row, and the offsets are within 16,
226  // then schedule together.
227  // TODO: Make the loads schedule near if it fits in a cacheline
228  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
229 }
230 
231 bool
233  const {
234  // TODO: Implement this function
235  return true;
236 }
239  // TODO: Implement this function
240 }
241 
243  // TODO: Implement this function
244  return false;
245 }
246 bool
248  const SmallVectorImpl<MachineOperand> &Pred2)
249  const {
250  // TODO: Implement this function
251  return false;
252 }
253 
255  std::vector<MachineOperand> &Pred) const {
256  // TODO: Implement this function
257  return false;
258 }
259 
261  // TODO: Implement this function
262  return MI->getDesc().isPredicable();
263 }
264 
265 bool
267  // TODO: Implement this function
268  return true;
269 }
270 
272  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
273 }
274 
276  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
277 }
278 
280  const MachineRegisterInfo &MRI = MF.getRegInfo();
281  const MachineFrameInfo *MFI = MF.getFrameInfo();
282  int Offset = -1;
283 
284  if (MFI->getNumObjects() == 0) {
285  return -1;
286  }
287 
288  if (MRI.livein_empty()) {
289  return 0;
290  }
291 
292  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
294  LE = MRI.livein_end();
295  LI != LE; ++LI) {
296  unsigned Reg = LI->first;
298  !IndirectRC->contains(Reg))
299  continue;
300 
301  unsigned RegIndex;
302  unsigned RegEnd;
303  for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
304  ++RegIndex) {
305  if (IndirectRC->getRegister(RegIndex) == Reg)
306  break;
307  }
308  Offset = std::max(Offset, (int)RegIndex);
309  }
310 
311  return Offset + 1;
312 }
313 
315  int Offset = 0;
316  const MachineFrameInfo *MFI = MF.getFrameInfo();
317 
318  // Variable sized objects are not supported
319  assert(!MFI->hasVarSizedObjects());
320 
321  if (MFI->getNumObjects() == 0) {
322  return -1;
323  }
324 
325  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);
326 
327  return getIndirectIndexBegin(MF) + Offset;
328 }
329 
330 
332  DebugLoc DL) const {
334  const AMDGPURegisterInfo & RI = getRegisterInfo();
335 
336  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
337  MachineOperand &MO = MI.getOperand(i);
338  // Convert dst regclass to one that is supported by the ISA
339  if (MO.isReg() && MO.isDef()) {
341  const TargetRegisterClass * oldRegClass = MRI.getRegClass(MO.getReg());
342  const TargetRegisterClass * newRegClass = RI.getISARegClass(oldRegClass);
343 
344  assert(newRegClass);
345 
346  MRI.setRegClass(MO.getReg(), newRegClass);
347  }
348  }
349  }
350 }
351 
352 int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
353  switch (Channels) {
354  default: return Opcode;
355  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
356  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
357  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
358  }
359 }
const MachineFunction * getParent() const
instr_iterator erase(instr_iterator I)
int getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const
Given a MIMG Opcode that writes all 4 channels, return the equivalent opcode that writes Channels Cha...
bool ReverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const
unsigned getRegister(unsigned i) const
livein_iterator livein_end() const
#define AMDGPU_FLAG_REGISTER_STORE
static bool isVirtualRegister(unsigned Reg)
#define AMDGPU_FLAG_REGISTER_LOAD
unsigned getNumObjects() const
const MCInstrDesc & getDesc() const
Definition: MachineInstr.h:257
MachineInstr * convertToThreeAddress(MachineFunction::iterator &MFI, MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, const SmallVectorImpl< unsigned > &Ops, int FrameIndex) const
bool hasStoreFromStackSlot(const MachineInstr *MI, const MachineMemOperand *&MMO, int &FrameIndex) const
AMDGPUInstrInfo(TargetMachine &tm)
LoopInfoBase< BlockT, LoopT > * LI
Definition: LoopInfoImpl.h:411
virtual const AMDGPURegisterInfo & getRegisterInfo() const =0
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Abstract Stack Frame Information.
unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=0) const
unsigned getNumOperands() const
Definition: MachineInstr.h:265
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution. It may be set to 'al...
Definition: MCInstrDesc.h:310
TargetRegisterInfo interface that is implemented by all hw codegen targets.
int getOpcode() const
Definition: MachineInstr.h:261
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
bool isPredicable(MachineInstr *MI) const
bool SubsumesPredicate(const SmallVectorImpl< MachineOperand > &Pred1, const SmallVectorImpl< MachineOperand > &Pred2) const
virtual unsigned calculateIndirectAddress(unsigned RegIndex, unsigned Channel) const =0
Calculate the "Indirect Address" for the given RegIndex and Channel.
bool canFoldMemoryOperand(const MachineInstr *MI, const SmallVectorImpl< unsigned > &Ops) const
bundle_iterator< MachineInstr, instr_iterator > iterator
virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, unsigned ValueReg, unsigned Address, unsigned OffsetReg) const =0
Build instruction(s) for an indirect register write.
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:267
TargetMachine & TM
bool isRegisterLoad(const MachineInstr &MI) const
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const
unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
virtual const TargetRegisterClass * getIndirectAddrRegClass() const =0
virtual MachineInstr * buildMovInstr(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, unsigned DstReg, unsigned SrcReg) const =0
Build a MOV instruction.
The AMDGPU TargetMachine interface definition for hw codgen targets.
unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI, int &FrameIndex) const
virtual const TargetFrameLowering * getFrameLowering() const
bool isPredicated(const MachineInstr *MI) const
virtual int getFrameIndexOffset(const MachineFunction &MF, int FI) const
bool hasLoadFromStackSlot(const MachineInstr *MI, const MachineMemOperand *&MMO, int &FrameIndex) const
unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
livein_iterator livein_begin() const
MachineFrameInfo * getFrameInfo()
virtual void convertToISA(MachineInstr &MI, MachineFunction &MF, DebugLoc DL) const
Convert the AMDIL MachineInstr to a supported ISA MachineInstr.
unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI, int &FrameIndex) const
unsigned getNumRegs() const
bool DefinesPredicate(MachineInstr *MI, std::vector< MachineOperand > &Pred) const
virtual int getIndirectIndexEnd(const MachineFunction &MF) const
Contains the definition of a TargetInstrInfo class that is common to all AMD GPUs.
MachineRegisterInfo & getRegInfo()
bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI, unsigned Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr * > &NewMIs) const
#define N
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
bool isRegisterStore(const MachineInstr &MI) const
virtual const TargetRegisterClass * getISARegClass(const TargetRegisterClass *RC) const
bool hasVarSizedObjects() const
std::vector< std::pair< unsigned, unsigned > >::const_iterator livein_iterator
unsigned getReg() const
getReg - Returns the register number.
virtual int getIndirectIndexBegin(const MachineFunction &MF) const
BasicBlockListType::iterator iterator
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex)
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const
void setRegClass(unsigned Reg, const TargetRegisterClass *RC)
const MCRegisterInfo & MRI
virtual MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, unsigned ValueReg, unsigned Address, unsigned OffsetReg) const =0
Build instruction(s) for an indirect register read.
bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg, unsigned &DstReg, unsigned &SubIdx) const
bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const
bool contains(unsigned Reg) const