//===-- X86JITInfo.cpp - Implement the JIT interfaces for the X86 target --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the JIT interfaces for the X86 target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "X86JITInfo.h"
#include "X86Relocations.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Valgrind.h"
#include <cstdlib>
#include <cstring>
using namespace llvm;

// Determine the platform we're running on
#if defined (__x86_64__) || defined (_M_AMD64) || defined (_M_X64)
# define X86_64_JIT
#elif defined(__i386__) || defined(i386) || defined(_M_IX86)
# define X86_32_JIT
#endif

void X86JITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
  unsigned char *OldByte = (unsigned char *)Old;
  *OldByte++ = 0xE9;                // Emit JMP opcode.
  unsigned *OldWord = (unsigned *)OldByte;
  unsigned NewAddr = (intptr_t)New;
  unsigned OldAddr = (intptr_t)OldWord;
  *OldWord = NewAddr - OldAddr - 4; // Emit PC-relative addr of New code.

  // X86 doesn't need to invalidate the processor cache, so just invalidate
  // Valgrind's cache directly.
  sys::ValgrindDiscardTranslations(Old, 5);
}
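
// Illustrative example (hypothetical addresses): with Old at 0x1000 and New
// at 0x2000, the patch writes E9 FB 0F 00 00 at 0x1000: a JMP whose rel32 is
// 0x2000 - 0x1001 - 4 = 0xFFB, measured from the end of the 5-byte
// instruction (0x1005 + 0xFFB = 0x2000).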

/// JITCompilerFunction - This contains the address of the JIT function used to
/// compile a function lazily.
static TargetJITInfo::JITCompilerFn JITCompilerFunction;

// Get the ASMPREFIX for the current host. This is often '_'.
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__
#endif
#define GETASMPREFIX2(X) #X
#define GETASMPREFIX(X) GETASMPREFIX2(X)
#define ASMPREFIX GETASMPREFIX(__USER_LABEL_PREFIX__)

// For ELF targets, use a .size and .type directive, to let tools
// know the extent of functions defined in assembler.
#if defined(__ELF__)
# define SIZE(sym) ".size " #sym ", . - " #sym "\n"
# define TYPE_FUNCTION(sym) ".type " #sym ", @function\n"
#else
# define SIZE(sym)
# define TYPE_FUNCTION(sym)
#endif

// Provide a convenient way to disable the use of CFI directives.
// This is needed for old/broken assemblers (for example, gas on
// Darwin is pretty old and doesn't support these directives).
#if defined(__APPLE__)
# define CFI(x)
#else
// FIXME: Disable this until we really want to use it. Also, we will
// need to add some workarounds for compilers, which support
// only a subset of these directives.
# define CFI(x)
#endif

// Provide a wrapper for LLVMX86CompilationCallback2 that saves non-traditional
// callee-saved registers, for the fastcc calling convention.
extern "C" {
#if defined(X86_64_JIT)
# ifndef _MSC_VER
  // No need to save EAX/EDX for X86-64.
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
    TYPE_FUNCTION(X86CompilationCallback)
  ASMPREFIX "X86CompilationCallback:\n"
    CFI(".cfi_startproc\n")
    // Save RBP
    "pushq %rbp\n"
    CFI(".cfi_def_cfa_offset 16\n")
    CFI(".cfi_offset %rbp, -16\n")
    // Save RSP
    "movq %rsp, %rbp\n"
    CFI(".cfi_def_cfa_register %rbp\n")
    // Save all int arg registers
    "pushq %rdi\n"
    CFI(".cfi_rel_offset %rdi, 0\n")
    "pushq %rsi\n"
    CFI(".cfi_rel_offset %rsi, 8\n")
    "pushq %rdx\n"
    CFI(".cfi_rel_offset %rdx, 16\n")
    "pushq %rcx\n"
    CFI(".cfi_rel_offset %rcx, 24\n")
    "pushq %r8\n"
    CFI(".cfi_rel_offset %r8, 32\n")
    "pushq %r9\n"
    CFI(".cfi_rel_offset %r9, 40\n")
    // Align stack on 16-byte boundary. RSP might not be properly aligned
    // (8 byte) if this is called from an indirect stub.
    "andq $-16, %rsp\n"
    // Save all XMM arg registers
    "subq $128, %rsp\n"
    "movaps %xmm0, (%rsp)\n"
    "movaps %xmm1, 16(%rsp)\n"
    "movaps %xmm2, 32(%rsp)\n"
    "movaps %xmm3, 48(%rsp)\n"
    "movaps %xmm4, 64(%rsp)\n"
    "movaps %xmm5, 80(%rsp)\n"
    "movaps %xmm6, 96(%rsp)\n"
    "movaps %xmm7, 112(%rsp)\n"
    // JIT callee
#if defined(_WIN64) || defined(__CYGWIN__)
    "subq $32, %rsp\n"
    "movq %rbp, %rcx\n"     // Pass prev frame and return address
    "movq 8(%rbp), %rdx\n"
    "call " ASMPREFIX "LLVMX86CompilationCallback2\n"
    "addq $32, %rsp\n"
#else
    "movq %rbp, %rdi\n"     // Pass prev frame and return address
    "movq 8(%rbp), %rsi\n"
    "call " ASMPREFIX "LLVMX86CompilationCallback2\n"
#endif
    // Restore all XMM arg registers
    "movaps 112(%rsp), %xmm7\n"
    "movaps 96(%rsp), %xmm6\n"
    "movaps 80(%rsp), %xmm5\n"
    "movaps 64(%rsp), %xmm4\n"
    "movaps 48(%rsp), %xmm3\n"
    "movaps 32(%rsp), %xmm2\n"
    "movaps 16(%rsp), %xmm1\n"
    "movaps (%rsp), %xmm0\n"
    // Restore RSP
    "movq %rbp, %rsp\n"
    CFI(".cfi_def_cfa_register %rsp\n")
    // Restore all int arg registers
    "subq $48, %rsp\n"
    CFI(".cfi_adjust_cfa_offset 48\n")
    "popq %r9\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %r9\n")
    "popq %r8\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %r8\n")
    "popq %rcx\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rcx\n")
    "popq %rdx\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rdx\n")
    "popq %rsi\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rsi\n")
    "popq %rdi\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rdi\n")
    // Restore RBP
    "popq %rbp\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rbp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback)
  );
# else
  // No inline assembler support on this platform. The routine is in an
  // external file.
  void X86CompilationCallback();

# endif
#elif defined (X86_32_JIT)
# ifndef _MSC_VER
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
    TYPE_FUNCTION(X86CompilationCallback)
  ASMPREFIX "X86CompilationCallback:\n"
    CFI(".cfi_startproc\n")
    "pushl %ebp\n"
    CFI(".cfi_def_cfa_offset 8\n")
    CFI(".cfi_offset %ebp, -8\n")
    "movl %esp, %ebp\n"     // Standard prologue
    CFI(".cfi_def_cfa_register %ebp\n")
    "pushl %eax\n"
    CFI(".cfi_rel_offset %eax, 0\n")
    "pushl %edx\n"          // Save EAX/EDX/ECX
    CFI(".cfi_rel_offset %edx, 4\n")
    "pushl %ecx\n"
    CFI(".cfi_rel_offset %ecx, 8\n")
# if defined(__APPLE__)
    "andl $-16, %esp\n"     // Align ESP on 16-byte boundary
# endif
    "subl $16, %esp\n"
    "movl 4(%ebp), %eax\n"  // Pass prev frame and return address
    "movl %eax, 4(%esp)\n"
    "movl %ebp, (%esp)\n"
    "call " ASMPREFIX "LLVMX86CompilationCallback2\n"
    "movl %ebp, %esp\n"     // Restore ESP
    CFI(".cfi_def_cfa_register %esp\n")
    "subl $12, %esp\n"
    CFI(".cfi_adjust_cfa_offset 12\n")
    "popl %ecx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ecx\n")
    "popl %edx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %edx\n")
    "popl %eax\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %eax\n")
    "popl %ebp\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ebp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback)
  );

  // Same as X86CompilationCallback but also saves XMM argument registers.
  void X86CompilationCallback_SSE(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback_SSE\n"
    TYPE_FUNCTION(X86CompilationCallback_SSE)
  ASMPREFIX "X86CompilationCallback_SSE:\n"
    CFI(".cfi_startproc\n")
    "pushl %ebp\n"
    CFI(".cfi_def_cfa_offset 8\n")
    CFI(".cfi_offset %ebp, -8\n")
    "movl %esp, %ebp\n"     // Standard prologue
    CFI(".cfi_def_cfa_register %ebp\n")
    "pushl %eax\n"
    CFI(".cfi_rel_offset %eax, 0\n")
    "pushl %edx\n"          // Save EAX/EDX/ECX
    CFI(".cfi_rel_offset %edx, 4\n")
    "pushl %ecx\n"
    CFI(".cfi_rel_offset %ecx, 8\n")
    "andl $-16, %esp\n"     // Align ESP on 16-byte boundary
    // Save all XMM arg registers
    "subl $64, %esp\n"
    // FIXME: provide frame move information for xmm registers.
    // This can be tricky, because CFA register is ebp (unaligned)
    // and we need to produce offsets relative to it.
    "movaps %xmm0, (%esp)\n"
    "movaps %xmm1, 16(%esp)\n"
    "movaps %xmm2, 32(%esp)\n"
    "movaps %xmm3, 48(%esp)\n"
    "subl $16, %esp\n"
    "movl 4(%ebp), %eax\n"  // Pass prev frame and return address
    "movl %eax, 4(%esp)\n"
    "movl %ebp, (%esp)\n"
    "call " ASMPREFIX "LLVMX86CompilationCallback2\n"
    "addl $16, %esp\n"
    "movaps 48(%esp), %xmm3\n"
    CFI(".cfi_restore %xmm3\n")
    "movaps 32(%esp), %xmm2\n"
    CFI(".cfi_restore %xmm2\n")
    "movaps 16(%esp), %xmm1\n"
    CFI(".cfi_restore %xmm1\n")
    "movaps (%esp), %xmm0\n"
    CFI(".cfi_restore %xmm0\n")
    "movl %ebp, %esp\n"     // Restore ESP
    CFI(".cfi_def_cfa_register %esp\n")
    "subl $12, %esp\n"
    CFI(".cfi_adjust_cfa_offset 12\n")
    "popl %ecx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ecx\n")
    "popl %edx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %edx\n")
    "popl %eax\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %eax\n")
    "popl %ebp\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ebp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback_SSE)
  );
# else
  void LLVMX86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr);

  __declspec(naked) void X86CompilationCallback(void) {
    __asm {
      push  ebp
      mov   ebp, esp
      push  eax
      push  edx
      push  ecx
      and   esp, -16
      sub   esp, 16
      mov   eax, dword ptr [ebp+4]
      mov   dword ptr [esp+4], eax
      mov   dword ptr [esp], ebp
      call  LLVMX86CompilationCallback2
      mov   esp, ebp
      sub   esp, 12
      pop   ecx
      pop   edx
      pop   eax
      pop   ebp
      ret
    }
  }

# endif // _MSC_VER

#else // Not an i386 host
  void X86CompilationCallback() {
    llvm_unreachable("Cannot call X86CompilationCallback() on a non-x86 arch!");
  }
#endif
}

/// LLVMX86CompilationCallback2 - This is the target-specific function invoked
/// by the function stub when we did not know the real target of a call. This
/// function must locate the start of the stub or call site and pass it into
/// the JIT compiler function.
extern "C" {
LLVM_ATTRIBUTE_USED // Referenced from inline asm.
LLVM_LIBRARY_VISIBILITY void LLVMX86CompilationCallback2(intptr_t *StackPtr,
                                                         intptr_t RetAddr) {
  intptr_t *RetAddrLoc = &StackPtr[1];
  // We are reading raw stack data here. Tell MemorySanitizer that it is
  // sufficiently initialized.
  __msan_unpoison(RetAddrLoc, sizeof(*RetAddrLoc));
  assert(*RetAddrLoc == RetAddr &&
         "Could not find return address on the stack!");

  // It's a stub if there is an interrupt marker after the call.
  bool isStub = ((unsigned char*)RetAddr)[0] == 0xCE;

  // The call instruction should have pushed the return address onto the
  // stack...
#if defined (X86_64_JIT)
  RetAddr--;     // Backtrack to the reference itself...
#else
  RetAddr -= 4;  // Backtrack to the reference itself...
#endif

#if 0
  DEBUG(dbgs() << "In callback! Addr=" << (void*)RetAddr
               << " ESP=" << (void*)StackPtr
               << ": Resolving call to function: "
               << TheVM->getFunctionReferencedName((void*)RetAddr) << "\n");
#endif

  // Sanity check to make sure this really is a call instruction.
#if defined (X86_64_JIT)
  assert(((unsigned char*)RetAddr)[-2] == 0x41 && "Not a call instr!");
  assert(((unsigned char*)RetAddr)[-1] == 0xFF && "Not a call instr!");
#else
  assert(((unsigned char*)RetAddr)[-1] == 0xE8 && "Not a call instr!");
#endif

  intptr_t NewVal = (intptr_t)JITCompilerFunction((void*)RetAddr);

  // Rewrite the call target... so that we don't end up here every time we
  // execute the call.
#if defined (X86_64_JIT)
  assert(isStub &&
         "X86-64 doesn't support rewriting non-stub lazy compilation calls:"
         " the call instruction varies too much.");
#else
  *(intptr_t *)RetAddr = (intptr_t)(NewVal-RetAddr-4);
#endif

  if (isStub) {
    // If this is a stub, rewrite the call into an unconditional branch
    // instruction so that two return addresses are not pushed onto the stack
    // when the requested function finally gets called. This also makes the
    // 0xCE byte (interrupt) dead, so the marker doesn't affect anything.
#if defined (X86_64_JIT)
    // If the target address is within 32-bit range of the stub, use a
    // PC-relative branch instead of loading the actual address. (This is
    // considerably shorter than the 64-bit immediate load already there.)
    // We assume here intptr_t is 64 bits.
    intptr_t diff = NewVal-RetAddr+7;
    if (diff >= -2147483648LL && diff <= 2147483647LL) {
      *(unsigned char*)(RetAddr-0xc) = 0xE9;
      *(intptr_t *)(RetAddr-0xb) = diff & 0xffffffff;
    } else {
      *(intptr_t *)(RetAddr - 0xa) = NewVal;
      ((unsigned char*)RetAddr)[0] = (2 | (4 << 3) | (3 << 6));
    }
    sys::ValgrindDiscardTranslations((void*)(RetAddr-0xc), 0xd);
#else
    ((unsigned char*)RetAddr)[-1] = 0xE9;
    sys::ValgrindDiscardTranslations((void*)(RetAddr-1), 5);
#endif
  }

  // Change the return address to reexecute the call instruction...
#if defined (X86_64_JIT)
  *RetAddrLoc -= 0xd;
#else
  *RetAddrLoc -= 5;
#endif
}
}
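
// For reference, the 14-byte x86-64 stub patched above (emitted by
// emitFunctionStub below) is laid out as:
//   49 BA <imm64>   movabsq $Target, %r10   (bytes 0-9)
//   41 FF D2        callq *%r10             (bytes 10-12)
//   CE              stub marker             (byte 13)
// Since RetAddr was backed up to byte 12, RetAddr-0xc is the stub start
// (where the near case writes its E9 JMP rel32), RetAddr-0xa is the movabs
// immediate (rewritten in the far case, with byte 12 turned from D2 into E2,
// i.e. callq into jmpq), and subtracting 0xd from the saved return address
// resumes execution at byte 0 of the rewritten stub.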

TargetJITInfo::LazyResolverFn
X86JITInfo::getLazyResolverFunction(JITCompilerFn F) {
  TsanIgnoreWritesBegin();
  JITCompilerFunction = F;
  TsanIgnoreWritesEnd();

#if defined (X86_32_JIT) && !defined (_MSC_VER)
  if (Subtarget->hasSSE1())
    return X86CompilationCallback_SSE;
#endif

  return X86CompilationCallback;
}

X86JITInfo::X86JITInfo(X86TargetMachine &tm) : TM(tm) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  useGOT = 0;
  TLSOffset = 0;
}

void *X86JITInfo::emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
                                             JITCodeEmitter &JCE) {
#if defined (X86_64_JIT)
  const unsigned Alignment = 8;
  uint8_t Buffer[8];
  uint8_t *Cur = Buffer;
  MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(intptr_t)ptr);
  MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(((intptr_t)ptr) >> 32));
#else
  const unsigned Alignment = 4;
  uint8_t Buffer[4];
  uint8_t *Cur = Buffer;
  MachineCodeEmitter::emitWordLEInto(Cur, (intptr_t)ptr);
#endif
  return JCE.allocIndirectGV(GV, Buffer, sizeof(Buffer), Alignment);
}
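
// The indirect symbol emitted above is just a pointer-sized slot holding the
// target address in little-endian byte order; code referencing the global
// loads through this slot.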

TargetJITInfo::StubLayout X86JITInfo::getStubLayout() {
  // The 64-bit stub contains:
  //   movabs r10 <- 8-byte-target-address  # 10 bytes
  //   call|jmp *r10  # 3 bytes
  // The 32-bit stub contains a 5-byte call|jmp.
  // If the stub is a call to the compilation callback, an extra byte is added
  // to mark it as a stub.
  StubLayout Result = {14, 4};
  return Result;
}
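
// For example, the 14-byte budget for a 64-bit callback stub breaks down as
// 2 (49 BA) + 8 (target address) + 3 (41 FF D2, callq *%r10) + 1 (CE marker);
// stubs that jump directly to a known target skip the marker byte.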

void *X86JITInfo::emitFunctionStub(const Function* F, void *Target,
                                   JITCodeEmitter &JCE) {
  // Note, we cast to intptr_t here to silence a -pedantic warning that
  // complains about casting a function pointer to a normal pointer.
#if defined (X86_32_JIT) && !defined (_MSC_VER)
  bool NotCC = (Target != (void*)(intptr_t)X86CompilationCallback &&
                Target != (void*)(intptr_t)X86CompilationCallback_SSE);
#else
  bool NotCC = Target != (void*)(intptr_t)X86CompilationCallback;
#endif
  JCE.emitAlignment(4);
  void *Result = (void*)JCE.getCurrentPCValue();
  if (NotCC) {
#if defined (X86_64_JIT)
    JCE.emitByte(0x49);          // REX prefix
    JCE.emitByte(0xB8+2);        // movabsq r10
    JCE.emitWordLE((unsigned)(intptr_t)Target);
    JCE.emitWordLE((unsigned)(((intptr_t)Target) >> 32));
    JCE.emitByte(0x41);          // REX prefix
    JCE.emitByte(0xFF);          // jmpq *r10
    JCE.emitByte(2 | (4 << 3) | (3 << 6));
#else
    JCE.emitByte(0xE9);
    JCE.emitWordLE((intptr_t)Target-JCE.getCurrentPCValue()-4);
#endif
    return Result;
  }

#if defined (X86_64_JIT)
  JCE.emitByte(0x49);          // REX prefix
  JCE.emitByte(0xB8+2);        // movabsq r10
  JCE.emitWordLE((unsigned)(intptr_t)Target);
  JCE.emitWordLE((unsigned)(((intptr_t)Target) >> 32));
  JCE.emitByte(0x41);          // REX prefix
  JCE.emitByte(0xFF);          // callq *r10
  JCE.emitByte(2 | (2 << 3) | (3 << 6));
#else
  JCE.emitByte(0xE8);   // Call with 32 bit pc-rel destination...

  JCE.emitWordLE((intptr_t)Target-JCE.getCurrentPCValue()-4);
#endif

  // This used to use 0xCD, but that value is used by JITMemoryManager to
  // initialize the buffer with garbage, which means it may follow a
  // noreturn function call, confusing LLVMX86CompilationCallback2. PR 4929.
  JCE.emitByte(0xCE);   // Interrupt - Just a marker identifying the stub!
  return Result;
}
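
// The ModRM bytes above are built as (mod << 6) | (reg << 3) | r/m: mod = 3
// selects register-direct, r/m = 2 names %r10 (via the REX.B bit in 0x41/0x49),
// and the reg field is the opcode extension: /2 (0xD2) is callq *%r10 and
// /4 (0xE2) is jmpq *%r10.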

/// getPICJumpTableEntry - Returns the value of the jumptable entry for the
/// specific basic block.
uintptr_t X86JITInfo::getPICJumpTableEntry(uintptr_t BB, uintptr_t Entry) {
#if defined(X86_64_JIT)
  return BB - Entry;
#else
  return BB - PICBase;
#endif
}

template<typename T> static void addUnaligned(void *Pos, T Delta) {
  T Value;
  std::memcpy(reinterpret_cast<char*>(&Value), reinterpret_cast<char*>(Pos),
              sizeof(T));
  Value += Delta;
  std::memcpy(reinterpret_cast<char*>(Pos), reinterpret_cast<char*>(&Value),
              sizeof(T));
}
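
// Relocation targets are instruction immediates, which are rarely naturally
// aligned, so addUnaligned goes through memcpy to keep the read-modify-write
// well-defined; e.g. addUnaligned<unsigned>(RelocPos, Delta) adds Delta to a
// 32-bit field at any byte address.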

/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
void X86JITInfo::relocate(void *Function, MachineRelocation *MR,
                          unsigned NumRelocs, unsigned char* GOTBase) {
  for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
    void *RelocPos = (char*)Function + MR->getMachineCodeOffset();
    intptr_t ResultPtr = (intptr_t)MR->getResultPointer();
    switch ((X86::RelocationType)MR->getRelocationType()) {
    case X86::reloc_pcrel_word: {
      // PC relative relocation, add the relocated value to the value already
      // in memory, after we adjust it for where the PC is.
      ResultPtr = ResultPtr - (intptr_t)RelocPos - 4 - MR->getConstantVal();
      addUnaligned<unsigned>(RelocPos, ResultPtr);
      break;
    }
    case X86::reloc_picrel_word: {
      // PIC base relative relocation, add the relocated value to the value
      // already in memory, after we adjust it for where the PIC base is.
      ResultPtr = ResultPtr - ((intptr_t)Function + MR->getConstantVal());
      addUnaligned<unsigned>(RelocPos, ResultPtr);
      break;
    }
    case X86::reloc_absolute_word:
    case X86::reloc_absolute_word_sep:
      // Absolute relocation, just add the relocated value to the value already
      // in memory.
      addUnaligned<unsigned>(RelocPos, ResultPtr);
      break;
    case X86::reloc_absolute_dword:
      addUnaligned<intptr_t>(RelocPos, ResultPtr);
      break;
    }
  }
}
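
// Worked example (hypothetical addresses): a reloc_pcrel_word at
// RelocPos = 0x5000 resolving to ResultPtr = 0x7000 with ConstantVal = 0 adds
// 0x7000 - 0x5000 - 4 = 0x1FFC to the field, so the next-instruction PC
// (RelocPos + 4) plus the stored value lands on 0x7000.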

char *X86JITInfo::allocateThreadLocalMemory(size_t size) {
#if defined(X86_32_JIT) && !defined(__APPLE__) && !defined(_MSC_VER)
  TLSOffset -= size;
  return TLSOffset;
#else
  llvm_unreachable("Cannot allocate thread local storage on this arch!");
#endif
}
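
// Note: on 32-bit ELF TLS (variant 2), thread-local data lives at negative
// offsets from the thread pointer in %gs, which is presumably why TLSOffset
// starts at zero and grows downward here.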