#include "llvm/Support/DataTypes.h"
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef __APPLE__
#include <mach/mach.h>
#endif
#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  else
#    include <sys/cachectl.h>
#  endif
#endif
36 extern "C" void sys_icache_invalidate(
const void *Addr,
size_t len);
38 extern "C" void __clear_cache(
void *,
void*);
// Map llvm::sys::Memory protection flags onto POSIX PROT_* bits.
int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
       llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On FreeBSD/PowerPC an executable page must stay readable:
    // InvalidateInstructionCache uses dcbf and icbi, both of which fault on
    // a page without read permission.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
}
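// Illustrative sketch (not part of the original file): how the MF_* request
// bits from llvm/Support/Memory.h travel through getPosixProtectionFlags().
//
//   unsigned PF = llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE;
//   int Prot = getPosixProtectionFlags(PF); // PROT_READ | PROT_WRITE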
MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags, error_code &EC) {
  EC = error_code::success();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = process::get_self()->page_size();
  // Round the request up to a whole number of pages.
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;
  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  // Platforms without anonymous mmap map /dev/zero instead.
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = error_code(errno, system_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif
  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Terminates the MMFlags initializer started above.
  int Protect = getPosixProtectionFlags(PFlags);
  // Use any near hint and the page size to set a page-aligned start address.
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;
  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // The near hint failed; retry without it.
      return allocateMappedMemory(NumBytes, 0, PFlags, EC);
    EC = error_code(errno, system_category());
    return MemoryBlock();
  }
  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;
  return Result;
}
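// Usage sketch (illustrative, assuming the wrapper declared in
// llvm/Support/Memory.h):
//
//   error_code EC;
//   MemoryBlock MB = Memory::allocateMappedMemory(
//       4096, 0, Memory::MF_READ | Memory::MF_WRITE, EC);
//   if (EC) { /* allocation failed; EC.message() describes why */ }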
error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == 0 || M.Size == 0)
    return error_code::success(); // Nothing was mapped; release is a no-op.

  if (0 != ::munmap(M.Address, M.Size))
    return error_code(errno, system_category());

  M.Address = 0;
  M.Size = 0;
  return error_code::success();
}
error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  int Protect = getPosixProtectionFlags(Flags);
  int Result = ::mprotect(M.Address, M.Size, Protect);
  if (Result != 0)
    return error_code(errno, system_category());

  return error_code::success();
}
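// W^X sketch (illustrative): a client can allocate read/write pages, emit
// code into them, then flip the block to read/execute with
// protectMappedMemory. CodeBytes and CodeSize are hypothetical names.
//
//   error_code EC;
//   MemoryBlock MB = Memory::allocateMappedMemory(
//       CodeSize, 0, Memory::MF_READ | Memory::MF_WRITE, EC);
//   memcpy(MB.base(), CodeBytes, CodeSize);
//   EC = Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC);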
/// AllocateRWX - Allocate a slab of read/write/execute memory, typically for
/// a JIT that emits code and then jumps to it.
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock *NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0)
    return MemoryBlock();
  size_t PageSize = process::get_self()->page_size();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;
  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
    return MemoryBlock();
  }
  fd = zero_fd;
#endif
  int flags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;
  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : 0;
#if defined(__APPLE__) && defined(__arm__)
  // Darwin/ARM forbids writable+executable mappings; map read/exec first and
  // toggle the protection with vm_protect below.
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // The near hint failed; retry without it.
      return AllocateRWX(NumBytes, 0, ErrMsg);
    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }
#if defined(__APPLE__) && defined(__arm__)
  // First request read/execute (plus VM_PROT_COPY) on the region...
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  // ...then drop to read/write so code can be emitted into the pages.
  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif
  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;
  return result;
}
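// JIT usage sketch (illustrative; CodeBuf and CodeLen are hypothetical
// names):
//
//   std::string Err;
//   MemoryBlock MB = Memory::AllocateRWX(CodeLen, 0, &Err);
//   memcpy(MB.base(), CodeBuf, CodeLen);
//   Memory::setExecutable(MB, &Err); // no-op everywhere but Darwin/ARM
//   Memory::InvalidateInstructionCache(MB.base(), CodeLen);
//   reinterpret_cast<void (*)()>(MB.base())();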
bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == 0 || M.Size == 0)
    return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}
bool Memory::setWritable(MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0)
    return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
                                (vm_size_t)M.Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}
bool Memory::setExecutable(MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0)
    return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
                                (vm_size_t)M.Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#elif defined(__arm__) || defined(__aarch64__)
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  return true;
#else
  return true;
#endif
}
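// On Darwin/ARM a JIT must alternate: setWritable() before emitting into a
// block, setExecutable() before running it. On other targets both calls
// succeed trivially, so portable callers can always pair them (sketch):
//
//   Memory::setWritable(MB, &Err);   // emit or patch code here...
//   Memory::setExecutable(MB, &Err); // ...then jump to it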
bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}
bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}
/// InvalidateInstructionCache - Before a JIT can run a block of freshly
/// emitted code, the instruction cache must be invalidated on some targets.
void Memory::InvalidateInstructionCache(const void *Addr, size_t Len) {
#if defined(__APPLE__)
#  if (defined(__POWERPC__) || defined(__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
  // Darwin's libkern routine handles both PPC and ARM.
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif
#else
#  if (defined(__POWERPC__) || defined(__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  // Flush each touched data-cache line to memory...
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  // ...then invalidate the matching instruction-cache lines.
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__)) && defined(__GNUC__)
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  elif defined(__mips__)
  const char *Start = static_cast<const char *>(Addr);
#    if defined(ANDROID)
  // Android bionic declares cacheflush as:
  //   extern int cacheflush(long start, long end, long flags);
  const char *End = Start + Len;
  long LStart = reinterpret_cast<long>(const_cast<char *>(Start));
  long LEnd = reinterpret_cast<long>(const_cast<char *>(End));
  cacheflush(LStart, LEnd, BCACHE);
#    else
  cacheflush(const_cast<char *>(Start), Len, BCACHE);
#    endif
#  endif
#endif  // end __APPLE__

  ValgrindDiscardTranslations(Addr, Len);
}
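// Patching sketch (illustrative; PatchSite, NewInsn, and InsnSize are
// hypothetical names): whenever code bytes are rewritten in place, the
// icache must be invalidated before the patched code is re-entered.
//
//   memcpy(PatchSite, NewInsn, InsnSize);
//   Memory::InvalidateInstructionCache(PatchSite, InsnSize);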