#if defined(__linux__)
#define _GNU_SOURCE
#endif

#if defined(__linux__) || defined(__APPLE__)
#include <sys/mman.h>
#elif defined(_WIN32)
#include <memoryapi.h>
#include <handleapi.h>
#endif

#if defined(__APPLE__)
#include <mach/mach.h>
#include <mach/mach_vm.h>
#endif

#include <unistd.h>
#include <stdint.h>
#include <errno.h>
#include <string.h>

#include "infrastructure.h"
#include "tlib-alloc.h"
#include "tcg/tcg.h"

uint8_t *tcg_rw_buffer;
uint8_t *tcg_rx_buffer;
uint64_t code_gen_buffer_size;
intptr_t tcg_wx_diff;

bool is_ptr_in_rw_buf(const void *ptr)
{
    return (ptr >= (void*)tcg_rw_buffer) && (ptr < ((void*)tcg_rw_buffer + code_gen_buffer_size + TCG_PROLOGUE_SIZE));
}

bool is_ptr_in_rx_buf(const void *ptr)
{
    return (ptr >= (void*)tcg_rx_buffer) && (ptr < ((void*)tcg_rx_buffer + code_gen_buffer_size + TCG_PROLOGUE_SIZE));
}

void* rw_ptr_to_rx(void *ptr)
{
    if (ptr == NULL) {
        // null pointers should not be changed
        return ptr;
    }
    tlib_assert(is_ptr_in_rx_buf(ptr - tcg_wx_diff));
    return ptr - tcg_wx_diff;
}

void* rx_ptr_to_rw(const void *ptr)
{
    if (ptr == NULL) {
        // null pointers should not be changed
        return (void*) ptr;
    }
    tlib_assert(is_ptr_in_rw_buf(ptr + tcg_wx_diff));
    return (void*) (ptr + tcg_wx_diff);
}
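
/*
 * Illustrative sketch (not part of the original file): with a split W^X mapping,
 * generated code is written through the RW view but must be called through the
 * corresponding RX address, using the helpers above. The emitted bytes and the
 * function name are hypothetical, and cache maintenance is omitted.
 */
#if 0
static void example_emit_and_call(void)
{
    // Emit code at an address inside the writable mapping.
    uint8_t *write_ptr = tcg_rw_buffer;
    write_ptr[0] = 0xC3; // Placeholder byte (an x86_64 "ret"); real generated code would go here.

    // Execute it through the matching address in the executable mapping.
    void (*entry)(void) = (void (*)(void))rw_ptr_to_rx(write_ptr);
    entry();
}
#endif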

#if (defined(__linux__) || defined(__APPLE__)) && (!defined(__aarch64__))
static bool alloc_code_gen_buf_unified(uint64_t size)
{
    // No write/execute splitting
    int flags = MAP_ANON | MAP_PRIVATE;
    void *rwx = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0);
    if (rwx == MAP_FAILED) {
        tlib_printf(LOG_LEVEL_DEBUG, "Failed to mmap rwx buffer, error: %s", strerror(errno));
        return false;
    }
    tcg_rx_buffer = tcg_rw_buffer = rwx;
    code_gen_buffer_size = size;
    tcg_wx_diff = 0;
    return true;
}
#endif

#if (defined(__linux__) || defined(__APPLE__))
void free_code_gen_buf()
{
    // If split buffers are not in use, the second munmap will fail, but this causes no issues
    munmap(tcg_rw_buffer, code_gen_buffer_size + TCG_PROLOGUE_SIZE);
    munmap(tcg_rx_buffer, code_gen_buffer_size + TCG_PROLOGUE_SIZE);
}
#endif

#if defined(__linux__) && defined(__aarch64__)
static bool alloc_code_gen_buf_split(uint64_t size)
{
    // Split writable and executable mapping
    int fd = memfd_create("code_gen_buffer", 0);
    if (fd == -1) {
        tlib_abortf("Failed to create backing file for code_gen_buffer, error: %s", strerror(errno));
    }
    if (ftruncate(fd, size) == -1) {
        tlib_printf(LOG_LEVEL_DEBUG, "Failed to allocate %llu bytes for codegen buffer, error: %s", (unsigned long long)size, strerror(errno));
        // Cleanup the fd
        close(fd);
        return false;
    }
    // Backing file creation succeeded, mmap buffers
    int flags = MAP_SHARED;
    void *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, fd, 0);
    if (rw == MAP_FAILED) {
        tlib_printf(LOG_LEVEL_DEBUG, "Failed to mmap rw buffer, error: %s", strerror(errno));
        close(fd);
        return false;
    }
    void *rx = mmap(NULL, size, PROT_READ | PROT_EXEC, flags, fd, 0);
    if (rx == MAP_FAILED) {
        tlib_printf(LOG_LEVEL_DEBUG, "Failed to mmap rx buffer, error: %s", strerror(errno));
        close(fd);
        // Unmap the rw region so it does not leak
        munmap(rw, size);
        return false;
    }
    // Mapping succeeded, we can now close the fd safely
    close(fd);
    tcg_rw_buffer = (uint8_t*) rw;
    tcg_rx_buffer = (uint8_t*) rx;
    code_gen_buffer_size = size;
    tcg_wx_diff = tcg_rw_buffer - tcg_rx_buffer;
    return true;
}
#elif defined(__APPLE__) && defined(__aarch64__)
static bool alloc_code_gen_buf_split(uint64_t size)
{
    mach_vm_address_t rw, rx;
    int flags = MAP_ANONYMOUS | MAP_SHARED;
    rw = (mach_vm_address_t) mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
    if (rw == (mach_vm_address_t) MAP_FAILED) {
        tlib_printf(LOG_LEVEL_ERROR, "Failed to mmap rw buffer, error: %s", strerror(errno));
        return false;
    }
    rx = 0;
    vm_prot_t current_prot, max_prot;
    kern_return_t res = mach_vm_remap(mach_task_self(), &rx, size, 0, VM_FLAGS_ANYWHERE, mach_task_self(), rw, false,
                                      &current_prot, &max_prot, VM_INHERIT_NONE);
    if (res != KERN_SUCCESS) {
        tlib_printf(LOG_LEVEL_ERROR, "Failed to mach_vm_remap rx buffer, error: %i", res);
        munmap((void*)rw, size);
        return false;
    }
    if (mprotect((void*) rx, size, PROT_READ | PROT_EXEC) != 0) {
        tlib_printf(LOG_LEVEL_ERROR, "Failed to mprotect rx buffer");
        // Unmap the memory regions so they don't leak
        munmap((void*) rw, size);
        munmap((void*) rx, size);
        return false;
    }
    tcg_rw_buffer = (uint8_t *) rw;
    tcg_rx_buffer = (uint8_t *) rx;
    tcg_wx_diff = tcg_rw_buffer - tcg_rx_buffer;
    code_gen_buffer_size = size;
    return true;
}
#elif defined(_WIN32)
static bool map_exec(void *addr, long size)
{
    DWORD old_protect;
    return (bool) VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &old_protect);
}

static bool alloc_code_gen_buf_unified(uint64_t size)
{
    uint8_t *buf = tlib_malloc(size);
    if (buf == NULL) {
        return false;
    }
    if (!map_exec(buf, size)) {
        tlib_printf(LOG_LEVEL_ERROR, "Failed to VirtualProtect code_gen_buffer");
        // Free the buffer so it does not leak
        tlib_free(buf);
        return false;
    }
    tcg_rw_buffer = tcg_rx_buffer = buf;
    code_gen_buffer_size = size;
    tcg_wx_diff = 0;
    return true;
}

void free_code_gen_buf()
{
    tlib_free(tcg_rw_buffer);
}
#endif

bool alloc_code_gen_buf(uint64_t size)
{
#if defined(__aarch64__)
    return alloc_code_gen_buf_split(size);
#else
    return alloc_code_gen_buf_unified(size);
#endif
}
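
/*
 * Illustrative sketch (not part of the original file): the expected lifecycle from
 * the caller's side. The initial size and the retry-with-a-smaller-buffer policy
 * are assumptions for illustration only, not behaviour defined in this file.
 */
#if 0
static void example_code_gen_buf_lifecycle(void)
{
    uint64_t size = 32 * 1024 * 1024;

    // Try to allocate; a caller might fall back to smaller sizes on failure.
    while (size >= 1024 * 1024 && !alloc_code_gen_buf(size)) {
        size /= 2;
    }

    // tcg_rw_buffer, tcg_rx_buffer and code_gen_buffer_size are now valid
    // (both pointers are identical when no W^X split is in use).

    // ... emit code via tcg_rw_buffer, execute via rw_ptr_to_rx(...) ...

    free_code_gen_buf();
}
#endif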