/**************************************************************************/
/* */
/* OCaml */
/* */
/* KC Sivaramakrishnan, Indian Institute of Technology, Madras */
/* Stephen Dolan, University of Cambridge */
/* */
/* Copyright 2016 Indian Institute of Technology, Madras */
/* Copyright 2016 University of Cambridge */
/* */
/* All rights reserved. This file is distributed under the terms of */
/* the GNU Lesser General Public License version 2.1, with the */
/* special exception on linking described in the file LICENSE. */
/* */
/**************************************************************************/

#ifndef CAML_PLAT_THREADS_H
#define CAML_PLAT_THREADS_H

/* Platform-specific concurrency and memory primitives */

#ifdef CAML_INTERNALS

#include <pthread.h>
#include <errno.h>
#include <string.h>
#include "config.h"
#include "mlvalues.h"
#include "sys.h"

#ifdef _MSC_VER
#include <intrin.h>
#endif

#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif

/* Hint for busy-waiting loops */
Caml_inline void cpu_relax(void) {
#ifdef __GNUC__
#if defined(__x86_64__) || defined(__i386__)
  __asm__ volatile("pause" ::: "memory");
#elif defined(__aarch64__)
  __asm__ volatile ("yield" ::: "memory");
#elif defined(__riscv)
  /* Encoding of the pause instruction */
  __asm__ volatile (".4byte 0x100000F");
#elif defined(__ppc64__)
  __asm__ volatile ("or 1, 1, 1 # low priority");
  __asm__ volatile ("or 2, 2, 2 # medium priority");
  __asm__ volatile ("" ::: "memory");
#else
  /* Just a compiler barrier */
  __asm__ volatile ("" ::: "memory");
#endif
#elif defined(_MSC_VER)
  /* It would be better to use YieldProcessor for a portable
     implementation, but that would require windows.h, which we can't
     include here (it would conflict with caml/instruct.h on ATOM, for
     instance). */
#if defined(_M_IX86) || defined(_M_X64)
  _mm_pause();
#endif
#endif
}
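
/* A sketch of a raw busy-wait using cpu_relax ([flag] is an
   illustrative atomic_uintnat); prefer the SPIN_WAIT machinery below,
   which escalates to caml_plat_spin_wait after Max_spins iterations:

     while (atomic_load_acquire(&flag) == 0)
       cpu_relax();
*/
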
/* Spin-wait loops */
#define Max_spins 1000
CAMLextern unsigned caml_plat_spin_wait(unsigned spins,
                                        const char* file, int line,
                                        const char* function);
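
/* Two levels of macro expansion, so that __LINE__ is expanded to its
   numeric value before token pasting, yielding an identifier that is
   unique per use site. */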
#define GENSYM_3(name, l) name##l
#define GENSYM_2(name, l) GENSYM_3(name, l)
#define GENSYM(name) GENSYM_2(name, __LINE__)

#define SPIN_WAIT \
  unsigned GENSYM(caml__spins) = 0; \
  for (; 1; cpu_relax(), \
       GENSYM(caml__spins) = \
         CAMLlikely(GENSYM(caml__spins) < Max_spins) ? \
           GENSYM(caml__spins) + 1 : \
           caml_plat_spin_wait(GENSYM(caml__spins), \
                               __FILE__, __LINE__, __func__))
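
/* SPIN_WAIT is followed by a statement (usually a block) that becomes
   the body of the spin loop; atomic_load_wait_nonzero below is a
   typical use. A minimal sketch ([ready] is illustrative):

     SPIN_WAIT {
       if (atomic_load_acquire(&ready)) break;
     }
*/
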
Caml_inline uintnat atomic_load_wait_nonzero(atomic_uintnat* p) {
  SPIN_WAIT {
    uintnat v = atomic_load_acquire(p);
    if (v) return v;
  }
}

/* Atomic read-modify-write instructions, with full fences */
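/* The helper below behaves like atomic_fetch_add (returning the value
   previously stored) but also asserts, in debug builds, that this
   previous value was strictly positive; presumably [v] is a negative
   delta applied to a counter that must not go below zero. */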
Caml_inline uintnat atomic_fetch_add_verify_ge0(atomic_uintnat* p, uintnat v) {
  uintnat result = atomic_fetch_add(p, v);
  CAMLassert ((intnat)result > 0);
  return result;
}

/* Warning: blocking functions.

   Blocking functions are for use in the runtime outside of the
   mutator, or when the domain lock is not held.

   In order to use them inside the mutator and while holding the
   domain lock, one must make sure that the wait is very short, and
   that no deadlock can arise from the interaction with the domain
   locks and the stop-the-world sections.

   In particular, one must not call [caml_plat_lock_blocking] on a
   mutex while the domain lock is held if either:
   - any critical section of the mutex crosses an allocation, a
     blocking section releasing the domain lock, or any other
     potential STW section, or
   - the same lock is acquired at any point using [Mutex.lock] or
     [caml_plat_lock_non_blocking] on the same domain (a circular
     deadlock with the domain lock).

   Hence, as a general rule, prefer [caml_plat_lock_non_blocking] to
   lock a mutex when inside the mutator and holding the domain lock.
   The domain lock must be held in order to call
   [caml_plat_lock_non_blocking].

   These functions never raise exceptions; errors are fatal. Thus, in
   places where bugs are likely to be introduced by users, the
   functions from caml/sync.h should be used instead.
*/
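
/* A sketch of the preferred pattern inside the mutator while holding
   the domain lock ([m] is an illustrative caml_plat_mutex; the
   critical section must stay short, with no allocation and no
   potential STW section):

     caml_plat_lock_non_blocking(&m);
     ... short critical section ...
     caml_plat_unlock(&m);
*/
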
typedef pthread_mutex_t caml_plat_mutex;
#define CAML_PLAT_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
CAMLextern void caml_plat_mutex_init(caml_plat_mutex*);
Caml_inline void caml_plat_lock_blocking(caml_plat_mutex*);
Caml_inline void caml_plat_lock_non_blocking(caml_plat_mutex*);
Caml_inline int caml_plat_try_lock(caml_plat_mutex*);
void caml_plat_assert_locked(caml_plat_mutex*);
void caml_plat_assert_all_locks_unlocked(void);
Caml_inline void caml_plat_unlock(caml_plat_mutex*);
void caml_plat_mutex_free(caml_plat_mutex*);
typedef pthread_cond_t caml_plat_cond;
#define CAML_PLAT_COND_INITIALIZER PTHREAD_COND_INITIALIZER
void caml_plat_cond_init(caml_plat_cond*);
void caml_plat_wait(caml_plat_cond*, caml_plat_mutex*); /* blocking */
void caml_plat_broadcast(caml_plat_cond*);
void caml_plat_signal(caml_plat_cond*);
void caml_plat_cond_free(caml_plat_cond*);
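
/* Typical condition-variable pattern, as with plain pthreads: hold the
   mutex around the (blocking) wait and re-check the predicate in a
   loop to absorb spurious wakeups. A sketch ([ready] and [m] are
   illustrative):

     caml_plat_lock_blocking(&m);
     while (!ready)
       caml_plat_wait(&cond, &m);
     caml_plat_unlock(&m);
*/
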
/* Memory management primitives (mmap) */
uintnat caml_mem_round_up_pages(uintnat size);
/* The size given to caml_mem_map and caml_mem_commit must be a multiple of
   caml_plat_pagesize. The size given to caml_mem_unmap and caml_mem_decommit
   must match the size given to caml_mem_map/caml_mem_commit for mem.
*/
void* caml_mem_map(uintnat size, int reserve_only);
void* caml_mem_commit(void* mem, uintnat size);
void caml_mem_decommit(void* mem, uintnat size);
void caml_mem_unmap(void* mem, uintnat size);
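
/* A sketch of the intended reserve/commit protocol (error handling
   elided; [n] is an illustrative byte count):

     uintnat size = caml_mem_round_up_pages(n);
     void* base = caml_mem_map(size, 1);   (1 = reserve_only)
     caml_mem_commit(base, size);          (back the range with memory)
     ...
     caml_mem_unmap(base, size);
*/
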
CAMLnoret void caml_plat_fatal_error(const char * action, int err);
Caml_inline void check_err(const char* action, int err)
{
  if (err) caml_plat_fatal_error(action, err);
}
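
/* In debug builds, the thread-local caml_lockdepth counts the platform
   locks held by the current thread; presumably
   caml_plat_assert_all_locks_unlocked checks that it is back to
   zero. */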
#ifdef DEBUG
CAMLextern CAMLthread_local int caml_lockdepth;
#define DEBUG_LOCK(m) (caml_lockdepth++)
#define DEBUG_UNLOCK(m) (caml_lockdepth--)
#else
#define DEBUG_LOCK(m)
#define DEBUG_UNLOCK(m)
#endif

Caml_inline void caml_plat_lock_blocking(caml_plat_mutex* m)
{
  check_err("lock", pthread_mutex_lock(m));
  DEBUG_LOCK(m);
}

Caml_inline int caml_plat_try_lock(caml_plat_mutex* m)
{
  int r = pthread_mutex_trylock(m);
  if (r == EBUSY) {
    return 0;
  } else {
    check_err("try_lock", r);
    DEBUG_LOCK(m);
    return 1;
  }
}

CAMLextern void caml_plat_lock_non_blocking_actual(caml_plat_mutex* m);
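
/* Fast path for caml_plat_lock_non_blocking: take the lock directly
   when it is uncontended, and otherwise fall back to the out-of-line
   slow path, which may block (and which, presumably, cooperates with
   the domain lock as the contract above requires). */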
Caml_inline void caml_plat_lock_non_blocking(caml_plat_mutex* m)
{
  if (!caml_plat_try_lock(m)) {
    caml_plat_lock_non_blocking_actual(m);
  }
}

Caml_inline void caml_plat_unlock(caml_plat_mutex* m)
{
  DEBUG_UNLOCK(m);
  check_err("unlock", pthread_mutex_unlock(m));
}
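
/* Page size and mapping alignment used by the memory-mapping
   primitives above; presumably initialized during runtime startup. */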
extern intnat caml_plat_pagesize;
extern intnat caml_plat_mmap_alignment;
#endif /* CAML_INTERNALS */
#endif /* CAML_PLAT_THREADS_H */