/**********************************************************************

  rubysig.h -

  $Author$
  $Date$
  created at: Wed Aug 16 01:15:38 JST 1995

  Copyright (C) 1993-2003 Yukihiro Matsumoto

**********************************************************************/
#ifndef SIG_H
#define SIG_H
#include <errno.h>
#if defined __ppc__ || defined __powerpc__ || \
    defined __ppc64__ || defined __powerpc64__
#define __anyPowerPC__ 1 /* for compatibility with older gcc versions */
#endif
/* STACK_WIPE_SITES determines where attempts are made to exorcise
"ghost object references" from the stack and how the stack is cleared:
0x*001 --> wipe stack just after every thread_switch
0x*002 --> wipe stack just after every EXEC_TAG()
0x*004 --> wipe stack in CHECK_INTS
0x*010 --> wipe stack in while & until loops
0x*020 --> wipe stack before yield() in iterators and outside eval.c
0x*040 --> wipe stack on catch and thread save context
0x*100 --> update stack extent on each object allocation
0x*200 --> update stack extent on each object reallocation
0x*400 --> update stack extent during GC marking passes
0x*800 --> update stack extent on each throw (use with 0x040)
0x1000 --> use inline assembly code for x86, PowerPC, or ARM CPUs
0x0*** --> do not even call rb_gc_wipe_stack()
0x2*** --> call dummy rb_gc_wipe_stack() (for debugging and profiling)
0x4*** --> safe, portable stack clearing in memory allocated with alloca
0x6*** --> use faster, but less safe stack clearing in unallocated stack
0x8*** --> use faster, but less safe stack clearing (with inline code)
for most effective gc use 0x*707
for fastest micro-benchmarking use 0x0000
0x*770 prevents almost all memory leaks caused by ghost references
without adding much overhead for stack clearing.
Other good trade-offs are 0x*270, 0x*703, 0x*303 or even 0x*03
In general, you may lessen the default -mpreferred-stack-boundary
only if using less safe stack clearing (0x6***). Lessening the
stack alignment with portable stack clearing (0x4***) may fail to clear
all ghost references off the stack.
When using 0x6*** or 0x8***, the compiler could insert
stack push(es) between reading the stack pointer and clearing
the ghost references. The register(s) pushed would then be
cleared by rb_gc_wipe_stack(), typically resulting in a segfault
or an interpreter hang.
STACK_WIPE_SITES of 0x8770 works well compiled with gcc on most machines
using the recommended CFLAGS="-O2 -fno-stack-protector". However...
If it hangs or crashes for you, try changing STACK_WIPE_SITES to 0x4770
and please report your details (CFLAGS, compiler, version, CPU).
Note that it is redundant to wipe_stack in looping constructs if
also doing so in CHECK_INTS. It is also redundant to wipe_stack on
each thread_switch if wiping after every thread save context.
*/
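/* Illustrative decomposition (derived from the table above): the normal
   default of STACK_WIPE_SITES == 0x8770 selects
     0x8000  faster, less safe stack clearing with inline code
     0x0400  update stack extent during GC marking passes
     0x0200  update stack extent on each object reallocation
     0x0100  update stack extent on each object allocation
     0x0040  wipe stack on catch and thread save context
     0x0020  wipe stack before yield() in iterators and outside eval.c
     0x0010  wipe stack in while & until loops
   with no wiping after thread_switch, after EXEC_TAG(), or in CHECK_INTS. */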
#ifndef STACK_WIPE_SITES
# ifdef __x86_64__ /* deal with "red zone" by not inlining stack clearing */
# define STACK_WIPE_SITES 0x6770
# elif defined __anyPowerPC__ /* On any PowerPC, deal with... */
# define STACK_WIPE_SITES 0x7764 /* red zone & alloca(0) doesn't return sp */
# else
# define STACK_WIPE_SITES 0x8770 /*normal case, use 0x4770 if problems arise*/
# endif
#endif
#if (STACK_WIPE_SITES & 0x14) == 0x14
#warning wiping stack in CHECK_INTS makes wiping in loops redundant
#endif
#if (STACK_WIPE_SITES & 0x41) == 0x41
#warning wiping stack after thread save makes wiping on thread_switch redundant
#endif
#define STACK_WIPE_METHOD (STACK_WIPE_SITES>>13)
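/* Derived stack clearing method, per the table above:
   0x0*** -> 0 (never call rb_gc_wipe_stack()), 0x2*** -> 1 (dummy call),
   0x4*** -> 2 (portable clearing in memory allocated with alloca),
   0x6*** -> 3 (faster, less safe clearing in unallocated stack),
   0x8*** -> 4 (faster, less safe clearing with inline code). */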
#ifdef _WIN32
typedef LONG rb_atomic_t;
# define ATOMIC_TEST(var) InterlockedExchange(&(var), 0)
# define ATOMIC_SET(var, val) InterlockedExchange(&(var), (val))
# define ATOMIC_INC(var) InterlockedIncrement(&(var))
# define ATOMIC_DEC(var) InterlockedDecrement(&(var))
/* Windows doesn't allow interrupt while system calls */
# define TRAP_BEG do {\
    int saved_errno = 0;\
    rb_atomic_t trap_immediate = ATOMIC_SET(rb_trap_immediate, 1)
# define TRAP_END\
    ATOMIC_SET(rb_trap_immediate, trap_immediate);\
    saved_errno = errno;\
    CHECK_INTS;\
    errno = saved_errno;\
  } while (0)
# define RUBY_CRITICAL(statements) do {\
    rb_w32_enter_critical();\
    statements;\
    rb_w32_leave_critical();\
  } while (0)
#else
typedef int rb_atomic_t;
# define ATOMIC_TEST(var) ((var) ? ((var) = 0, 1) : 0)
# define ATOMIC_SET(var, val) ((var) = (val))
# define ATOMIC_INC(var) (++(var))
# define ATOMIC_DEC(var) (--(var))
# define TRAP_BEG do {\
    int saved_errno = 0;\
    int trap_immediate = rb_trap_immediate;\
    rb_trap_immediate = 1
# define TRAP_END rb_trap_immediate = trap_immediate;\
    saved_errno = errno;\
    CHECK_INTS;\
    errno = saved_errno;\
  } while (0)
# define RUBY_CRITICAL(statements) do {\
    int trap_immediate = rb_trap_immediate;\
    rb_trap_immediate = 0;\
    statements;\
    rb_trap_immediate = trap_immediate;\
  } while (0)
#endif
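/* Usage sketch (illustrative; actual callers live elsewhere in the
   interpreter): a blocking system call is typically bracketed so a signal
   arriving during the call can be handled immediately, e.g.
       TRAP_BEG;
       c = getc(f);
       TRAP_END;
   TRAP_END restores the previous rb_trap_immediate and re-checks pending
   interrupts while preserving errno. RUBY_CRITICAL(statements) runs its
   statements with rb_trap_immediate forced off so no trap fires mid-way. */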
RUBY_EXTERN rb_atomic_t rb_trap_immediate;
RUBY_EXTERN int rb_prohibit_interrupt;
#define DEFER_INTS (rb_prohibit_interrupt++)
#define ALLOW_INTS do {\
    rb_prohibit_interrupt--;\
    CHECK_INTS;\
} while (0)
#define ENABLE_INTS (rb_prohibit_interrupt--)
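/* Usage sketch (illustrative): code that must not be preempted or have a
   trap handler run brackets itself with
       DEFER_INTS;
       ...manipulate shared interpreter state...
       ALLOW_INTS;
   ALLOW_INTS drops the nesting count and immediately re-checks pending
   interrupts; ENABLE_INTS only drops the count, leaving the check to a
   later CHECK_INTS. */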
VALUE rb_with_disable_interrupt _((VALUE(*)(ANYARGS),VALUE));
RUBY_EXTERN rb_atomic_t rb_trap_pending;
void rb_trap_restore_mask _((void));
RUBY_EXTERN int rb_thread_critical;
void rb_thread_schedule _((void));
RUBY_EXTERN int rb_gc_stack_grow_direction; /* -1 for down or 1 for up */
#if STACK_GROW_DIRECTION > 0
/* clear stack space between end and sp (not including *sp) */
#define __stack_zero(end,sp) __stack_zero_up(end,sp)
/* true if top has grown past limit, i.e. top deeper than limit */
#define __stack_past(limit,top) __stack_past_up(limit,top)
/* depth of mid below stack top */
#define __stack_depth(top,mid) __stack_depth_up(top,mid)
/* stack pointer top adjusted to include depth more items */
#define __stack_grow(top,depth) __stack_grow_up(top,depth)
#elif STACK_GROW_DIRECTION < 0
#define __stack_zero(end,sp) __stack_zero_down(end,sp)
#define __stack_past(limit,top) __stack_past_down(limit,top)
#define __stack_depth(top,mid) __stack_depth_down(top,mid)
#define __stack_grow(top,depth) __stack_grow_down(top,depth)
#else /* limp along if stack direction can't be determined at compile time */
#define __stack_zero(end,sp) if (rb_gc_stack_grow_direction<0) \
    __stack_zero_down(end,sp); else __stack_zero_up(end,sp);
#define __stack_past(limit,top) (rb_gc_stack_grow_direction<0 ? \
    __stack_past_down(limit,top) : __stack_past_up(limit,top))
#define __stack_depth(top,mid) (rb_gc_stack_grow_direction<0 ? \
    __stack_depth_down(top,mid) : __stack_depth_up(top,mid))
#define __stack_grow(top,depth) (rb_gc_stack_grow_direction<0 ? \
    __stack_grow_down(top,depth) : __stack_grow_up(top,depth))
#endif
#define __stack_zero_up(end,sp) while (end >= ++sp) *sp=0
#define __stack_past_up(limit,top) ((limit) < (top))
#define __stack_depth_up(top,mid) ((top) - (mid))
#define __stack_grow_up(top,depth) ((top)+(depth))
#define __stack_zero_down(end,sp) while (end <= --sp) *sp=0
#define __stack_past_down(limit,top) ((limit) > (top))
#define __stack_depth_down(top,mid) ((mid) - (top))
#define __stack_grow_down(top,depth) ((top)-(depth))
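/* Example (illustrative): on a downward-growing stack with end < sp,
   __stack_zero(end, sp) expands to the _down variant and zeroes the words
   from sp-1 down to end inclusive, leaving *sp itself untouched; the _up
   variants mirror this for upward-growing stacks. __stack_past(limit, top)
   answers whether top has grown deeper than limit in the machine's
   direction, and __stack_grow(top, depth) adjusts top by depth items. */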
/* Make alloca work the best possible way. */
#ifdef __GNUC__
# ifndef atarist
# ifndef alloca
# define alloca __builtin_alloca
# endif
# endif /* atarist */
# define nativeAllocA __builtin_alloca
/* use assembly to get stack pointer quickly */
# if STACK_WIPE_SITES & 0x1000
# define __defspfn(asmb) \
  static inline VALUE *__sp(void) __attribute__((always_inline)); \
  static inline VALUE *__sp(void) \
  { \
    VALUE *sp; asm(asmb); \
    return sp; \
  }
# ifdef __anyPowerPC__
__defspfn("addi %0, r1, 0": "=r"(sp))
# elif defined __i386__
__defspfn("movl %%esp, %0": "=r"(sp))
# elif defined __x86_64__
#warning ===> x86_64 inline assembler is known to crash -- change STACK_WIPE_SITES
__defspfn("movq %%rsp, %0": "=r"(sp))
# elif defined __arm__
__defspfn("mov %0, sp": "=r"(sp))
# else
# define __sp() ((VALUE *)__builtin_alloca(0))
# warning No assembly version of __sp() defined for this CPU.
# endif
# else
# define __sp() ((VALUE *)__builtin_alloca(0))
# endif
#else /* not __GNUC__ */
# ifdef HAVE_ALLOCA_H
# include <alloca.h>
# else
# ifndef _AIX
# ifndef alloca /* predefined by HP cc +Olibcalls */
void *alloca ();
# endif
# endif /* AIX */
# endif /* HAVE_ALLOCA_H */
# if STACK_WIPE_SITES & 0x1000
# warning No assembly versions of __sp() defined for this compiler.
# endif
# if HAVE_ALLOCA
# define __sp() ((VALUE *)alloca(0))
# define nativeAllocA alloca
# else
RUBY_EXTERN VALUE *__sp(void);
# if STACK_WIPE_SITES
# define STACK_WIPE_SITES 0
# warning Disabled Stack Wiping because there is no native alloca()
# endif
# endif
#endif /* __GNUC__ */
/*
Zero memory that was (recently) part of the stack, but is no longer.
Invoke when stack is deep to mark its extent and when it's shallow to wipe it.
*/
#if STACK_WIPE_METHOD == 0
#define rb_gc_wipe_stack() ((void)0)
#elif STACK_WIPE_METHOD == 4
#define rb_gc_wipe_stack() do { \
    if (rb_curr_thread) { \
      VALUE *end = rb_curr_thread->gc_stack_end; \
      VALUE *sp = __sp(); \
      rb_curr_thread->gc_stack_end = sp; \
      __stack_zero(end, sp); \
    } \
} while (0)
#else
RUBY_EXTERN void rb_gc_wipe_stack(void);
#endif
/*
Update our record of maximum stack extent without zeroing unused stack
*/
#define rb_gc_update_stack_extent() do { \
    VALUE *sp = __sp(); \
    if (rb_curr_thread && __stack_past(rb_curr_thread->gc_stack_end, sp)) \
      rb_curr_thread->gc_stack_end = sp; \
} while(0)
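/* Usage sketch (illustrative): when STACK_WIPE_SITES sets 0x100, 0x200 or
   0x400, object allocation, reallocation and the GC marking pass call
   rb_gc_update_stack_extent() so gc_stack_end records the deepest stack
   position seen, which a later rb_gc_wipe_stack() then clears up to. */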
#if STACK_WIPE_SITES & 4
# define CHECK_INTS_wipe_stack() rb_gc_wipe_stack()
#else
# define CHECK_INTS_wipe_stack() (void)0
#endif
#if defined(HAVE_SETITIMER) || defined(_THREAD_SAFE)
RUBY_EXTERN int rb_thread_pending;
# define CHECK_INTS do {\
    CHECK_INTS_wipe_stack(); \
    if (!(rb_prohibit_interrupt || rb_thread_critical)) {\
      if (rb_thread_pending) rb_thread_schedule();\
      if (rb_trap_pending) rb_trap_exec();\
    }\
} while (0)
#else
/* pseudo preemptive thread switching */
RUBY_EXTERN int rb_thread_tick;
#define THREAD_TICK 500
#define CHECK_INTS do {\
    CHECK_INTS_wipe_stack(); \
    if (!(rb_prohibit_interrupt || rb_thread_critical)) {\
      if (rb_thread_tick-- <= 0) {\
        rb_thread_tick = THREAD_TICK;\
        rb_thread_schedule();\
      }\
      if (rb_trap_pending) rb_trap_exec();\
    }\
} while (0)
#endif
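/* Usage sketch (illustrative): the interpreter drops CHECK_INTS at safe
   points (for example in while & until loops, per the STACK_WIPE_SITES
   notes above) so pending thread switches and traps are serviced there,
   unless suppressed by DEFER_INTS or rb_thread_critical. */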
#endif