gcj_mlc.c
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
* Copyright (c) 2008-2022 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#include "private/gc_pmark.h" /* includes gc_priv.h */
#ifdef GC_GCJ_SUPPORT
/*
 * This is an allocator interface tuned for gcj (the GNU static
 * Java compiler).
*
 * Each allocated object begins with a pointer to a vtable,
* which for our purposes is simply a structure describing the type of
* the object. This descriptor structure contains a GC marking
* descriptor at offset GC_GCJ_MARK_DESCR_OFFSET.
*
* It is hoped that this interface may also be useful for other systems,
* possibly with some tuning of the constants. But the immediate goal
* is to get better gcj performance.
*
 * We assume that counting on explicit initialization of this
 * interface is OK.
*/
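/*
 * To make the layout assumption concrete, a client "vtable" might look
 * roughly like the sketch below.  This is an illustrative sketch only,
 * not part of the interface; the names example_vtable, class_info and
 * mark_descr are hypothetical.  The collector relies solely on finding
 * a mark descriptor at GC_GCJ_MARK_DESCR_OFFSET:
 *
 *   struct example_vtable {
 *     char class_info[GC_GCJ_MARK_DESCR_OFFSET]; (client-specific data)
 *     GC_word mark_descr; (GC marking descriptor, e.g. length-based)
 *   };
 */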
# include "gc/gc_gcj.h"
# include "private/dbg_mlc.h"
/* Object kind for objects with descriptors in "vtable". */
int GC_gcj_kind = 0;
/* The kind of objects that are always marked with a mark proc call. */
int GC_gcj_debug_kind = 0;
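/* A placeholder mark procedure installed when the client passes */
/* a null mark proc to GC_init_gcj_malloc_mp.  It should never   */
/* be invoked (no GC_DS_PROC descriptors are created in that     */
/* case), so it simply aborts.                                   */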
STATIC struct GC_ms_entry *GC_CALLBACK
GC_gcj_fake_mark_proc(word *addr, struct GC_ms_entry *mark_stack_top,
struct GC_ms_entry *mark_stack_limit, word env)
{
UNUSED_ARG(addr);
UNUSED_ARG(mark_stack_limit);
UNUSED_ARG(env);
# if defined(FUNCPTR_IS_DATAPTR) && defined(CPPCHECK)
GC_noop1((word)&GC_init_gcj_malloc);
# endif
ABORT_RET("No client gcj mark proc is specified");
return mark_stack_top;
}
# ifdef FUNCPTR_IS_DATAPTR
GC_API void GC_CALL
GC_init_gcj_malloc(int mp_index, void *mp)
{
GC_init_gcj_malloc_mp((unsigned)mp_index,
CAST_THRU_UINTPTR(GC_mark_proc, mp));
}
# endif /* FUNCPTR_IS_DATAPTR */
GC_API void GC_CALL
GC_init_gcj_malloc_mp(unsigned mp_index, GC_mark_proc mp)
{
# ifndef GC_IGNORE_GCJ_INFO
GC_bool ignore_gcj_info;
# endif
GC_STATIC_ASSERT(GC_GCJ_MARK_DESCR_OFFSET >= sizeof(ptr_t));
if (0 == mp) {
/* In case GC_DS_PROC is unused. */
mp = GC_gcj_fake_mark_proc;
}
/* Initialize the collector just in case it is not done yet. */
GC_init();
LOCK();
if (GC_gcjobjfreelist != NULL) {
/* Already initialized. */
UNLOCK();
return;
}
# ifdef GC_IGNORE_GCJ_INFO
/* This is useful for debugging on platforms with missing getenv(). */
# define ignore_gcj_info TRUE
# else
ignore_gcj_info = (0 != GETENV("GC_IGNORE_GCJ_INFO"));
# endif
if (ignore_gcj_info) {
GC_COND_LOG_PRINTF("Gcj-style type information is disabled!\n");
}
  /* Validate the index before writing into GC_mark_procs. */
  if (mp_index >= GC_n_mark_procs)
    ABORT("GC_init_gcj_malloc_mp: bad index");
  GC_ASSERT(GC_mark_procs[mp_index] == (GC_mark_proc)0); /* unused */
  GC_mark_procs[mp_index] = mp;
  /* Set up the object kind with a gcj-style indirect descriptor. */
GC_gcjobjfreelist = (ptr_t *)GC_new_free_list_inner();
if (ignore_gcj_info) {
/* Use a simple length-based descriptor, thus forcing a fully */
/* conservative scan. */
GC_gcj_kind = (int)GC_new_kind_inner((void **)GC_gcjobjfreelist,
/* 0 | */ GC_DS_LENGTH, TRUE, TRUE);
GC_gcj_debug_kind = GC_gcj_kind;
} else {
GC_gcj_kind = (int)GC_new_kind_inner(
(void **)GC_gcjobjfreelist,
(((word)(-(signed_word)GC_GCJ_MARK_DESCR_OFFSET
- GC_INDIR_PER_OBJ_BIAS))
| GC_DS_PER_OBJECT),
FALSE, TRUE);
    /* Set up the object kind for objects that require a mark proc call. */
GC_gcj_debug_kind = (int)GC_new_kind_inner(
GC_new_free_list_inner(),
GC_MAKE_PROC(mp_index, 1 /* allocated with debug info */), FALSE,
TRUE);
}
UNLOCK();
# undef ignore_gcj_info
}
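/*
 * Example registration (a hedged sketch, not part of this file).  The
 * mark-proc index 0 and the name example_mark_proc are hypothetical;
 * whatever index is passed must be below GC_n_mark_procs and not yet
 * occupied in GC_mark_procs:
 *
 *   struct GC_ms_entry *GC_CALLBACK
 *   example_mark_proc(GC_word *addr, struct GC_ms_entry *mark_stack_top,
 *                     struct GC_ms_entry *mark_stack_limit, GC_word env)
 *   {
 *     (push the pointers contained in the object at addr, then)
 *     return mark_stack_top;
 *   }
 *
 *   GC_init_gcj_malloc_mp(0, example_mark_proc);
 */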
/* A mechanism to release the allocator lock and invoke finalizers. */
/* We don't really have an opportunity to do this on a rarely executed */
/* path on which the allocator lock is not held. Thus we check at */
/* a rarely executed point at which it is safe to release the allocator */
/* lock; we do this even where we could just call GC_INVOKE_FINALIZERS, */
/* since it is probably cheaper and certainly more uniform. */
/* TODO: Consider doing the same elsewhere? */
static void
maybe_finalize(void)
{
static word last_finalized_no = 0;
GC_ASSERT(I_HOLD_LOCK());
if (GC_gc_no == last_finalized_no || !EXPECT(GC_is_initialized, TRUE))
return;
UNLOCK();
GC_INVOKE_FINALIZERS();
LOCK();
last_finalized_no = GC_gc_no;
}
/* Allocate an object, clear it, and store the pointer to the */
/* type structure (vtable in gcj). This adds a byte at the */
/* end of the object if GC_malloc would. */
# ifdef THREAD_LOCAL_ALLOC
GC_INNER
# else
STATIC
# endif
void *
GC_core_gcj_malloc(size_t lb, const void *vtable_ptr, unsigned flags)
{
ptr_t op;
size_t lg;
GC_DBG_COLLECT_AT_MALLOC(lb);
LOCK();
if (SMALL_OBJ(lb)
&& (op = GC_gcjobjfreelist[lg = GC_size_map[lb]],
EXPECT(op != NULL, TRUE))) {
GC_gcjobjfreelist[lg] = (ptr_t)obj_link(op);
GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
GC_ASSERT(NULL == ((void **)op)[1]);
} else {
maybe_finalize();
op = (ptr_t)GC_generic_malloc_inner(lb, GC_gcj_kind, flags);
if (NULL == op) {
GC_oom_func oom_fn = GC_oom_fn;
UNLOCK();
return (*oom_fn)(lb);
}
}
*(const void **)op = vtable_ptr;
UNLOCK();
GC_dirty(op);
REACHABLE_AFTER_DIRTY(vtable_ptr);
return GC_clear_stack(op);
}
# ifndef THREAD_LOCAL_ALLOC
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_gcj_malloc(size_t lb, const void *vtable_ptr)
{
return GC_core_gcj_malloc(lb, vtable_ptr, 0 /* flags */);
}
# endif /* !THREAD_LOCAL_ALLOC */
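/*
 * Example call (a hedged sketch; vt stands for a client vtable set up
 * as described above).  The returned object is cleared, and its first
 * pointer-sized word refers to the vtable:
 *
 *   void *obj = GC_gcj_malloc(object_size_in_bytes, &vt);
 */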
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_gcj_malloc_ignore_off_page(size_t lb, const void *vtable_ptr)
{
return GC_core_gcj_malloc(lb, vtable_ptr, IGNORE_OFF_PAGE);
}
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_debug_gcj_malloc(size_t lb, const void *vtable_ptr, GC_EXTRA_PARAMS)
{
void *base, *result;
  /* We are careful to avoid extra calls that could confuse the */
  /* backtrace. */
LOCK();
maybe_finalize();
base = GC_generic_malloc_inner(SIZET_SAT_ADD(lb, DEBUG_BYTES),
GC_gcj_debug_kind, 0 /* flags */);
if (NULL == base) {
GC_oom_func oom_fn = GC_oom_fn;
UNLOCK();
GC_err_printf("GC_debug_gcj_malloc(%lu, %p) returning NULL (%s:%d)\n",
(unsigned long)lb, vtable_ptr, s, i);
return (*oom_fn)(lb);
}
*((const void **)((ptr_t)base + sizeof(oh))) = vtable_ptr;
if (!GC_debugging_started) {
GC_start_debugging_inner();
}
result = GC_store_debug_info_inner(base, lb, s, i);
ADD_CALL_CHAIN(base, ra);
UNLOCK();
GC_dirty(result);
REACHABLE_AFTER_DIRTY(vtable_ptr);
return result;
}
#endif /* GC_GCJ_SUPPORT */