3092
|
1 /* New incremental garbage collector for XEmacs.
|
|
2 Copyright (C) 2005 Marcus Crestani.
|
|
3
|
|
4 This file is part of XEmacs.
|
|
5
|
|
6 XEmacs is free software; you can redistribute it and/or modify it
|
|
7 under the terms of the GNU General Public License as published by the
|
|
8 Free Software Foundation; either version 2, or (at your option) any
|
|
9 later version.
|
|
10
|
|
11 XEmacs is distributed in the hope that it will be useful, but WITHOUT
|
|
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
14 for more details.
|
|
15
|
|
16 You should have received a copy of the GNU General Public License
|
|
17 along with XEmacs; see the file COPYING. If not, write to
|
|
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
|
19 Boston, MA 02111-1307, USA. */
|
|
20
|
|
21 /* Synched up with: Not in FSF. */
|
|
22
|
|
23 #include <config.h>
|
|
24 #include "lisp.h"
|
|
25
|
|
26 #include "backtrace.h"
|
|
27 #include "buffer.h"
|
|
28 #include "bytecode.h"
|
|
29 #include "chartab.h"
|
|
30 #include "console-stream.h"
|
|
31 #include "device.h"
|
|
32 #include "elhash.h"
|
|
33 #include "events.h"
|
|
34 #include "extents-impl.h"
|
|
35 #include "file-coding.h"
|
|
36 #include "frame-impl.h"
|
|
37 #include "gc.h"
|
|
38 #include "glyphs.h"
|
|
39 #include "opaque.h"
|
|
40 #include "lrecord.h"
|
|
41 #include "lstream.h"
|
|
42 #include "process.h"
|
|
43 #include "profile.h"
|
|
44 #include "redisplay.h"
|
|
45 #include "specifier.h"
|
|
46 #include "sysfile.h"
|
|
47 #include "sysdep.h"
|
|
48 #include "window.h"
|
|
49 #include "vdb.h"
|
|
50
|
|
51
|
|
/* Default GC trigger parameters (bytes of consing / elements marked).  */
#define GC_CONS_THRESHOLD 2000000
#define GC_CONS_INCREMENTAL_THRESHOLD 200000
#define GC_INCREMENTAL_TRAVERSAL_THRESHOLD 100000

/* Number of bytes of consing done since the last GC. */
EMACS_INT consing_since_gc;

/* Number of bytes of consing done since startup. */
EMACS_UINT total_consing;

/* Number of bytes of current allocated heap objects. */
EMACS_INT total_gc_usage;

/* Nonzero if total_gc_usage above holds a valid value. */
int total_gc_usage_set;

/* Number of bytes of consing since gc before another gc should be done. */
EMACS_INT gc_cons_threshold;

/* Nonzero during gc */
int gc_in_progress;

/* Percentage of consing of total data size before another GC. */
EMACS_INT gc_cons_percentage;

#ifdef NEW_GC
/* Number of bytes of consing since gc before another cycle of the gc
   should be done in incremental mode. */
EMACS_INT gc_cons_incremental_threshold;

/* Number of elements marked in one cycle of incremental GC. */
EMACS_INT gc_incremental_traversal_threshold;

/* Nonzero while the write barrier is active, i.e. while an
   incremental collection is suspended between cycles. */
int write_barrier_enabled;
#endif /* NEW_GC */
|
|
88
|
|
89
|
|
90
|
|
91 #ifdef NEW_GC
|
|
92 /************************************************************************/
|
|
93 /* Incremental State and Statistics */
|
|
94 /************************************************************************/
|
|
95
|
|
/* The phases an (incremental) garbage collection passes through, in
   order.  NONE means no collection is in progress.  */
enum gc_phase
{
  NONE,			/* no collection running */
  INIT_GC,		/* set up collector state */
  PUSH_ROOT_SET,	/* enqueue the root set for marking */
  MARK,			/* (incremental) marking */
  REPUSH_ROOT_SET,	/* re-push roots dirtied during incremental mark */
  FINISH_MARK,		/* drain the mark stack completely */
  FINALIZE,		/* run finalizers on unreachable objects */
  SWEEP,		/* reclaim unmarked objects */
  FINISH_GC		/* tear down collector state */
};
|
|
108
|
|
#ifndef ERROR_CHECK_GC
/* Minimal collector state: just the current phase.  */
struct
{
  enum gc_phase phase;
} gc_state;
#else /* ERROR_CHECK_GC */
/* Indices into the per-statistic counter arrays below; each statistic
   is tracked over several time windows simultaneously.  */
enum gc_stat_id
{
  GC_STAT_TOTAL,	 /* since startup */
  GC_STAT_IN_LAST_GC,	 /* during the previous full collection */
  GC_STAT_IN_THIS_GC,	 /* during the current full collection */
  GC_STAT_IN_LAST_CYCLE, /* during the previous incremental cycle */
  GC_STAT_IN_THIS_CYCLE, /* during the current incremental cycle */
  GC_STAT_COUNT /* has to be last */
};

/* Collector state plus statistics gathered for debugging.  */
struct
{
  enum gc_phase phase;			/* current phase of the collector */
  EMACS_INT n_gc[GC_STAT_COUNT];	/* number of full collections */
  EMACS_INT n_cycles[GC_STAT_COUNT];	/* number of incremental cycles */
  EMACS_INT enqueued[GC_STAT_COUNT];	/* objects pushed on the mark stack */
  EMACS_INT dequeued[GC_STAT_COUNT];	/* objects popped off the mark stack */
  EMACS_INT repushed[GC_STAT_COUNT];	/* dirty objects pushed again */
  EMACS_INT enqueued2[GC_STAT_COUNT];	/* pushes during REPUSH_ROOT_SET */
  EMACS_INT dequeued2[GC_STAT_COUNT];	/* pops during REPUSH_ROOT_SET */
  EMACS_INT finalized[GC_STAT_COUNT];	/* objects finalized */
  EMACS_INT freed[GC_STAT_COUNT];	/* objects freed by the sweep */
  EMACS_INT explicitly_freed;		/* objects freed on explicit request */
  EMACS_INT explicitly_tried_freed;	/* explicit free requests attempted */
} gc_state;
#endif /* ERROR_CHECK_GC */
|
|
141
|
|
142 #define GC_PHASE gc_state.phase
|
|
143 #define GC_SET_PHASE(p) GC_PHASE = p
|
|
144
|
|
145 #ifdef ERROR_CHECK_GC
|
|
146 # define GC_STAT_START_NEW_GC gc_stat_start_new_gc ()
|
|
147 # define GC_STAT_RESUME_GC gc_stat_resume_gc ()
|
|
148
|
|
149 #define GC_STAT_TICK(STAT) \
|
|
150 gc_state.STAT[GC_STAT_TOTAL]++; \
|
|
151 gc_state.STAT[GC_STAT_IN_THIS_GC]++; \
|
|
152 gc_state.STAT[GC_STAT_IN_THIS_CYCLE]++
|
|
153
|
|
154 # define GC_STAT_ENQUEUED \
|
|
155 if (GC_PHASE == REPUSH_ROOT_SET) \
|
|
156 { \
|
|
157 GC_STAT_TICK (enqueued2); \
|
|
158 } \
|
|
159 else \
|
|
160 { \
|
|
161 GC_STAT_TICK (enqueued); \
|
|
162 }
|
|
163
|
|
164 # define GC_STAT_DEQUEUED \
|
|
165 if (gc_state.phase == REPUSH_ROOT_SET) \
|
|
166 { \
|
|
167 GC_STAT_TICK (dequeued2); \
|
|
168 } \
|
|
169 else \
|
|
170 { \
|
|
171 GC_STAT_TICK (dequeued); \
|
|
172 }
|
|
173 # define GC_STAT_REPUSHED GC_STAT_TICK (repushed)
|
|
174
|
|
175 #define GC_STAT_RESUME(stat) \
|
|
176 gc_state.stat[GC_STAT_IN_LAST_CYCLE] = \
|
|
177 gc_state.stat[GC_STAT_IN_THIS_CYCLE]; \
|
|
178 gc_state.stat[GC_STAT_IN_THIS_CYCLE] = 0
|
|
179
|
|
180 #define GC_STAT_RESTART(stat) \
|
|
181 gc_state.stat[GC_STAT_IN_LAST_GC] = \
|
|
182 gc_state.stat[GC_STAT_IN_THIS_GC]; \
|
|
183 gc_state.stat[GC_STAT_IN_THIS_GC] = 0; \
|
|
184 GC_STAT_RESUME (stat)
|
|
185
|
|
/* Reset the statistics windows at the start of a new garbage
   collection: roll every per-GC counter over into its "last GC"
   window and count the first cycle of the new collection.  */
void
gc_stat_start_new_gc (void)
{
  gc_state.n_gc[GC_STAT_TOTAL]++;
  gc_state.n_cycles[GC_STAT_TOTAL]++;
  gc_state.n_cycles[GC_STAT_IN_LAST_GC] = gc_state.n_cycles[GC_STAT_IN_THIS_GC];
  gc_state.n_cycles[GC_STAT_IN_THIS_GC] = 1;

  GC_STAT_RESTART (enqueued);
  GC_STAT_RESTART (dequeued);
  GC_STAT_RESTART (repushed);
  GC_STAT_RESTART (finalized);
  GC_STAT_RESTART (enqueued2);
  GC_STAT_RESTART (dequeued2);
  GC_STAT_RESTART (freed);
}
|
|
202
|
|
/* Roll the per-cycle statistics windows when a suspended incremental
   collection resumes with a new cycle.  */
void
gc_stat_resume_gc (void)
{
  gc_state.n_cycles[GC_STAT_TOTAL]++;
  gc_state.n_cycles[GC_STAT_IN_THIS_GC]++;
  GC_STAT_RESUME (enqueued);
  GC_STAT_RESUME (dequeued);
  GC_STAT_RESUME (repushed);
  GC_STAT_RESUME (finalized);
  GC_STAT_RESUME (enqueued2);
  GC_STAT_RESUME (dequeued2);
  GC_STAT_RESUME (freed);
}
|
|
216
|
|
/* Statistics hook: one object was finalized.  */
void
gc_stat_finalized (void)
{
  GC_STAT_TICK (finalized);
}
|
|
222
|
|
/* Statistics hook: one object was freed by the sweep phase.  */
void
gc_stat_freed (void)
{
  GC_STAT_TICK (freed);
}
|
|
228
|
|
/* Statistics hook: one object was freed on explicit request.  */
void
gc_stat_explicitly_freed (void)
{
  gc_state.explicitly_freed++;
}
|
|
234
|
|
/* Statistics hook: an explicit free was attempted (whether or not the
   object could actually be freed).  */
void
gc_stat_explicitly_tried_freed (void)
{
  gc_state.explicitly_tried_freed++;
}
|
|
240
|
|
/* Print one statistics counter as a table row: its name followed by
   the value in each of the five windows.  */
#define GC_STAT_PRINT_ONE(stat) \
  printf (" | %9s %10d %10d %10d %10d %10d\n", \
	  #stat, \
	  (int) gc_state.stat[GC_STAT_TOTAL], \
	  (int) gc_state.stat[GC_STAT_IN_LAST_GC], \
	  (int) gc_state.stat[GC_STAT_IN_THIS_GC], \
	  (int) gc_state.stat[GC_STAT_IN_LAST_CYCLE], \
	  (int) gc_state.stat[GC_STAT_IN_THIS_CYCLE])

/* Dump the full statistics table to stdout (debugging aid).  */
void
gc_stat_print_stats (void)
{
  printf (" | PHASE %d TOTAL_GC %d\n",
	  (int) GC_PHASE,
	  (int) gc_state.n_gc[GC_STAT_TOTAL]);
  printf (" | %9s %10s %10s %10s %10s %10s\n",
	  "stat", "total", "last_gc", "this_gc",
	  "last_cycle", "this_cycle");
  /* Cycle counts have no per-cycle window, hence only three columns.  */
  printf (" | %9s %10d %10d %10d \n",
	  "cycle", (int) gc_state.n_cycles[GC_STAT_TOTAL],
	  (int) gc_state.n_cycles[GC_STAT_IN_LAST_GC],
	  (int) gc_state.n_cycles[GC_STAT_IN_THIS_GC]);

  GC_STAT_PRINT_ONE (enqueued);
  GC_STAT_PRINT_ONE (dequeued);
  GC_STAT_PRINT_ONE (repushed);
  GC_STAT_PRINT_ONE (enqueued2);
  GC_STAT_PRINT_ONE (dequeued2);
  GC_STAT_PRINT_ONE (finalized);
  GC_STAT_PRINT_ONE (freed);

  printf (" | explicitly freed %d tried %d\n",
	  (int) gc_state.explicitly_freed,
	  (int) gc_state.explicitly_tried_freed);
}
|
|
276
|
|
DEFUN ("gc-stats", Fgc_stats, 0, 0, "", /*
Return statistics about garbage collection cycles in a property list.
*/
       ())
{
  Lisp_Object pl = Qnil;
  /* Prepend one (name . value) pair to PL; since each PL prepends,
     the entry pushed first below ends up last in the returned list.  */
#define PL(name,value) \
  pl = cons3 (intern (name), make_int ((int) gc_state.value), pl)

  PL ("explicitly-tried-freed", explicitly_tried_freed);
  PL ("explicitly-freed", explicitly_freed);
  PL ("freed-in-this-cycle", freed[GC_STAT_IN_THIS_CYCLE]);
  PL ("freed-in-this-gc", freed[GC_STAT_IN_THIS_GC]);
  PL ("freed-in-last-cycle", freed[GC_STAT_IN_LAST_CYCLE]);
  PL ("freed-in-last-gc", freed[GC_STAT_IN_LAST_GC]);
  PL ("freed-total", freed[GC_STAT_TOTAL]);
  PL ("finalized-in-this-cycle", finalized[GC_STAT_IN_THIS_CYCLE]);
  PL ("finalized-in-this-gc", finalized[GC_STAT_IN_THIS_GC]);
  PL ("finalized-in-last-cycle", finalized[GC_STAT_IN_LAST_CYCLE]);
  PL ("finalized-in-last-gc", finalized[GC_STAT_IN_LAST_GC]);
  PL ("finalized-total", finalized[GC_STAT_TOTAL]);
  PL ("repushed-in-this-cycle", repushed[GC_STAT_IN_THIS_CYCLE]);
  PL ("repushed-in-this-gc", repushed[GC_STAT_IN_THIS_GC]);
  PL ("repushed-in-last-cycle", repushed[GC_STAT_IN_LAST_CYCLE]);
  PL ("repushed-in-last-gc", repushed[GC_STAT_IN_LAST_GC]);
  PL ("repushed-total", repushed[GC_STAT_TOTAL]);
  PL ("dequeued2-in-this-cycle", dequeued2[GC_STAT_IN_THIS_CYCLE]);
  PL ("dequeued2-in-this-gc", dequeued2[GC_STAT_IN_THIS_GC]);
  PL ("dequeued2-in-last-cycle", dequeued2[GC_STAT_IN_LAST_CYCLE]);
  PL ("dequeued2-in-last-gc", dequeued2[GC_STAT_IN_LAST_GC]);
  PL ("dequeued2-total", dequeued2[GC_STAT_TOTAL]);
  PL ("enqueued2-in-this-cycle", enqueued2[GC_STAT_IN_THIS_CYCLE]);
  PL ("enqueued2-in-this-gc", enqueued2[GC_STAT_IN_THIS_GC]);
  PL ("enqueued2-in-last-cycle", enqueued2[GC_STAT_IN_LAST_CYCLE]);
  PL ("enqueued2-in-last-gc", enqueued2[GC_STAT_IN_LAST_GC]);
  PL ("enqueued2-total", enqueued2[GC_STAT_TOTAL]);
  PL ("dequeued-in-this-cycle", dequeued[GC_STAT_IN_THIS_CYCLE]);
  PL ("dequeued-in-this-gc", dequeued[GC_STAT_IN_THIS_GC]);
  PL ("dequeued-in-last-cycle", dequeued[GC_STAT_IN_LAST_CYCLE]);
  PL ("dequeued-in-last-gc", dequeued[GC_STAT_IN_LAST_GC]);
  PL ("dequeued-total", dequeued[GC_STAT_TOTAL]);
  PL ("enqueued-in-this-cycle", enqueued[GC_STAT_IN_THIS_CYCLE]);
  PL ("enqueued-in-this-gc", enqueued[GC_STAT_IN_THIS_GC]);
  PL ("enqueued-in-last-cycle", enqueued[GC_STAT_IN_LAST_CYCLE]);
  PL ("enqueued-in-last-gc", enqueued[GC_STAT_IN_LAST_GC]);
  PL ("enqueued-total", enqueued[GC_STAT_TOTAL]);
  PL ("n-cycles-in-this-gc", n_cycles[GC_STAT_IN_THIS_GC]);
  PL ("n-cycles-in-last-gc", n_cycles[GC_STAT_IN_LAST_GC]);
  PL ("n-cycles-total", n_cycles[GC_STAT_TOTAL]);
  PL ("n-gc-total", n_gc[GC_STAT_TOTAL]);
  PL ("phase", phase);
  return pl;
}
|
|
#else /* not ERROR_CHECK_GC */
/* Without ERROR_CHECK_GC the statistics hooks compile to nothing.  */
# define GC_STAT_START_NEW_GC
# define GC_STAT_RESUME_GC
# define GC_STAT_ENQUEUED
# define GC_STAT_DEQUEUED
# define GC_STAT_REPUSHED
# define GC_STAT_REMOVED
#endif /* not ERROR_CHECK_GC */
#endif /* NEW_GC */
|
|
339
|
|
340
|
|
341 /************************************************************************/
|
|
342 /* Recompute need to garbage collect */
|
|
343 /************************************************************************/
|
|
344
|
|
/* Nonzero when a garbage collection should happen as soon as it is
   safe; recomputed by recompute_need_to_garbage_collect () below.  */
int need_to_garbage_collect;

#ifdef ERROR_CHECK_GC
int always_gc = 0;		/* Debugging hack; equivalent to
				   (setq gc-cons-threshold -1) */
#else
#define always_gc 0
#endif
|
|
353
|
|
/* Recompute need_to_garbage_collect: nonzero if it's time to garbage
   collect now.  In incremental mode (write barrier enabled, NEW_GC
   only) the much lower incremental threshold applies; otherwise GC is
   due once both the absolute consing threshold and -- when the total
   heap usage is known -- the consing-to-heap-size percentage are
   exceeded.  */
void
recompute_need_to_garbage_collect (void)
{
  if (always_gc)
    need_to_garbage_collect = 1;
  else
    need_to_garbage_collect =
#ifdef NEW_GC
      write_barrier_enabled ?
      (consing_since_gc > gc_cons_incremental_threshold) :
#endif /* NEW_GC */
      (consing_since_gc > gc_cons_threshold
       &&
#if 0 /* #### implement this better */
       (100 * consing_since_gc) / total_data_usage () >=
       gc_cons_percentage
#else
       (!total_gc_usage_set ||
	(100 * consing_since_gc) / total_gc_usage >=
	gc_cons_percentage)
#endif
       );
  recompute_funcall_allocation_flag ();
}
|
|
379
|
|
380
|
|
381
|
|
382 /************************************************************************/
|
|
383 /* Mark Phase */
|
|
384 /************************************************************************/
|
|
385
|
|
/* Description of a block holding a single Lisp_Object reference, used
   wherever a bare Lisp_Object must be traversed/dumped.  */
static const struct memory_description lisp_object_description_1[] = {
  { XD_LISP_OBJECT, 0 },
  { XD_END }
};

const struct sized_memory_description lisp_object_description = {
  sizeof (Lisp_Object),
  lisp_object_description_1
};
|
|
395
|
|
396 #if defined (USE_KKCC) || defined (PDUMP)
|
|
397
|
|
/* This function extracts the value of a count variable described somewhere
   else in the description.  It is converted corresponding to the type */
EMACS_INT
lispdesc_indirect_count_1 (EMACS_INT code,
			   const struct memory_description *idesc,
			   const void *idata)
{
  EMACS_INT count;
  const void *irdata;

  /* Decode which description line holds the count field, and the
     constant delta to add to the stored value afterwards.  */
  int line = XD_INDIRECT_VAL (code);
  int delta = XD_INDIRECT_DELTA (code);

  /* Address of the count field inside the object; the offset itself
     may again be indirect.  */
  irdata = ((char *) idata) +
    lispdesc_indirect_count (idesc[line].offset, idesc, idata);
  /* Read the count with the integral width recorded for that line.  */
  switch (idesc[line].type)
    {
    case XD_BYTECOUNT:
      count = * (Bytecount *) irdata;
      break;
    case XD_ELEMCOUNT:
      count = * (Elemcount *) irdata;
      break;
    case XD_HASHCODE:
      count = * (Hashcode *) irdata;
      break;
    case XD_INT:
      count = * (int *) irdata;
      break;
    case XD_LONG:
      count = * (long *) irdata;
      break;
    default:
      /* A description bug: dump diagnostics before aborting.  */
      stderr_out ("Unsupported count type : %d (line = %d, code = %ld)\n",
		  idesc[line].type, line, (long) code);
#if defined(USE_KKCC) && defined(DEBUG_XEMACS)
      if (gc_in_progress)
	kkcc_backtrace ();
#endif
#ifdef PDUMP
      if (in_pdump)
	pdump_backtrace ();
#endif
      count = 0;	/* warning suppression */
      ABORT ();
    }
  count += delta;
  return count;
}
|
|
447
|
|
448 /* SDESC is a "description map" (basically, a list of offsets used for
|
|
449 successive indirections) and OBJ is the first object to indirect off of.
|
|
450 Return the description ultimately found. */
|
|
451
|
|
452 const struct sized_memory_description *
|
|
453 lispdesc_indirect_description_1 (const void *obj,
|
|
454 const struct sized_memory_description *sdesc)
|
|
455 {
|
|
456 int pos;
|
|
457
|
|
458 for (pos = 0; sdesc[pos].size >= 0; pos++)
|
|
459 obj = * (const void **) ((const char *) obj + sdesc[pos].size);
|
|
460
|
|
461 return (const struct sized_memory_description *) obj;
|
|
462 }
|
|
463
|
|
/* Compute the size of the data at RDATA, described by a single entry
   DESC1 in a description array.  OBJ and DESC are used for
   XD_INDIRECT references. */

static Bytecount
lispdesc_one_description_line_size (void *rdata,
				    const struct memory_description *desc1,
				    const void *obj,
				    const struct memory_description *desc)
{
 union_switcheroo:	/* XD_UNION_DYNAMIC_SIZE re-enters the switch
			   with the resolved union member's entry.  */
  switch (desc1->type)
    {
    case XD_LISP_OBJECT_ARRAY:
      {
	/* Inline array of Lisp_Objects; the count may be indirect.  */
	EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj);
	return (val * sizeof (Lisp_Object));
      }
    case XD_LISP_OBJECT:
    case XD_LO_LINK:
      return sizeof (Lisp_Object);
    case XD_OPAQUE_PTR:
      return sizeof (void *);
#ifdef NEW_GC
    case XD_LISP_OBJECT_BLOCK_PTR:
#endif /* NEW_GC */
    case XD_BLOCK_PTR:
      {
	/* Array of pointers to separately-described blocks.  */
	EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj);
	return val * sizeof (void *);
      }
    case XD_BLOCK_ARRAY:
      {
	/* Inline array of blocks: count times the size of one block.  */
	EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj);

	return (val *
		lispdesc_block_size
		(rdata,
		 lispdesc_indirect_description (obj, desc1->data2.descr)));
      }
    case XD_OPAQUE_DATA_PTR:
      return sizeof (void *);
    case XD_UNION_DYNAMIC_SIZE:
      {
	/* If an explicit size was given in the first-level structure
	   description, use it; else compute size based on current union
	   constant. */
	const struct sized_memory_description *sdesc =
	  lispdesc_indirect_description (obj, desc1->data2.descr);
	if (sdesc->size)
	  return sdesc->size;
	else
	  {
	    desc1 = lispdesc_process_xd_union (desc1, desc, obj);
	    if (desc1)
	      goto union_switcheroo;
	    break;
	  }
      }
    case XD_UNION:
      {
	/* If an explicit size was given in the first-level structure
	   description, use it; else compute size based on maximum of all
	   possible structures. */
	const struct sized_memory_description *sdesc =
	  lispdesc_indirect_description (obj, desc1->data2.descr);
	if (sdesc->size)
	  return sdesc->size;
	else
	  {
	    int count;
	    Bytecount max_size = -1, size;

	    desc1 = sdesc->description;

	    for (count = 0; desc1[count].type != XD_END; count++)
	      {
		size = lispdesc_one_description_line_size (rdata,
							   &desc1[count],
							   obj, desc);
		if (size > max_size)
		  max_size = size;
	      }
	    return max_size;
	  }
      }
    case XD_ASCII_STRING:
      return sizeof (void *);
    case XD_DOC_STRING:
      return sizeof (void *);
    case XD_INT_RESET:
      return sizeof (int);
    case XD_BYTECOUNT:
      return sizeof (Bytecount);
    case XD_ELEMCOUNT:
      return sizeof (Elemcount);
    case XD_HASHCODE:
      return sizeof (Hashcode);
    case XD_INT:
      return sizeof (int);
    case XD_LONG:
      return sizeof (long);
    default:
      stderr_out ("Unsupported dump type : %d\n", desc1->type);
      ABORT ();
    }

  /* Reached only via the break in XD_UNION_DYNAMIC_SIZE when the
     union member could not be resolved.  */
  return 0;
}
|
|
573
|
|
574
|
|
575 /* Return the size of the memory block (NOT necessarily a structure!)
|
|
576 described by SDESC and pointed to by OBJ. If SDESC records an
|
|
577 explicit size (i.e. non-zero), it is simply returned; otherwise,
|
|
578 the size is calculated by the maximum offset and the size of the
|
|
579 object at that offset, rounded up to the maximum alignment. In
|
|
580 this case, we may need the object, for example when retrieving an
|
|
581 "indirect count" of an inlined array (the count is not constant,
|
|
582 but is specified by one of the elements of the memory block). (It
|
|
583 is generally not a problem if we return an overly large size -- we
|
|
584 will simply end up reserving more space than necessary; but if the
|
|
585 size is too small we could be in serious trouble, in particular
|
|
586 with nested inlined structures, where there may be alignment
|
|
587 padding in the middle of a block. #### In fact there is an (at
|
|
588 least theoretical) problem with an overly large size -- we may
|
|
589 trigger a protection fault when reading from invalid memory. We
|
|
590 need to handle this -- perhaps in a stupid but dependable way,
|
|
591 i.e. by trapping SIGSEGV and SIGBUS.) */
|
|
592
|
|
593 Bytecount
|
|
594 lispdesc_block_size_1 (const void *obj, Bytecount size,
|
|
595 const struct memory_description *desc)
|
|
596 {
|
|
597 EMACS_INT max_offset = -1;
|
|
598 int max_offset_pos = -1;
|
|
599 int pos;
|
|
600
|
|
601 if (size)
|
|
602 return size;
|
|
603
|
|
604 for (pos = 0; desc[pos].type != XD_END; pos++)
|
|
605 {
|
|
606 EMACS_INT offset = lispdesc_indirect_count (desc[pos].offset, desc, obj);
|
|
607 if (offset == max_offset)
|
|
608 {
|
|
609 stderr_out ("Two relocatable elements at same offset?\n");
|
|
610 ABORT ();
|
|
611 }
|
|
612 else if (offset > max_offset)
|
|
613 {
|
|
614 max_offset = offset;
|
|
615 max_offset_pos = pos;
|
|
616 }
|
|
617 }
|
|
618
|
|
619 if (max_offset_pos < 0)
|
|
620 return 0;
|
|
621
|
|
622 {
|
|
623 Bytecount size_at_max;
|
|
624 size_at_max =
|
|
625 lispdesc_one_description_line_size ((char *) obj + max_offset,
|
|
626 &desc[max_offset_pos], obj, desc);
|
|
627
|
|
628 /* We have no way of knowing the required alignment for this structure,
|
|
629 so just make it maximally aligned. */
|
|
630 return MAX_ALIGN_SIZE (max_offset + size_at_max);
|
|
631 }
|
|
632 }
|
|
633 #endif /* defined (USE_KKCC) || defined (PDUMP) */
|
|
634
|
|
#ifdef MC_ALLOC
/* Assert that LHEADER does not belong to an already-freed object.  */
#define GC_CHECK_NOT_FREE(lheader)			\
      gc_checking_assert (! LRECORD_FREE_P (lheader));
#else /* MC_ALLOC */
/* Assert that LHEADER does not belong to an already-freed object; for
   non-basic lcrecords additionally check the old-style free flag.  */
#define GC_CHECK_NOT_FREE(lheader)			\
      gc_checking_assert (! LRECORD_FREE_P (lheader));	\
      gc_checking_assert (LHEADER_IMPLEMENTATION (lheader)->basic_p ||	\
			  ! ((struct old_lcrecord_header *) lheader)->free)
#endif /* MC_ALLOC */
|
|
644
|
|
#ifdef USE_KKCC
/* The following functions implement the new mark algorithm.
   They mark objects according to their descriptions.  They
   are modeled on the corresponding pdumper procedures. */

#if 0
/* Define this to make the KKCC mark stack behave as a FIFO queue
   (breadth-first traversal) instead of a LIFO stack (depth-first).  */
# define KKCC_STACK_AS_QUEUE 1
#endif

#ifdef DEBUG_XEMACS
/* The backtrace for the KKCC mark functions. */
#define KKCC_INIT_BT_STACK_SIZE 4096

/* One frame of the mark-phase backtrace: the object being traversed,
   the description used to traverse it, and the position within that
   description (negative values flag special origins; see
   kkcc_backtrace).  */
typedef struct
{
  void *obj;
  const struct memory_description *desc;
  int pos;
} kkcc_bt_stack_entry;

static kkcc_bt_stack_entry *kkcc_bt;	/* the backtrace stack */
static int kkcc_bt_stack_size;		/* current capacity in entries */
static int kkcc_bt_depth = 0;		/* number of live entries */
|
|
668
|
|
669 static void
|
|
670 kkcc_bt_init (void)
|
|
671 {
|
|
672 kkcc_bt_depth = 0;
|
|
673 kkcc_bt_stack_size = KKCC_INIT_BT_STACK_SIZE;
|
|
674 kkcc_bt = (kkcc_bt_stack_entry *)
|
|
675 xmalloc_and_zero (kkcc_bt_stack_size * sizeof (kkcc_bt_stack_entry));
|
|
676 if (!kkcc_bt)
|
|
677 {
|
|
678 stderr_out ("KKCC backtrace stack init failed for size %d\n",
|
|
679 kkcc_bt_stack_size);
|
|
680 ABORT ();
|
|
681 }
|
|
682 }
|
|
683
|
|
684 void
|
|
685 kkcc_backtrace (void)
|
|
686 {
|
|
687 int i;
|
|
688 stderr_out ("KKCC mark stack backtrace :\n");
|
|
689 for (i = kkcc_bt_depth - 1; i >= 0; i--)
|
|
690 {
|
|
691 Lisp_Object obj = wrap_pointer_1 (kkcc_bt[i].obj);
|
|
692 stderr_out (" [%d]", i);
|
|
693 if ((XRECORD_LHEADER (obj)->type >= lrecord_type_last_built_in_type)
|
|
694 || (!LRECORDP (obj))
|
|
695 || (!XRECORD_LHEADER_IMPLEMENTATION (obj)))
|
|
696 {
|
|
697 stderr_out (" non Lisp Object");
|
|
698 }
|
|
699 else
|
|
700 {
|
|
701 stderr_out (" %s",
|
|
702 XRECORD_LHEADER_IMPLEMENTATION (obj)->name);
|
|
703 }
|
|
704 stderr_out (" (addr: 0x%x, desc: 0x%x, ",
|
|
705 (int) kkcc_bt[i].obj,
|
|
706 (int) kkcc_bt[i].desc);
|
|
707 if (kkcc_bt[i].pos >= 0)
|
|
708 stderr_out ("pos: %d)\n", kkcc_bt[i].pos);
|
|
709 else
|
|
710 if (kkcc_bt[i].pos == -1)
|
|
711 stderr_out ("root set)\n");
|
|
712 else if (kkcc_bt[i].pos == -2)
|
|
713 stderr_out ("dirty object)\n");
|
|
714 }
|
|
715 }
|
|
716
|
|
717 static void
|
|
718 kkcc_bt_stack_realloc (void)
|
|
719 {
|
|
720 kkcc_bt_stack_size *= 2;
|
|
721 kkcc_bt = (kkcc_bt_stack_entry *)
|
|
722 xrealloc (kkcc_bt, kkcc_bt_stack_size * sizeof (kkcc_bt_stack_entry));
|
|
723 if (!kkcc_bt)
|
|
724 {
|
|
725 stderr_out ("KKCC backtrace stack realloc failed for size %d\n",
|
|
726 kkcc_bt_stack_size);
|
|
727 ABORT ();
|
|
728 }
|
|
729 }
|
|
730
|
|
/* Release the KKCC backtrace stack and reset its bookkeeping.  */
static void
kkcc_bt_free (void)
{
  xfree_1 (kkcc_bt);
  kkcc_bt = 0;
  kkcc_bt_stack_size = 0;
}
|
|
738
|
|
739 static void
|
|
740 kkcc_bt_push (void *obj, const struct memory_description *desc,
|
|
741 int level, int pos)
|
|
742 {
|
|
743 kkcc_bt_depth = level;
|
|
744 kkcc_bt[kkcc_bt_depth].obj = obj;
|
|
745 kkcc_bt[kkcc_bt_depth].desc = desc;
|
|
746 kkcc_bt[kkcc_bt_depth].pos = pos;
|
|
747 kkcc_bt_depth++;
|
|
748 if (kkcc_bt_depth >= kkcc_bt_stack_size)
|
|
749 kkcc_bt_stack_realloc ();
|
|
750 }
|
|
751
|
|
#else /* not DEBUG_XEMACS */
/* Without DEBUG_XEMACS the backtrace machinery compiles to nothing.  */
#define kkcc_bt_init()
#define kkcc_bt_push(obj, desc, level, pos)
#endif /* not DEBUG_XEMACS */
|
|
756
|
|
757 /* Object memory descriptions are in the lrecord_implementation structure.
|
|
758 But copying them to a parallel array is much more cache-friendly. */
|
|
759 const struct memory_description *lrecord_memory_descriptions[countof (lrecord_implementations_table)];
|
|
760
|
|
761 /* the initial stack size in kkcc_gc_stack_entries */
|
|
762 #define KKCC_INIT_GC_STACK_SIZE 16384
|
|
763
|
|
764 typedef struct
|
|
765 {
|
|
766 void *data;
|
|
767 const struct memory_description *desc;
|
|
768 #ifdef DEBUG_XEMACS
|
|
769 int level;
|
|
770 int pos;
|
|
771 #endif
|
|
772 } kkcc_gc_stack_entry;
|
|
773
|
|
774
|
|
775 static kkcc_gc_stack_entry *kkcc_gc_stack_ptr;
|
|
776 static int kkcc_gc_stack_front;
|
|
777 static int kkcc_gc_stack_rear;
|
|
778 static int kkcc_gc_stack_size;
|
|
779
|
|
780 #define KKCC_INC(i) ((i + 1) % kkcc_gc_stack_size)
|
|
781 #define KKCC_INC2(i) ((i + 2) % kkcc_gc_stack_size)
|
|
782
|
|
783 #define KKCC_GC_STACK_FULL (KKCC_INC2 (kkcc_gc_stack_rear) == kkcc_gc_stack_front)
|
|
784 #define KKCC_GC_STACK_EMPTY (KKCC_INC (kkcc_gc_stack_rear) == kkcc_gc_stack_front)
|
|
785
|
|
786 static void
|
|
787 kkcc_gc_stack_init (void)
|
|
788 {
|
|
789 kkcc_gc_stack_size = KKCC_INIT_GC_STACK_SIZE;
|
|
790 kkcc_gc_stack_ptr = (kkcc_gc_stack_entry *)
|
|
791 xmalloc_and_zero (kkcc_gc_stack_size * sizeof (kkcc_gc_stack_entry));
|
|
792 if (!kkcc_gc_stack_ptr)
|
|
793 {
|
|
794 stderr_out ("stack init failed for size %d\n", kkcc_gc_stack_size);
|
|
795 ABORT ();
|
|
796 }
|
|
797 kkcc_gc_stack_front = 0;
|
|
798 kkcc_gc_stack_rear = kkcc_gc_stack_size - 1;
|
|
799 }
|
|
800
|
|
801 static void
|
|
802 kkcc_gc_stack_free (void)
|
|
803 {
|
|
804 xfree_1 (kkcc_gc_stack_ptr);
|
|
805 kkcc_gc_stack_ptr = 0;
|
|
806 kkcc_gc_stack_front = 0;
|
|
807 kkcc_gc_stack_rear = 0;
|
|
808 kkcc_gc_stack_size = 0;
|
|
809 }
|
|
810
|
|
811 static void
|
|
812 kkcc_gc_stack_realloc (void)
|
|
813 {
|
|
814 kkcc_gc_stack_entry *old_ptr = kkcc_gc_stack_ptr;
|
|
815 int old_size = kkcc_gc_stack_size;
|
|
816 kkcc_gc_stack_size *= 2;
|
|
817 kkcc_gc_stack_ptr = (kkcc_gc_stack_entry *)
|
|
818 xmalloc_and_zero (kkcc_gc_stack_size * sizeof (kkcc_gc_stack_entry));
|
|
819 if (!kkcc_gc_stack_ptr)
|
|
820 {
|
|
821 stderr_out ("stack realloc failed for size %d\n", kkcc_gc_stack_size);
|
|
822 ABORT ();
|
|
823 }
|
|
824 if (kkcc_gc_stack_rear >= kkcc_gc_stack_front)
|
|
825 {
|
|
826 int number_elements = kkcc_gc_stack_rear - kkcc_gc_stack_front + 1;
|
|
827 memcpy (kkcc_gc_stack_ptr, &old_ptr[kkcc_gc_stack_front],
|
|
828 number_elements * sizeof (kkcc_gc_stack_entry));
|
|
829 kkcc_gc_stack_front = 0;
|
|
830 kkcc_gc_stack_rear = number_elements - 1;
|
|
831 }
|
|
832 else
|
|
833 {
|
|
834 int number_elements = old_size - kkcc_gc_stack_front;
|
|
835 memcpy (kkcc_gc_stack_ptr, &old_ptr[kkcc_gc_stack_front],
|
|
836 number_elements * sizeof (kkcc_gc_stack_entry));
|
|
837 memcpy (&kkcc_gc_stack_ptr[number_elements], &old_ptr[0],
|
|
838 (kkcc_gc_stack_rear + 1) * sizeof (kkcc_gc_stack_entry));
|
|
839 kkcc_gc_stack_front = 0;
|
|
840 kkcc_gc_stack_rear = kkcc_gc_stack_rear + number_elements;
|
|
841 }
|
|
842 xfree_1 (old_ptr);
|
|
843 }
|
|
844
|
|
845 static void
|
|
846 #ifdef DEBUG_XEMACS
|
|
847 kkcc_gc_stack_push_1 (void *data, const struct memory_description *desc,
|
|
848 int level, int pos)
|
|
849 #else
|
|
850 kkcc_gc_stack_push_1 (void *data, const struct memory_description *desc)
|
|
851 #endif
|
|
852 {
|
|
853 #ifdef NEW_GC
|
|
854 GC_STAT_ENQUEUED;
|
|
855 #endif /* NEW_GC */
|
|
856 if (KKCC_GC_STACK_FULL)
|
|
857 kkcc_gc_stack_realloc();
|
|
858 kkcc_gc_stack_rear = KKCC_INC (kkcc_gc_stack_rear);
|
|
859 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].data = data;
|
|
860 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].desc = desc;
|
|
861 #ifdef DEBUG_XEMACS
|
|
862 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].level = level;
|
|
863 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].pos = pos;
|
|
864 #endif
|
|
865 }
|
|
866
|
|
/* Wrapper that drops the backtrace arguments when DEBUG_XEMACS is
   off, so call sites can always pass all four.  */
#ifdef DEBUG_XEMACS
#define kkcc_gc_stack_push(data, desc, level, pos)	\
  kkcc_gc_stack_push_1 (data, desc, level, pos)
#else
#define kkcc_gc_stack_push(data, desc, level, pos)	\
  kkcc_gc_stack_push_1 (data, desc)
#endif
|
|
874
|
|
/* Take the next entry off the KKCC mark stack, or return 0 when it is
   empty.  Depending on KKCC_STACK_AS_QUEUE either the newest (LIFO)
   or the oldest (FIFO) entry is returned.  The returned pointer aims
   into the stack's own storage and is only valid until the next push
   or realloc.  */
static kkcc_gc_stack_entry *
kkcc_gc_stack_pop (void)
{
  if (KKCC_GC_STACK_EMPTY)
    return 0;
#ifdef NEW_GC
  GC_STAT_DEQUEUED;
#endif /* NEW_GC */
#ifndef KKCC_STACK_AS_QUEUE
  /* stack behaviour */
  return &kkcc_gc_stack_ptr[kkcc_gc_stack_rear--];
#else
  /* queue behaviour */
  {
    int old_front = kkcc_gc_stack_front;
    kkcc_gc_stack_front = KKCC_INC (kkcc_gc_stack_front);
    return &kkcc_gc_stack_ptr[old_front];
  }
#endif
}
|
|
895
|
|
896 void
|
|
897 #ifdef DEBUG_XEMACS
|
|
898 kkcc_gc_stack_push_lisp_object_1 (Lisp_Object obj, int level, int pos)
|
|
899 #else
|
|
900 kkcc_gc_stack_push_lisp_object_1 (Lisp_Object obj)
|
|
901 #endif
|
|
902 {
|
|
903 if (XTYPE (obj) == Lisp_Type_Record)
|
|
904 {
|
|
905 struct lrecord_header *lheader = XRECORD_LHEADER (obj);
|
|
906 const struct memory_description *desc;
|
|
907 GC_CHECK_LHEADER_INVARIANTS (lheader);
|
|
908 desc = RECORD_DESCRIPTION (lheader);
|
|
909 if (! MARKED_RECORD_HEADER_P (lheader))
|
|
910 {
|
|
911 #ifdef NEW_GC
|
|
912 MARK_GREY (lheader);
|
|
913 #else /* not NEW_GC */
|
|
914 MARK_RECORD_HEADER (lheader);
|
|
915 #endif /* not NEW_GC */
|
|
916 kkcc_gc_stack_push ((void *) lheader, desc, level, pos);
|
|
917 }
|
|
918 }
|
|
919 }
|
|
920
|
|
#ifdef NEW_GC
/* Wrapper that drops the backtrace arguments when DEBUG_XEMACS is
   off, so call sites can always pass all three.  */
#ifdef DEBUG_XEMACS
#define kkcc_gc_stack_push_lisp_object(obj, level, pos)	\
  kkcc_gc_stack_push_lisp_object_1 (obj, level, pos)
#else
#define kkcc_gc_stack_push_lisp_object(obj, level, pos)	\
  kkcc_gc_stack_push_lisp_object_1 (obj)
#endif
|
|
929
|
|
/* Re-push a dirty object caught by the write barrier: mark it grey
   again and queue it so its (possibly changed) contents are
   re-scanned.  Unlike kkcc_gc_stack_push_lisp_object_1 this pushes
   the object even when it is already marked.  */
void
#ifdef DEBUG_XEMACS
kkcc_gc_stack_repush_dirty_object_1 (Lisp_Object obj, int level, int pos)
#else
kkcc_gc_stack_repush_dirty_object_1 (Lisp_Object obj)
#endif
{
  if (XTYPE (obj) == Lisp_Type_Record)
    {
      struct lrecord_header *lheader = XRECORD_LHEADER (obj);
      const struct memory_description *desc;
      GC_STAT_REPUSHED;
      GC_CHECK_LHEADER_INVARIANTS (lheader);
      desc = RECORD_DESCRIPTION (lheader);
      MARK_GREY (lheader);
      kkcc_gc_stack_push ((void*) lheader, desc, level, pos);
    }
}
|
|
948 #endif /* NEW_GC */
|
|
949
|
|
#ifdef ERROR_CHECK_GC
/* Assert that OBJ is not a freed lrecord, unless ALLOW_FREE says
   freed objects are acceptable at this call site.  */
#define KKCC_DO_CHECK_FREE(obj, allow_free)			\
do								\
  {								\
    if (!allow_free && XTYPE (obj) == Lisp_Type_Record)		\
      {								\
	struct lrecord_header *lheader = XRECORD_LHEADER (obj);	\
	GC_CHECK_NOT_FREE (lheader);				\
      }								\
  } while (0)
#else
#define KKCC_DO_CHECK_FREE(obj, allow_free)
#endif
|
|
963
|
|
#ifdef ERROR_CHECK_GC
/* Check that OBJ is not a freed object (unless ALLOW_FREE), then push
   it onto the KKCC mark stack. */
#ifdef DEBUG_XEMACS
static void
mark_object_maybe_checking_free_1 (Lisp_Object obj, int allow_free,
				   int level, int pos)
#else
static void
mark_object_maybe_checking_free_1 (Lisp_Object obj, int allow_free)
#endif
{
  KKCC_DO_CHECK_FREE (obj, allow_free);
  /* In non-DEBUG builds the macro expansion drops the level/pos
     tokens, so they need not be parameters there. */
  kkcc_gc_stack_push_lisp_object (obj, level, pos);
}

/* Caller-facing macro: always takes four arguments; the trailing two
   are discarded when DEBUG_XEMACS is off. */
#ifdef DEBUG_XEMACS
#define mark_object_maybe_checking_free(obj, allow_free, level, pos) \
  mark_object_maybe_checking_free_1 (obj, allow_free, level, pos)
#else
#define mark_object_maybe_checking_free(obj, allow_free, level, pos) \
  mark_object_maybe_checking_free_1 (obj, allow_free)
#endif
#else /* not ERROR_CHECK_GC */
/* Without ERROR_CHECK_GC there is nothing to verify: push directly. */
#define mark_object_maybe_checking_free(obj, allow_free, level, pos) \
  kkcc_gc_stack_push_lisp_object (obj, level, pos)
#endif /* not ERROR_CHECK_GC */
|
|
989
|
|
990
|
|
/* This function loops all elements of a struct pointer and calls
   mark_with_description with each element.  DATA points to an array of
   COUNT structures described by SDESC; each element is pushed onto the
   KKCC mark stack with the element's own description. */
static void
#ifdef DEBUG_XEMACS
mark_struct_contents_1 (const void *data,
			const struct sized_memory_description *sdesc,
			int count, int level, int pos)
#else
mark_struct_contents_1 (const void *data,
			const struct sized_memory_description *sdesc,
			int count)
#endif
{
  int i;
  Bytecount elsize;
  /* Element stride may depend on the data itself (variable-size
     descriptions), hence the lispdesc_block_size call. */
  elsize = lispdesc_block_size (data, sdesc);

  for (i = 0; i < count; i++)
    {
      kkcc_gc_stack_push (((char *) data) + elsize * i, sdesc->description,
			  level, pos);
    }
}

#ifdef DEBUG_XEMACS
#define mark_struct_contents(data, sdesc, count, level, pos) \
  mark_struct_contents_1 (data, sdesc, count, level, pos)
#else
#define mark_struct_contents(data, sdesc, count, level, pos) \
  mark_struct_contents_1 (data, sdesc, count)
#endif
|
|
1022
|
|
1023
|
|
#ifdef NEW_GC
/* This function loops all elements of a struct pointer and calls
   mark_with_description with each element.  NEW_GC variant of
   mark_struct_contents_1 for blocks whose elements are themselves
   lrecords: each element is treated as a Lisp object, marked grey if
   not yet marked, and pushed with the block's element description. */
static void
#ifdef DEBUG_XEMACS
mark_lisp_object_block_contents_1 (const void *data,
				   const struct sized_memory_description *sdesc,
				   int count, int level, int pos)
#else
mark_lisp_object_block_contents_1 (const void *data,
				   const struct sized_memory_description *sdesc,
				   int count)
#endif
{
  int i;
  Bytecount elsize;
  elsize = lispdesc_block_size (data, sdesc);

  for (i = 0; i < count; i++)
    {
      const Lisp_Object obj = wrap_pointer_1 (((char *) data) + elsize * i);
      if (XTYPE (obj) == Lisp_Type_Record)
	{
	  struct lrecord_header *lheader = XRECORD_LHEADER (obj);
	  const struct memory_description *desc;
	  GC_CHECK_LHEADER_INVARIANTS (lheader);
	  desc = sdesc->description;
	  if (! MARKED_RECORD_HEADER_P (lheader))
	    {
	      MARK_GREY (lheader);
	      kkcc_gc_stack_push ((void *) lheader, desc, level, pos);
	    }
	}
    }
}

#ifdef DEBUG_XEMACS
#define mark_lisp_object_block_contents(data, sdesc, count, level, pos) \
  mark_lisp_object_block_contents_1 (data, sdesc, count, level, pos)
#else
#define mark_lisp_object_block_contents(data, sdesc, count, level, pos) \
  mark_lisp_object_block_contents_1 (data, sdesc, count)
#endif
#endif /* NEW_GC */
|
|
1068
|
|
/* This function implements the KKCC mark algorithm.
   Instead of calling mark_object, all the alive Lisp_Objects are pushed
   on the kkcc_gc_stack. This function processes all elements on the stack
   according to their descriptions. */
static void
kkcc_marking (
#ifdef NEW_GC
	      /* Number of stack entries to process before returning;
		 0 means "drain the whole stack" (non-incremental). */
	      int cnt
#else /* not NEW_GC */
	      int UNUSED(cnt)
#endif /* not NEW_GC */
	      )
{
  kkcc_gc_stack_entry *stack_entry = 0;
  void *data = 0;
  const struct memory_description *desc = 0;
  int pos;
#ifdef NEW_GC
  int count = cnt;
#endif /* NEW_GC */
#ifdef DEBUG_XEMACS
  int level = 0;
#endif

  while ((stack_entry = kkcc_gc_stack_pop ()) != 0)
    {
      data = stack_entry->data;
      desc = stack_entry->desc;
#ifdef DEBUG_XEMACS
      /* Children pushed below are one traversal level deeper. */
      level = stack_entry->level + 1;
#endif
      kkcc_bt_push (data, desc, stack_entry->level, stack_entry->pos);

#ifdef NEW_GC
      /* Mark black if object is currently grey. This first checks,
	 if the object is really allocated on the mc-heap. If it is,
	 it can be marked black; if it is not, it cannot be marked. */
      maybe_mark_black (data);
#endif /* NEW_GC */

      if (!data) continue;

      /* NOTE(review): data is known non-null here, so the first assert
	 is trivially true; kept for symmetry with desc. */
      gc_checking_assert (data);
      gc_checking_assert (desc);

      /* Walk the object's memory description entry by entry and push
	 every reachable child onto the mark stack. */
      for (pos = 0; desc[pos].type != XD_END; pos++)
	{
	  const struct memory_description *desc1 = &desc[pos];
	  const void *rdata =
	    (const char *) data + lispdesc_indirect_count (desc1->offset,
							   desc, data);
	union_switcheroo:

	  /* If the flag says don't mark, then don't mark. */
	  if ((desc1->flags) & XD_FLAG_NO_KKCC)
	    continue;

	  switch (desc1->type)
	    {
	      /* Scalar / opaque fields: no Lisp references, nothing
		 to do. */
	    case XD_BYTECOUNT:
	    case XD_ELEMCOUNT:
	    case XD_HASHCODE:
	    case XD_INT:
	    case XD_LONG:
	    case XD_INT_RESET:
	    case XD_LO_LINK:
	    case XD_OPAQUE_PTR:
	    case XD_OPAQUE_DATA_PTR:
	    case XD_ASCII_STRING:
	    case XD_DOC_STRING:
	      break;
	    case XD_LISP_OBJECT:
	      {
		const Lisp_Object *stored_obj = (const Lisp_Object *) rdata;

		/* Because of the way that tagged objects work (pointers and
		   Lisp_Objects have the same representation), XD_LISP_OBJECT
		   can be used for untagged pointers. They might be NULL,
		   though. */
		if (EQ (*stored_obj, Qnull_pointer))
		  break;
#ifdef MC_ALLOC
		mark_object_maybe_checking_free (*stored_obj, 0, level, pos);
#else /* not MC_ALLOC */
		mark_object_maybe_checking_free
		  (*stored_obj, (desc1->flags) & XD_FLAG_FREE_LISP_OBJECT,
		   level, pos);
#endif /* not MC_ALLOC */
		break;
	      }
	    case XD_LISP_OBJECT_ARRAY:
	      {
		int i;
		EMACS_INT count =
		  lispdesc_indirect_count (desc1->data1, desc, data);

		for (i = 0; i < count; i++)
		  {
		    const Lisp_Object *stored_obj =
		      (const Lisp_Object *) rdata + i;

		    /* NOTE(review): a null entry terminates the scan of
		       the whole array (break, not continue) -- presumably
		       such arrays are fully initialized up to the first
		       null; confirm before changing. */
		    if (EQ (*stored_obj, Qnull_pointer))
		      break;
#ifdef MC_ALLOC
		    mark_object_maybe_checking_free
		      (*stored_obj, 0, level, pos);
#else /* not MC_ALLOC */
		    mark_object_maybe_checking_free
		      (*stored_obj, (desc1->flags) & XD_FLAG_FREE_LISP_OBJECT,
		       level, pos);
#endif /* not MC_ALLOC */
		  }
		break;
	      }
#ifdef NEW_GC
	    case XD_LISP_OBJECT_BLOCK_PTR:
	      {
		EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc,
							   data);
		const struct sized_memory_description *sdesc =
		  lispdesc_indirect_description (data, desc1->data2.descr);
		const char *dobj = * (const char **) rdata;
		if (dobj)
		  mark_lisp_object_block_contents
		    (dobj, sdesc, count, level, pos);
		break;
	      }
#endif /* NEW_GC */
	    case XD_BLOCK_PTR:
	      {
		EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc,
							   data);
		const struct sized_memory_description *sdesc =
		  lispdesc_indirect_description (data, desc1->data2.descr);
		const char *dobj = * (const char **) rdata;
		if (dobj)
		  mark_struct_contents (dobj, sdesc, count, level, pos);
		break;
	      }
	    case XD_BLOCK_ARRAY:
	      {
		EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc,
							   data);
		const struct sized_memory_description *sdesc =
		  lispdesc_indirect_description (data, desc1->data2.descr);

		/* Inline array: rdata itself is the first element. */
		mark_struct_contents (rdata, sdesc, count, level, pos);
		break;
	      }
	    case XD_UNION:
	    case XD_UNION_DYNAMIC_SIZE:
	      /* Resolve which union variant applies, then reprocess
		 this entry with the resolved description. */
	      desc1 = lispdesc_process_xd_union (desc1, desc, data);
	      if (desc1)
		goto union_switcheroo;
	      break;

	    default:
	      stderr_out ("Unsupported description type : %d\n", desc1->type);
	      kkcc_backtrace ();
	      ABORT ();
	    }
	}

#ifdef NEW_GC
      /* Incremental mode: stop after processing cnt entries.  cnt == 0
	 disables the budget entirely. */
      if (cnt)
	if (!--count)
	  break;
#endif /* NEW_GC */
    }
}
#endif /* USE_KKCC */
|
|
1240
|
|
1241 /* I hate duplicating all this crap! */
|
|
1242 int
|
|
1243 marked_p (Lisp_Object obj)
|
|
1244 {
|
|
1245 /* Checks we used to perform. */
|
|
1246 /* if (EQ (obj, Qnull_pointer)) return 1; */
|
|
1247 /* if (!POINTER_TYPE_P (XGCTYPE (obj))) return 1; */
|
|
1248 /* if (PURIFIED (XPNTR (obj))) return 1; */
|
|
1249
|
|
1250 if (XTYPE (obj) == Lisp_Type_Record)
|
|
1251 {
|
|
1252 struct lrecord_header *lheader = XRECORD_LHEADER (obj);
|
|
1253
|
|
1254 GC_CHECK_LHEADER_INVARIANTS (lheader);
|
|
1255
|
|
1256 return MARKED_RECORD_HEADER_P (lheader);
|
|
1257 }
|
|
1258 return 1;
|
|
1259 }
|
|
1260
|
|
1261
|
|
/* Mark reference to a Lisp_Object. If the object referred to has not been
   seen yet, recursively mark all the references contained in it.
   Under USE_KKCC this function must never be called -- marking goes
   through the kkcc stack instead -- so it aborts loudly. */
void
mark_object (
#ifdef USE_KKCC
	     Lisp_Object UNUSED (obj)
#else
	     Lisp_Object obj
#endif
	     )
{
#ifdef USE_KKCC
  /* this code should never be reached when configured for KKCC */
  stderr_out ("KKCC: Invalid mark_object call.\n");
  stderr_out ("Replace mark_object with kkcc_gc_stack_push_lisp_object.\n");
  ABORT ();
#else /* not USE_KKCC */

 tail_recurse:

  /* Checks we used to perform */
  /* if (EQ (obj, Qnull_pointer)) return; */
  /* if (!POINTER_TYPE_P (XGCTYPE (obj))) return; */
  /* if (PURIFIED (XPNTR (obj))) return; */

  if (XTYPE (obj) == Lisp_Type_Record)
    {
      struct lrecord_header *lheader = XRECORD_LHEADER (obj);

      GC_CHECK_LHEADER_INVARIANTS (lheader);

      /* We handle this separately, above, so we can mark free objects */
      GC_CHECK_NOT_FREE (lheader);

      /* All c_readonly objects have their mark bit set,
	 so that we only need to check the mark bit here. */
      if (! MARKED_RECORD_HEADER_P (lheader))
	{
	  MARK_RECORD_HEADER (lheader);

	  if (RECORD_MARKER (lheader))
	    {
	      /* The type-specific marker returns one more object that
		 still needs marking; loop on it rather than recursing
		 (manual tail-call elimination bounds C-stack depth). */
	      obj = RECORD_MARKER (lheader) (obj);
	      if (!NILP (obj)) goto tail_recurse;
	    }
	}
    }
#endif /* not KKCC */
}
|
|
1311
|
|
1312
|
|
1313 /************************************************************************/
|
|
1314 /* Hooks */
|
|
1315 /************************************************************************/
|
|
1316
|
|
/* Nonzero when calling certain hooks or doing other things where a GC
   would be bad. It prevents infinite recursive calls to gc.
   Dynamically bound via begin_gc_forbidden()/end_gc_forbidden(). */
int gc_currently_forbidden;
|
|
1320
|
|
/* Forbid GC for the current dynamic extent by binding
   gc_currently_forbidden to 1 on the specpdl.  Returns the specpdl
   count to hand back to unbind_to()/end_gc_forbidden(). */
int
begin_gc_forbidden (void)
{
  return internal_bind_int (&gc_currently_forbidden, 1);
}
|
|
1326
|
|
/* Undo a begin_gc_forbidden(); COUNT is the value that call returned. */
void
end_gc_forbidden (int count)
{
  unbind_to (count);
}
|
|
1332
|
|
/* Hooks. */
Lisp_Object Vpre_gc_hook, Qpre_gc_hook;
Lisp_Object Vpost_gc_hook, Qpost_gc_hook;

/* Maybe we want to use this when doing a "panic" gc after memory_full()? */
static int gc_hooks_inhibited;

/* A deferred action: a function plus its closure argument, to be run
   once the current garbage collection has finished. */
struct post_gc_action
{
  void (*fun) (void *);
  void *arg;
};

typedef struct post_gc_action post_gc_action;

typedef struct
{
  Dynarr_declare (post_gc_action);
} post_gc_action_dynarr;

/* Queue of deferred actions; lazily allocated by
   register_post_gc_action() and drained by run_post_gc_actions(). */
static post_gc_action_dynarr *post_gc_actions;
|
|
1354
|
|
1355 /* Register an action to be called at the end of GC.
|
|
1356 gc_in_progress is 0 when this is called.
|
|
1357 This is used when it is discovered that an action needs to be taken,
|
|
1358 but it's during GC, so it's not safe. (e.g. in a finalize method.)
|
|
1359
|
|
1360 As a general rule, do not use Lisp objects here.
|
|
1361 And NEVER signal an error.
|
|
1362 */
|
|
1363
|
|
1364 void
|
|
1365 register_post_gc_action (void (*fun) (void *), void *arg)
|
|
1366 {
|
|
1367 post_gc_action action;
|
|
1368
|
|
1369 if (!post_gc_actions)
|
|
1370 post_gc_actions = Dynarr_new (post_gc_action);
|
|
1371
|
|
1372 action.fun = fun;
|
|
1373 action.arg = arg;
|
|
1374
|
|
1375 Dynarr_add (post_gc_actions, action);
|
|
1376 }
|
|
1377
|
|
1378 static void
|
|
1379 run_post_gc_actions (void)
|
|
1380 {
|
|
1381 int i;
|
|
1382
|
|
1383 if (post_gc_actions)
|
|
1384 {
|
|
1385 for (i = 0; i < Dynarr_length (post_gc_actions); i++)
|
|
1386 {
|
|
1387 post_gc_action action = Dynarr_at (post_gc_actions, i);
|
|
1388 (action.fun) (action.arg);
|
|
1389 }
|
|
1390
|
|
1391 Dynarr_reset (post_gc_actions);
|
|
1392 }
|
|
1393 }
|
|
1394
|
|
1395
|
|
1396
|
|
1397 /************************************************************************/
|
|
1398 /* Garbage Collection */
|
|
1399 /************************************************************************/
|
|
1400
|
|
/* Enable/disable incremental garbage collection during runtime. */
int allow_incremental_gc;

/* For profiling. */
static Lisp_Object QSin_garbage_collection;

/* Nonzero means display messages at beginning and end of GC. */
int garbage_collection_messages;

/* "Garbage collecting" */
Lisp_Object Vgc_message;
Lisp_Object Vgc_pointer_glyph;
static const Ascbyte gc_default_message[] = "Garbage collecting";
Lisp_Object Qgarbage_collecting;

/* "Locals" during GC.  These are file-global (not function-local) so
   that state set up in gc_prepare() survives until gc_finish(), even
   across a suspended incremental mark phase.  NOTE(review): this makes
   GC non-reentrant -- but gc_prepare asserts !gc_in_progress anyway. */
struct frame *f;
int speccount;
int cursor_changed;
Lisp_Object pre_gc_cursor;

/* PROFILE_DECLARE */
int do_backtrace;
struct backtrace backtrace;

/* Maximum amount of C stack to save when a GC happens.  */
#ifndef MAX_SAVE_STACK
#define MAX_SAVE_STACK 0 /* 16000 */
#endif
|
|
1430
|
|
/* Set up for a garbage collection: run the pre-GC hook, remember and
   switch the frame cursor / echo-area message, optionally snapshot the
   C stack for debugging, and clear some ad-hoc resources.  Sets
   gc_in_progress; the matching teardown is gc_finish(). */
void
gc_prepare (void)
{
#if MAX_SAVE_STACK > 0
  char stack_top_variable;
  extern char *stack_bottom;
#endif

#ifdef NEW_GC
  GC_STAT_START_NEW_GC;
  GC_SET_PHASE (INIT_GC);
#endif /* NEW_GC */

  do_backtrace = profiling_active || backtrace_with_internal_sections;

  assert (!gc_in_progress);
  assert (!in_display || gc_currently_forbidden);

  PROFILE_RECORD_ENTERING_SECTION (QSin_garbage_collection);

  /* We used to call selected_frame() here.

     The following functions cannot be called inside GC
     so we move to after the above tests. */
  {
    Lisp_Object frame;
    Lisp_Object device = Fselected_device (Qnil);
    if (NILP (device)) /* Could happen during startup, eg. if always_gc */
      return;
    frame = Fselected_frame (device);
    if (NILP (frame))
      invalid_state ("No frames exist on device", device);
    f = XFRAME (frame);
  }

  pre_gc_cursor = Qnil;
  cursor_changed = 0;

  need_to_signal_post_gc = 0;
  recompute_funcall_allocation_flag ();

  if (!gc_hooks_inhibited)
    run_hook_trapping_problems
      (Qgarbage_collecting, Qpre_gc_hook,
       INHIBIT_EXISTING_PERMANENT_DISPLAY_OBJECT_DELETION);

  /* Now show the GC cursor/message. */
  if (!noninteractive)
    {
      if (FRAME_WIN_P (f))
	{
	  Lisp_Object frame = wrap_frame (f);
	  Lisp_Object cursor = glyph_image_instance (Vgc_pointer_glyph,
						     FRAME_SELECTED_WINDOW (f),
						     ERROR_ME_NOT, 1);
	  pre_gc_cursor = f->pointer;
	  if (POINTER_IMAGE_INSTANCEP (cursor)
	      /* don't change if we don't know how to change back. */
	      && POINTER_IMAGE_INSTANCEP (pre_gc_cursor))
	    {
	      cursor_changed = 1;
	      Fset_frame_pointer (frame, cursor);
	    }
	}

      /* Don't print messages to the stream device. */
      if (!cursor_changed && !FRAME_STREAM_P (f))
	{
	  if (garbage_collection_messages)
	    {
	      Lisp_Object args[2], whole_msg;
	      args[0] = (STRINGP (Vgc_message) ? Vgc_message :
			 build_msg_string (gc_default_message));
	      args[1] = build_string ("...");
	      whole_msg = Fconcat (2, args);
	      echo_area_message (f, (Ibyte *) 0, whole_msg, 0, -1,
				 Qgarbage_collecting);
	    }
	}
    }

  /***** Now we actually start the garbage collection. */

  gc_in_progress = 1;
#ifndef NEW_GC
  inhibit_non_essential_conversion_operations = 1;
#endif /* NEW_GC */

#if MAX_SAVE_STACK > 0

  /* Save a copy of the contents of the stack, for debugging. */
  if (!purify_flag)
    {
      /* Static buffer in which we save a copy of the C stack at each GC. */
      static char *stack_copy;
      static Bytecount stack_copy_size;

      ptrdiff_t stack_diff = &stack_top_variable - stack_bottom;
      Bytecount stack_size = (stack_diff > 0 ? stack_diff : -stack_diff);
      if (stack_size < MAX_SAVE_STACK)
	{
	  if (stack_copy_size < stack_size)
	    {
	      stack_copy = (char *) xrealloc (stack_copy, stack_size);
	      stack_copy_size = stack_size;
	    }

	  /* The stack may grow either way; copy from the lower end. */
	  memcpy (stack_copy,
		  stack_diff > 0 ? stack_bottom : &stack_top_variable,
		  stack_size);
	}
    }
#endif /* MAX_SAVE_STACK > 0 */

  /* Do some totally ad-hoc resource clearing. */
  /* #### generalize this? */
  clear_event_resource ();
  cleanup_specifiers ();
  cleanup_buffer_undo_lists ();
}
|
|
1551
|
|
1552 void
|
|
1553 gc_mark_root_set (
|
|
1554 #ifdef NEW_GC
|
|
1555 enum gc_phase phase
|
|
1556 #else /* not NEW_GC */
|
|
1557 void
|
|
1558 #endif /* not NEW_GC */
|
|
1559 )
|
|
1560 {
|
|
1561 #ifdef NEW_GC
|
|
1562 GC_SET_PHASE (phase);
|
|
1563 #endif /* NEW_GC */
|
|
1564
|
|
1565 /* Mark all the special slots that serve as the roots of accessibility. */
|
|
1566
|
|
1567 #ifdef USE_KKCC
|
|
1568 # define mark_object(obj) kkcc_gc_stack_push_lisp_object (obj, 0, -1)
|
|
1569 #endif /* USE_KKCC */
|
|
1570
|
|
1571 { /* staticpro() */
|
|
1572 Lisp_Object **p = Dynarr_begin (staticpros);
|
|
1573 Elemcount count;
|
|
1574 for (count = Dynarr_length (staticpros); count; count--)
|
|
1575 /* Need to check if the pointer in the staticpro array is not
|
|
1576 NULL. A gc can occur after variable is added to the staticpro
|
|
1577 array and _before_ it is correctly initialized. In this case
|
|
1578 its value is NULL, which we have to catch here. */
|
|
1579 if (*p)
|
|
1580 mark_object (**p++);
|
|
1581 else
|
|
1582 **p++;
|
|
1583 }
|
|
1584
|
|
1585 { /* staticpro_nodump() */
|
|
1586 Lisp_Object **p = Dynarr_begin (staticpros_nodump);
|
|
1587 Elemcount count;
|
|
1588 for (count = Dynarr_length (staticpros_nodump); count; count--)
|
|
1589 /* Need to check if the pointer in the staticpro array is not
|
|
1590 NULL. A gc can occur after variable is added to the staticpro
|
|
1591 array and _before_ it is correctly initialized. In this case
|
|
1592 its value is NULL, which we have to catch here. */
|
|
1593 if (*p)
|
|
1594 mark_object (**p++);
|
|
1595 else
|
|
1596 **p++;
|
|
1597 }
|
|
1598
|
|
1599 #ifdef MC_ALLOC
|
|
1600 { /* mcpro () */
|
|
1601 Lisp_Object *p = Dynarr_begin (mcpros);
|
|
1602 Elemcount count;
|
|
1603 for (count = Dynarr_length (mcpros); count; count--)
|
|
1604 mark_object (*p++);
|
|
1605 }
|
|
1606 #endif /* MC_ALLOC */
|
|
1607
|
|
1608 { /* GCPRO() */
|
|
1609 struct gcpro *tail;
|
|
1610 int i;
|
|
1611 for (tail = gcprolist; tail; tail = tail->next)
|
|
1612 for (i = 0; i < tail->nvars; i++)
|
|
1613 mark_object (tail->var[i]);
|
|
1614 }
|
|
1615
|
|
1616 { /* specbind() */
|
|
1617 struct specbinding *bind;
|
|
1618 for (bind = specpdl; bind != specpdl_ptr; bind++)
|
|
1619 {
|
|
1620 mark_object (bind->symbol);
|
|
1621 mark_object (bind->old_value);
|
|
1622 }
|
|
1623 }
|
|
1624
|
|
1625 {
|
|
1626 struct catchtag *c;
|
|
1627 for (c = catchlist; c; c = c->next)
|
|
1628 {
|
|
1629 mark_object (c->tag);
|
|
1630 mark_object (c->val);
|
|
1631 mark_object (c->actual_tag);
|
|
1632 mark_object (c->backtrace);
|
|
1633 }
|
|
1634 }
|
|
1635
|
|
1636 {
|
|
1637 struct backtrace *backlist;
|
|
1638 for (backlist = backtrace_list; backlist; backlist = backlist->next)
|
|
1639 {
|
|
1640 int nargs = backlist->nargs;
|
|
1641 int i;
|
|
1642
|
|
1643 mark_object (*backlist->function);
|
|
1644 if (nargs < 0 /* nargs == UNEVALLED || nargs == MANY */
|
|
1645 /* might be fake (internal profiling entry) */
|
|
1646 && backlist->args)
|
|
1647 mark_object (backlist->args[0]);
|
|
1648 else
|
|
1649 for (i = 0; i < nargs; i++)
|
|
1650 mark_object (backlist->args[i]);
|
|
1651 }
|
|
1652 }
|
|
1653
|
|
1654 mark_profiling_info ();
|
|
1655 #ifdef USE_KKCC
|
|
1656 # undef mark_object
|
|
1657 #endif
|
|
1658 }
|
|
1659
|
|
/* Finish the mark phase: iterate over weak objects (weak hash tables,
   weak lists, ephemerons), which can make further objects reachable,
   until a fixed point is reached; then prune the entries whose
   keys/elements died.  Under USE_KKCC each iteration must drain the
   mark stack that the finish_marking_* calls refill. */
void
gc_finish_mark (void)
{
#ifdef NEW_GC
  GC_SET_PHASE (FINISH_MARK);
#endif /* NEW_GC */
  init_marking_ephemerons ();

  while (finish_marking_weak_hash_tables () > 0 ||
	 finish_marking_weak_lists () > 0 ||
	 continue_marking_ephemerons () > 0)
#ifdef USE_KKCC
    {
      kkcc_marking (0);
    }
#else /* not USE_KKCC */
    ;
#endif /* not USE_KKCC */

  /* At this point, we know which objects need to be finalized: we
     still need to resurrect them */

  while (finish_marking_ephemerons () > 0 ||
	 finish_marking_weak_lists () > 0 ||
	 finish_marking_weak_hash_tables () > 0)
#ifdef USE_KKCC
    {
      kkcc_marking (0);
    }
#else /* not USE_KKCC */
    ;
#endif /* not USE_KKCC */

  /* And prune (this needs to be called after everything else has been
     marked and before we do any sweeping). */
  /* #### this is somewhat ad-hoc and should probably be an object
     method */
  prune_weak_hash_tables ();
  prune_weak_lists ();
  prune_specifiers ();
  prune_syntax_tables ();

  prune_ephemerons ();
  prune_weak_boxes ();
}
|
|
1705
|
|
#ifdef NEW_GC
/* Finalize phase: let the mc allocator run finalizers for unreachable
   objects. */
void
gc_finalize (void)
{
  GC_SET_PHASE (FINALIZE);
  mc_finalize ();
}

/* Sweep phase: reclaim the storage of unmarked objects. */
void
gc_sweep (void)
{
  GC_SET_PHASE (SWEEP);
  mc_sweep ();
}
#endif /* NEW_GC */
|
|
1721
|
|
1722
|
|
/* Wrap up a garbage collection: reset the consing counter, re-enable
   operations disabled in gc_prepare(), run queued post-GC actions, and
   restore the cursor / echo-area message.  Counterpart of
   gc_prepare(). */
void
gc_finish (void)
{
#ifdef NEW_GC
  GC_SET_PHASE (FINISH_GC);
#endif /* NEW_GC */
  consing_since_gc = 0;
#ifndef DEBUG_XEMACS
  /* Allow you to set it really fucking low if you really want ... */
  if (gc_cons_threshold < 10000)
    gc_cons_threshold = 10000;
#endif
  recompute_need_to_garbage_collect ();

#ifndef NEW_GC
  inhibit_non_essential_conversion_operations = 0;
#endif /* not NEW_GC */
  gc_in_progress = 0;

  run_post_gc_actions ();

  /******* End of garbage collection ********/

  /* Now remove the GC cursor/message */
  if (!noninteractive)
    {
      if (cursor_changed)
	Fset_frame_pointer (wrap_frame (f), pre_gc_cursor);
      else if (!FRAME_STREAM_P (f))
	{
	  /* Show "...done" only if the echo area would otherwise be empty. */
	  if (NILP (clear_echo_area (selected_frame (),
				     Qgarbage_collecting, 0)))
	    {
	      if (garbage_collection_messages)
		{
		  Lisp_Object args[2], whole_msg;
		  args[0] = (STRINGP (Vgc_message) ? Vgc_message :
			     build_msg_string (gc_default_message));
		  args[1] = build_msg_string ("... done");
		  whole_msg = Fconcat (2, args);
		  echo_area_message (selected_frame (), (Ibyte *) 0,
				     whole_msg, 0, -1,
				     Qgarbage_collecting);
		}
	    }
	}
    }

#ifndef MC_ALLOC
  /* Reserve a small block -- presumably so a later out-of-memory
     situation still has something to release; confirm against the
     breathing_space users in alloc.c. */
  if (!breathing_space)
    {
      breathing_space = malloc (4096 - MALLOC_OVERHEAD);
    }
#endif /* not MC_ALLOC */

  need_to_signal_post_gc = 1;
  funcall_allocation_flag = 1;

  PROFILE_RECORD_EXITING_SECTION (QSin_garbage_collection);

#ifdef NEW_GC
  GC_SET_PHASE (NONE);
#endif /* NEW_GC */
}
|
|
1788
|
|
#ifdef NEW_GC
/* Suspend the incremental mark phase so the mutator can run: enable
   the write barrier and start recording dirty pages (see vdb.c). */
void
gc_suspend_mark_phase (void)
{
  PROFILE_RECORD_EXITING_SECTION (QSin_garbage_collection);
  write_barrier_enabled = 1;
  consing_since_gc = 0;
  vdb_start_dirty_bits_recording ();
}

/* Resume a suspended mark phase: stop dirty-bit recording, disable the
   write barrier, and return the number reported by
   vdb_read_dirty_bits() (used as repushed-object count by
   gc_resume_mark()). */
int
gc_resume_mark_phase (void)
{
  PROFILE_RECORD_ENTERING_SECTION (QSin_garbage_collection);
  assert (write_barrier_enabled);
  vdb_stop_dirty_bits_recording ();
  write_barrier_enabled = 0;
  return vdb_read_dirty_bits ();
}
|
|
1808
|
|
/* Mark-phase driver.  Returns 1 when marking finished, 0 when it was
   suspended (incremental mode with the traversal budget exhausted). */
int
gc_mark (int incremental)
{
  GC_SET_PHASE (MARK);
  if (!incremental)
    {
      kkcc_marking (0);
    }
  else
    {
      kkcc_marking (gc_incremental_traversal_threshold);
      if (!KKCC_GC_STACK_EMPTY)
	{
	  /* Budget used up with work remaining: arm the write barrier
	     and hand control back to the mutator. */
	  gc_suspend_mark_phase ();
	  return 0;
	}
    }
  return 1;
}
|
|
1828
|
|
/* Continue a previously suspended mark phase.  Returns 1 when marking
   completed (including the final root-set repush), 0 when the phase
   was suspended again.  INCREMENTAL selects between finishing
   atomically and doing only a bounded amount of work. */
int
gc_resume_mark (int incremental)
{
  if (!incremental)
    {
      if (!KKCC_GC_STACK_EMPTY)
	{
	  GC_STAT_RESUME_GC;
	  /* An incremental garbage collection is already running ---
	     now wrap it up and resume it atomically. */
	  gc_resume_mark_phase ();
	  gc_mark_root_set (REPUSH_ROOT_SET);
	  kkcc_marking (0);
	}
    }
  else
    {
      int repushed_objects;
      int mark_work;
      GC_STAT_RESUME_GC;
      repushed_objects = gc_resume_mark_phase ();
      /* Do at least the configured traversal budget, and at least as
	 much as the write barrier re-dirtied, so progress is made. */
      mark_work = (gc_incremental_traversal_threshold > repushed_objects) ?
	gc_incremental_traversal_threshold : repushed_objects;
      kkcc_marking (mark_work);
      if (KKCC_GC_STACK_EMPTY)
	{
	  /* Mark root set again and finish up marking. */
	  gc_mark_root_set (REPUSH_ROOT_SET);
	  kkcc_marking (0);
	}
      else
	{
	  gc_suspend_mark_phase ();
	  return 0;
	}
    }
  return 1;
}
|
|
1867
|
|
1868
|
|
/* GC state machine.  GC_PHASE records where a suspended incremental
   collection stopped; the deliberately fall-through cases resume the
   collection at exactly that point and carry it as far as INCREMENTAL
   allows. */
void
gc_1 (int incremental)
{
  switch (GC_PHASE)
    {
    case NONE:
      gc_prepare ();
      kkcc_gc_stack_init();
#ifdef DEBUG_XEMACS
      kkcc_bt_init ();
#endif
      /* fall through */
    case INIT_GC:
      gc_mark_root_set (PUSH_ROOT_SET);
      /* fall through */
    case PUSH_ROOT_SET:
      if (!gc_mark (incremental))
	return; /* suspend gc */
      /* fall through */
    case MARK:
      if (!KKCC_GC_STACK_EMPTY)
	if (!gc_resume_mark (incremental))
	  return; /* suspend gc */
      gc_finish_mark ();
      kkcc_gc_stack_free ();
#ifdef DEBUG_XEMACS
      kkcc_bt_free ();
#endif
      /* fall through */
    case FINISH_MARK:
      gc_finalize ();
      /* fall through */
    case FINALIZE:
      gc_sweep ();
      /* fall through */
    case SWEEP:
      gc_finish ();
      /* fall through */
    case FINISH_GC:
      break;
    }
}
|
|
1904
|
|
/* Top-level GC driver (NEW_GC).  Bails out in contexts where a GC is
   unsafe, then runs (or resumes) the gc_1() state machine with GC
   recursively forbidden. */
void gc (int incremental)
{
  if (gc_currently_forbidden
      || in_display
      || preparing_for_armageddon)
    return;

  /* Very important to prevent GC during any of the following
     stuff that might run Lisp code; otherwise, we'll likely
     have infinite GC recursion. */
  speccount = begin_gc_forbidden ();

  gc_1 (incremental);

  /* now stop inhibiting GC */
  unbind_to (speccount);
}
|
|
1922
|
|
/* Run a complete, non-incremental collection (completes any suspended
   incremental collection as a side effect). */
void
gc_full (void)
{
  gc (0);
}
|
|
1928
|
|
/* Lisp-visible entry point for a full collection.  The text between
   the comment markers below is extracted as the Lisp docstring --
   treat it as user-visible documentation, not as a C comment. */
DEFUN ("gc-full", Fgc_full, 0, 0, "", /*
This function performs a full garbage collection. If an incremental
garbage collection is already running, it completes without any
further interruption. This function guarantees that unused objects
are freed when it returns. Garbage collection happens automatically if
the client allocates more than `gc-cons-threshold' bytes of Lisp data
since the previous garbage collection.
*/
       ())
{
  gc_full ();
  return Qt;
}
|
|
1942
|
|
/* Run one GC cycle, incrementally when the runtime flag
   allow_incremental_gc permits; otherwise behaves like gc_full(). */
void
gc_incremental (void)
{
  gc (allow_incremental_gc);
}
|
|
1948
|
|
/* Lisp-visible entry point for one incremental GC cycle.  The text
   between the comment markers below is extracted as the Lisp
   docstring -- treat it as user-visible documentation. */
DEFUN ("gc-incremental", Fgc_incremental, 0, 0, "", /*
This function starts an incremental garbage collection. If an
incremental garbage collection is already running, the next cycle
starts. Note that this function has not necessarily freed any memory
when it returns. This function only guarantees, that the traversal of
the heap makes progress. The next cycle of incremental garbage
collection happens automatically if the client allocates more than
`gc-incremental-cons-threshold' bytes of Lisp data since previous
garbage collection.
*/
       ())
{
  gc_incremental ();
  return Qt;
}
|
|
#else /* not NEW_GC */
/* Top-level entry point of the old, non-incremental collector:
   prepare, mark the root set, (under USE_KKCC) drain the mark stack,
   resolve weak objects, sweep, and finish -- all with GC recursively
   forbidden. */
void garbage_collect_1 (void)
{
  if (gc_in_progress
      || gc_currently_forbidden
      || in_display
      || preparing_for_armageddon)
    return;

  /* Very important to prevent GC during any of the following
     stuff that might run Lisp code; otherwise, we'll likely
     have infinite GC recursion. */
  speccount = begin_gc_forbidden ();

  gc_prepare ();
#ifdef USE_KKCC
  kkcc_gc_stack_init();
#ifdef DEBUG_XEMACS
  kkcc_bt_init ();
#endif
#endif /* USE_KKCC */
  gc_mark_root_set ();
#ifdef USE_KKCC
  kkcc_marking (0);
#endif /* USE_KKCC */
  gc_finish_mark ();
#ifdef USE_KKCC
  kkcc_gc_stack_free ();
#ifdef DEBUG_XEMACS
  kkcc_bt_free ();
#endif
#endif /* USE_KKCC */
  gc_sweep_1 ();
  gc_finish ();

  /* now stop inhibiting GC */
  unbind_to (speccount);
}
#endif /* not NEW_GC */
|
|
2003
|
|
2004
|
|
2005 /************************************************************************/
|
|
2006 /* Initializations */
|
|
2007 /************************************************************************/
|
|
2008
|
|
2009 /* Initialization */
|
|
/* Reset the GC tuning variables and bookkeeping state to their
   compiled-in defaults.  Shared by init_gc_once_early () and
   reinit_gc_early ().  */
static void
common_init_gc_early (void)
{
  Vgc_message = Qzero;

  gc_currently_forbidden = 0;
  gc_hooks_inhibited = 0;

  need_to_garbage_collect = always_gc;

  gc_cons_threshold = GC_CONS_THRESHOLD;
  gc_cons_percentage = 40; /* #### what is optimal? */
  total_gc_usage_set = 0;
#ifdef NEW_GC
  gc_cons_incremental_threshold = GC_CONS_INCREMENTAL_THRESHOLD;
  gc_incremental_traversal_threshold = GC_INCREMENTAL_TRAVERSAL_THRESHOLD;
#endif /* NEW_GC */
}
|
|
2028
|
|
/* Early GC initialization.  Currently nothing to do; kept as a hook
   for symmetry with the other init_* routines.  */
void
init_gc_early (void)
{
}
|
|
2033
|
|
/* Re-run the common early GC initialization (presumably when starting
   up from a dumped image -- confirm against callers).  */
void
reinit_gc_early (void)
{
  common_init_gc_early ();
}
|
|
2039
|
|
/* One-time early GC initialization: set all tuning variables to their
   defaults via common_init_gc_early ().  */
void
init_gc_once_early (void)
{
  common_init_gc_early ();
}
|
|
2045
|
|
/* Define the symbols and Lisp primitives provided by this file.  */
void
syms_of_gc (void)
{
  DEFSYMBOL (Qpre_gc_hook);
  DEFSYMBOL (Qpost_gc_hook);
#ifdef NEW_GC
  DEFSUBR (Fgc_full);
  DEFSUBR (Fgc_incremental);
#ifdef ERROR_CHECK_GC
  DEFSUBR (Fgc_stats);
#endif /* ERROR_CHECK_GC */
#endif /* NEW_GC */
}
|
|
2059
|
|
/* Declare this file's Lisp-visible variables and set their defaults.
   Note: the staticpro calls register GC roots, so keep them before the
   corresponding assignments.  */
void
vars_of_gc (void)
{
  staticpro_nodump (&pre_gc_cursor);

  QSin_garbage_collection = build_msg_string ("(in garbage collection)");
  staticpro (&QSin_garbage_collection);

  DEFVAR_INT ("gc-cons-threshold", &gc_cons_threshold /*
*Number of bytes of consing between full garbage collections.
\"Consing\" is a misnomer in that this actually counts allocation
of all different kinds of objects, not just conses.
Garbage collection can happen automatically once this many bytes have been
allocated since the last garbage collection. All data types count.

Garbage collection happens automatically when `eval' or `funcall' are
called. (Note that `funcall' is called implicitly as part of evaluation.)
By binding this temporarily to a large number, you can effectively
prevent garbage collection during a part of the program.

Normally, you cannot set this value less than 10,000 (if you do, it is
automatically reset during the next garbage collection). However, if
XEmacs was compiled with DEBUG_XEMACS, this does not happen, allowing
you to set this value very low to track down problems with insufficient
GCPRO'ing. If you set this to a negative number, garbage collection will
happen at *EVERY* call to `eval' or `funcall'. This is an extremely
effective way to check GCPRO problems, but be warned that your XEmacs
will be unusable! You almost certainly won't have the patience to wait
long enough to be able to set it back.

See also `consing-since-gc' and `gc-cons-percentage'.
*/ );

  DEFVAR_INT ("gc-cons-percentage", &gc_cons_percentage /*
*Percentage of memory allocated between garbage collections.

Garbage collection will happen if this percentage of the total amount of
memory used for data (see `lisp-object-memory-usage') has been allocated
since the last garbage collection. However, it will not happen if less
than `gc-cons-threshold' bytes have been allocated -- this sets an absolute
minimum in case very little data has been allocated or the percentage is
set very low. Set this to 0 to have garbage collection always happen after
`gc-cons-threshold' bytes have been allocated, regardless of current memory
usage.

See also `consing-since-gc' and `gc-cons-threshold'.
*/ );

#ifdef NEW_GC
  DEFVAR_INT ("gc-cons-incremental-threshold",
	      &gc_cons_incremental_threshold /*
*Number of bytes of consing between cycles of incremental garbage
collections. \"Consing\" is a misnomer in that this actually counts
allocation of all different kinds of objects, not just conses. The
next garbage collection cycle can happen automatically once this many
bytes have been allocated since the last garbage collection cycle.
All data types count.

See also `gc-cons-threshold'.
*/ );

  DEFVAR_INT ("gc-incremental-traversal-threshold",
	      &gc_incremental_traversal_threshold /*
*Number of elements processed in one cycle of incremental traversal.
*/ );
#endif /* NEW_GC */

  DEFVAR_BOOL ("purify-flag", &purify_flag /*
Non-nil means loading Lisp code in order to dump an executable.
This means that certain objects should be allocated in readonly space.
*/ );

  DEFVAR_BOOL ("garbage-collection-messages", &garbage_collection_messages /*
Non-nil means display messages at start and end of garbage collection.
*/ );
  garbage_collection_messages = 0;

  DEFVAR_LISP ("pre-gc-hook", &Vpre_gc_hook /*
Function or functions to be run just before each garbage collection.
Interrupts, garbage collection, and errors are inhibited while this hook
runs, so be extremely careful in what you add here. In particular, avoid
consing, and do not interact with the user.
*/ );
  Vpre_gc_hook = Qnil;

  DEFVAR_LISP ("post-gc-hook", &Vpost_gc_hook /*
Function or functions to be run just after each garbage collection.
Interrupts, garbage collection, and errors are inhibited while this hook
runs. Each hook is called with one argument which is an alist with
finalization data.
*/ );
  Vpost_gc_hook = Qnil;

  DEFVAR_LISP ("gc-message", &Vgc_message /*
String to print to indicate that a garbage collection is in progress.
This is printed in the echo area. If the selected frame is on a
window system and `gc-pointer-glyph' specifies a value (i.e. a pointer
image instance) in the domain of the selected frame, the mouse pointer
will change instead of this message being printed.
*/ );
  Vgc_message = build_string (gc_default_message);

  DEFVAR_LISP ("gc-pointer-glyph", &Vgc_pointer_glyph /*
Pointer glyph used to indicate that a garbage collection is in progress.
If the selected window is on a window system and this glyph specifies a
value (i.e. a pointer image instance) in the domain of the selected
window, the pointer will be changed as specified during garbage collection.
Otherwise, a message will be printed in the echo area, as controlled
by `gc-message'.
*/ );

#ifdef NEW_GC
  DEFVAR_BOOL ("allow-incremental-gc", &allow_incremental_gc /*
*Non-nil means to allow incremental garbage collection. Nil prevents
*incremental garbage collection, the garbage collector then only does
*full collects (even if (gc-incremental) is called).
*/ );
#endif /* NEW_GC */
}
|
|
2179
|
|
/* Late ("complex") initialization: create the pointer glyph used to
   indicate that a garbage collection is in progress (see the
   `gc-pointer-glyph' DEFVAR above).  */
void
complex_vars_of_gc (void)
{
  Vgc_pointer_glyph = Fmake_glyph_internal (Qpointer);
}
|