3092
|
1 /* New incremental garbage collector for XEmacs.
|
|
2 Copyright (C) 2005 Marcus Crestani.
|
|
3
|
|
4 This file is part of XEmacs.
|
|
5
|
|
6 XEmacs is free software; you can redistribute it and/or modify it
|
|
7 under the terms of the GNU General Public License as published by the
|
|
8 Free Software Foundation; either version 2, or (at your option) any
|
|
9 later version.
|
|
10
|
|
11 XEmacs is distributed in the hope that it will be useful, but WITHOUT
|
|
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
14 for more details.
|
|
15
|
|
16 You should have received a copy of the GNU General Public License
|
|
17 along with XEmacs; see the file COPYING. If not, write to
|
|
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
|
19 Boston, MA 02111-1307, USA. */
|
|
20
|
|
21 /* Synched up with: Not in FSF. */
|
|
22
|
|
23 #include <config.h>
|
|
24 #include "lisp.h"
|
|
25
|
|
26 #include "backtrace.h"
|
|
27 #include "buffer.h"
|
|
28 #include "bytecode.h"
|
|
29 #include "chartab.h"
|
|
30 #include "console-stream.h"
|
|
31 #include "device.h"
|
|
32 #include "elhash.h"
|
|
33 #include "events.h"
|
|
34 #include "extents-impl.h"
|
|
35 #include "file-coding.h"
|
|
36 #include "frame-impl.h"
|
|
37 #include "gc.h"
|
|
38 #include "glyphs.h"
|
|
39 #include "opaque.h"
|
|
40 #include "lrecord.h"
|
|
41 #include "lstream.h"
|
|
42 #include "process.h"
|
|
43 #include "profile.h"
|
|
44 #include "redisplay.h"
|
|
45 #include "specifier.h"
|
|
46 #include "sysfile.h"
|
|
47 #include "sysdep.h"
|
|
48 #include "window.h"
|
|
49 #include "vdb.h"
|
|
50
|
|
51
|
|
/* Default values for the GC tuning variables defined below. */
#define GC_CONS_THRESHOLD 2000000
#define GC_CONS_INCREMENTAL_THRESHOLD 200000
#define GC_INCREMENTAL_TRAVERSAL_THRESHOLD 100000

/* Number of bytes of consing done since the last GC. */
EMACS_INT consing_since_gc;

/* Number of bytes of consing done since startup. */
EMACS_UINT total_consing;

/* Number of bytes of current allocated heap objects. */
EMACS_INT total_gc_usage;

/* If the above is set. */
int total_gc_usage_set;

/* Number of bytes of consing since gc before another gc should be done. */
EMACS_INT gc_cons_threshold;

/* Nonzero during gc */
int gc_in_progress;

/* Percentage of consing of total data size before another GC. */
EMACS_INT gc_cons_percentage;

#ifdef NEW_GC
/* Number of bytes of consing since gc before another cycle of the gc
   should be done in incremental mode. */
EMACS_INT gc_cons_incremental_threshold;

/* Number of elements marked in one cycle of incremental GC. */
EMACS_INT gc_incremental_traversal_threshold;

/* Nonzero during write barrier */
int write_barrier_enabled;
#endif /* NEW_GC */
|
|
88
|
|
89
|
|
90
|
|
91 #ifdef NEW_GC
|
|
92 /************************************************************************/
|
|
93 /* Incremental State and Statistics */
|
|
94 /************************************************************************/
|
|
95
|
|
/* The phases an (incremental) garbage collection run moves through,
   in order.  NONE means no collection is currently in progress. */
enum gc_phase
{
  NONE,
  INIT_GC,
  PUSH_ROOT_SET,
  MARK,
  REPUSH_ROOT_SET,
  FINISH_MARK,
  FINALIZE,
  SWEEP,
  FINISH_GC
};
|
|
108
|
|
#ifndef ERROR_CHECK_GC
/* Without GC error checking, the only state tracked is the current
   phase. */
struct
{
  enum gc_phase phase;
} gc_state;
#else /* ERROR_CHECK_GC */
/* Indices into the per-statistic arrays below; each statistic is kept
   over several time windows simultaneously. */
enum gc_stat_id
{
  GC_STAT_TOTAL,
  GC_STAT_IN_LAST_GC,
  GC_STAT_IN_THIS_GC,
  GC_STAT_IN_LAST_CYCLE,
  GC_STAT_IN_THIS_CYCLE,
  GC_STAT_COUNT /* has to be last */
};

/* Current phase plus event counters, each indexed by enum gc_stat_id.
   Counters are doubles, presumably to avoid integer overflow over
   long sessions -- they are also reported to Lisp via make_float. */
struct
{
  enum gc_phase phase;
  double n_gc[GC_STAT_COUNT];
  double n_cycles[GC_STAT_COUNT];
  double enqueued[GC_STAT_COUNT];
  double dequeued[GC_STAT_COUNT];
  double repushed[GC_STAT_COUNT];
  double enqueued2[GC_STAT_COUNT];
  double dequeued2[GC_STAT_COUNT];
  double finalized[GC_STAT_COUNT];
  double freed[GC_STAT_COUNT];
} gc_state;
#endif /* ERROR_CHECK_GC */
|
|
139
|
|
140 #define GC_PHASE gc_state.phase
|
|
141 #define GC_SET_PHASE(p) GC_PHASE = p
|
|
142
|
|
143 #ifdef ERROR_CHECK_GC
|
|
144 # define GC_STAT_START_NEW_GC gc_stat_start_new_gc ()
|
|
145 # define GC_STAT_RESUME_GC gc_stat_resume_gc ()
|
|
146
|
|
147 #define GC_STAT_TICK(STAT) \
|
|
148 gc_state.STAT[GC_STAT_TOTAL]++; \
|
|
149 gc_state.STAT[GC_STAT_IN_THIS_GC]++; \
|
|
150 gc_state.STAT[GC_STAT_IN_THIS_CYCLE]++
|
|
151
|
|
152 # define GC_STAT_ENQUEUED \
|
|
153 if (GC_PHASE == REPUSH_ROOT_SET) \
|
|
154 { \
|
|
155 GC_STAT_TICK (enqueued2); \
|
|
156 } \
|
|
157 else \
|
|
158 { \
|
|
159 GC_STAT_TICK (enqueued); \
|
|
160 }
|
|
161
|
|
162 # define GC_STAT_DEQUEUED \
|
|
163 if (gc_state.phase == REPUSH_ROOT_SET) \
|
|
164 { \
|
|
165 GC_STAT_TICK (dequeued2); \
|
|
166 } \
|
|
167 else \
|
|
168 { \
|
|
169 GC_STAT_TICK (dequeued); \
|
|
170 }
|
|
171 # define GC_STAT_REPUSHED GC_STAT_TICK (repushed)
|
|
172
|
|
173 #define GC_STAT_RESUME(stat) \
|
|
174 gc_state.stat[GC_STAT_IN_LAST_CYCLE] = \
|
|
175 gc_state.stat[GC_STAT_IN_THIS_CYCLE]; \
|
|
176 gc_state.stat[GC_STAT_IN_THIS_CYCLE] = 0
|
|
177
|
|
178 #define GC_STAT_RESTART(stat) \
|
|
179 gc_state.stat[GC_STAT_IN_LAST_GC] = \
|
|
180 gc_state.stat[GC_STAT_IN_THIS_GC]; \
|
|
181 gc_state.stat[GC_STAT_IN_THIS_GC] = 0; \
|
|
182 GC_STAT_RESUME (stat)
|
|
183
|
|
/* Begin statistics for a brand-new collection: bump the GC and cycle
   counts and roll every per-GC/per-cycle window over. */
void
gc_stat_start_new_gc (void)
{
  gc_state.n_gc[GC_STAT_TOTAL]++;
  gc_state.n_cycles[GC_STAT_TOTAL]++;
  gc_state.n_cycles[GC_STAT_IN_LAST_GC] = gc_state.n_cycles[GC_STAT_IN_THIS_GC];
  gc_state.n_cycles[GC_STAT_IN_THIS_GC] = 1;

  GC_STAT_RESTART (enqueued);
  GC_STAT_RESTART (dequeued);
  GC_STAT_RESTART (repushed);
  GC_STAT_RESTART (finalized);
  GC_STAT_RESTART (enqueued2);
  GC_STAT_RESTART (dequeued2);
  GC_STAT_RESTART (freed);
}
|
|
200
|
|
/* Begin statistics for a new cycle of an already-running incremental
   collection: bump the cycle counts and roll the per-cycle windows
   over (the per-GC windows keep accumulating). */
void
gc_stat_resume_gc (void)
{
  gc_state.n_cycles[GC_STAT_TOTAL]++;
  gc_state.n_cycles[GC_STAT_IN_THIS_GC]++;
  GC_STAT_RESUME (enqueued);
  GC_STAT_RESUME (dequeued);
  GC_STAT_RESUME (repushed);
  GC_STAT_RESUME (finalized);
  GC_STAT_RESUME (enqueued2);
  GC_STAT_RESUME (dequeued2);
  GC_STAT_RESUME (freed);
}
|
|
214
|
|
/* Count one finalized object in the GC statistics. */
void
gc_stat_finalized (void)
{
  GC_STAT_TICK (finalized);
}
|
|
220
|
|
/* Count one freed object in the GC statistics. */
void
gc_stat_freed (void)
{
  GC_STAT_TICK (freed);
}
|
|
226
|
|
227 DEFUN("gc-stats", Fgc_stats, 0, 0 ,"", /*
|
|
228 Return statistics about garbage collection cycles in a property list.
|
|
229 */
|
|
230 ())
|
|
231 {
|
|
232 Lisp_Object pl = Qnil;
|
|
233 #define PL(name,value) \
|
3313
|
234 pl = cons3 (intern (name), make_float (gc_state.value), pl)
|
3092
|
235
|
|
236 PL ("freed-in-this-cycle", freed[GC_STAT_IN_THIS_CYCLE]);
|
|
237 PL ("freed-in-this-gc", freed[GC_STAT_IN_THIS_GC]);
|
|
238 PL ("freed-in-last-cycle", freed[GC_STAT_IN_LAST_CYCLE]);
|
|
239 PL ("freed-in-last-gc", freed[GC_STAT_IN_LAST_GC]);
|
|
240 PL ("freed-total", freed[GC_STAT_TOTAL]);
|
|
241 PL ("finalized-in-this-cycle", finalized[GC_STAT_IN_THIS_CYCLE]);
|
|
242 PL ("finalized-in-this-gc", finalized[GC_STAT_IN_THIS_GC]);
|
|
243 PL ("finalized-in-last-cycle", finalized[GC_STAT_IN_LAST_CYCLE]);
|
|
244 PL ("finalized-in-last-gc", finalized[GC_STAT_IN_LAST_GC]);
|
|
245 PL ("finalized-total", finalized[GC_STAT_TOTAL]);
|
|
246 PL ("repushed-in-this-cycle", repushed[GC_STAT_IN_THIS_CYCLE]);
|
|
247 PL ("repushed-in-this-gc", repushed[GC_STAT_IN_THIS_GC]);
|
|
248 PL ("repushed-in-last-cycle", repushed[GC_STAT_IN_LAST_CYCLE]);
|
|
249 PL ("repushed-in-last-gc", repushed[GC_STAT_IN_LAST_GC]);
|
|
250 PL ("repushed-total", repushed[GC_STAT_TOTAL]);
|
|
251 PL ("dequeued2-in-this-cycle", dequeued2[GC_STAT_IN_THIS_CYCLE]);
|
|
252 PL ("dequeued2-in-this-gc", dequeued2[GC_STAT_IN_THIS_GC]);
|
|
253 PL ("dequeued2-in-last-cycle", dequeued2[GC_STAT_IN_LAST_CYCLE]);
|
|
254 PL ("dequeued2-in-last-gc", dequeued2[GC_STAT_IN_LAST_GC]);
|
|
255 PL ("dequeued2-total", dequeued2[GC_STAT_TOTAL]);
|
|
256 PL ("enqueued2-in-this-cycle", enqueued2[GC_STAT_IN_THIS_CYCLE]);
|
|
257 PL ("enqueued2-in-this-gc", enqueued2[GC_STAT_IN_THIS_GC]);
|
|
258 PL ("enqueued2-in-last-cycle", enqueued2[GC_STAT_IN_LAST_CYCLE]);
|
|
259 PL ("enqueued2-in-last-gc", enqueued2[GC_STAT_IN_LAST_GC]);
|
|
260 PL ("enqueued2-total", enqueued2[GC_STAT_TOTAL]);
|
|
261 PL ("dequeued-in-this-cycle", dequeued[GC_STAT_IN_THIS_CYCLE]);
|
|
262 PL ("dequeued-in-this-gc", dequeued[GC_STAT_IN_THIS_GC]);
|
|
263 PL ("dequeued-in-last-cycle", dequeued[GC_STAT_IN_LAST_CYCLE]);
|
|
264 PL ("dequeued-in-last-gc", dequeued[GC_STAT_IN_LAST_GC]);
|
|
265 PL ("dequeued-total", dequeued[GC_STAT_TOTAL]);
|
|
266 PL ("enqueued-in-this-cycle", enqueued[GC_STAT_IN_THIS_CYCLE]);
|
|
267 PL ("enqueued-in-this-gc", enqueued[GC_STAT_IN_THIS_GC]);
|
|
268 PL ("enqueued-in-last-cycle", enqueued[GC_STAT_IN_LAST_CYCLE]);
|
|
269 PL ("enqueued-in-last-gc", enqueued[GC_STAT_IN_LAST_GC]);
|
|
270 PL ("enqueued-total", enqueued[GC_STAT_TOTAL]);
|
|
271 PL ("n-cycles-in-this-gc", n_cycles[GC_STAT_IN_THIS_GC]);
|
|
272 PL ("n-cycles-in-last-gc", n_cycles[GC_STAT_IN_LAST_GC]);
|
|
273 PL ("n-cycles-total", n_cycles[GC_STAT_TOTAL]);
|
|
274 PL ("n-gc-total", n_gc[GC_STAT_TOTAL]);
|
|
275 PL ("phase", phase);
|
|
276 return pl;
|
|
277 }
|
|
#else /* not ERROR_CHECK_GC */
/* Without GC error checking all statistics hooks compile to nothing. */
# define GC_STAT_START_NEW_GC
# define GC_STAT_RESUME_GC
# define GC_STAT_ENQUEUED
# define GC_STAT_DEQUEUED
# define GC_STAT_REPUSHED
# define GC_STAT_REMOVED
#endif /* not ERROR_CHECK_GC */
#endif /* NEW_GC */
|
|
287
|
|
288
|
|
/************************************************************************/
/*              Recompute need to garbage collect                       */
/************************************************************************/

/* Set when enough consing has happened that a GC should run soon;
   recomputed by recompute_need_to_garbage_collect below. */
int need_to_garbage_collect;

#ifdef ERROR_CHECK_GC
int always_gc = 0;		/* Debugging hack; equivalent to
				   (setq gc-cons-threshold -1) */
#else
#define always_gc 0
#endif
|
|
301
|
|
/* Recompute need_to_garbage_collect: true if it's time to garbage
   collect now, judged by how much consing has occurred since the last
   GC relative to the configured thresholds. */
void
recompute_need_to_garbage_collect (void)
{
  if (always_gc)
    need_to_garbage_collect = 1;
  else
    need_to_garbage_collect =
#ifdef NEW_GC
      /* While the write barrier is active an incremental collection
	 is underway, which uses the (much smaller) per-cycle
	 threshold. */
      write_barrier_enabled ?
      (consing_since_gc > gc_cons_incremental_threshold) :
#endif /* NEW_GC */
      (consing_since_gc > gc_cons_threshold
       &&
#if 0 /* #### implement this better */
       (100 * consing_since_gc) / total_data_usage () >=
       gc_cons_percentage
#else
       /* If total heap usage is unknown, fall back to the absolute
	  threshold alone; otherwise also require the percentage
	  criterion. */
       (!total_gc_usage_set ||
	(100 * consing_since_gc) / total_gc_usage >=
	gc_cons_percentage)
#endif
       );
  recompute_funcall_allocation_flag ();
}
|
|
327
|
|
328
|
|
329
|
|
330 /************************************************************************/
|
|
331 /* Mark Phase */
|
|
332 /************************************************************************/
|
|
333
|
|
/* Description of a bare Lisp_Object, used wherever a lone
   Lisp_Object must be described to the KKCC marker or the portable
   dumper. */
static const struct memory_description lisp_object_description_1[] = {
  { XD_LISP_OBJECT, 0 },
  { XD_END }
};

const struct sized_memory_description lisp_object_description = {
  sizeof (Lisp_Object),
  lisp_object_description_1
};
|
|
343
|
|
344 #if defined (USE_KKCC) || defined (PDUMP)
|
|
345
|
|
/* This function extracts the value of a count variable described somewhere
   else in the description. It is converted corresponding to the type.
   CODE is an XD_INDIRECT encoding naming the description line that
   holds the count and a constant delta to add to it. */
EMACS_INT
lispdesc_indirect_count_1 (EMACS_INT code,
			   const struct memory_description *idesc,
			   const void *idata)
{
  EMACS_INT count;
  const void *irdata;

  int line = XD_INDIRECT_VAL (code);
  int delta = XD_INDIRECT_DELTA (code);

  /* Locate the count field inside the object; the offset itself may
     be indirect. */
  irdata = ((char *) idata) +
    lispdesc_indirect_count (idesc[line].offset, idesc, idata);
  /* Read the count with the width declared for that line. */
  switch (idesc[line].type)
    {
    case XD_BYTECOUNT:
      count = * (Bytecount *) irdata;
      break;
    case XD_ELEMCOUNT:
      count = * (Elemcount *) irdata;
      break;
    case XD_HASHCODE:
      count = * (Hashcode *) irdata;
      break;
    case XD_INT:
      count = * (int *) irdata;
      break;
    case XD_LONG:
      count = * (long *) irdata;
      break;
    default:
      /* Not a countable type: dump diagnostics and abort. */
      stderr_out ("Unsupported count type : %d (line = %d, code = %ld)\n",
		  idesc[line].type, line, (long) code);
#if defined(USE_KKCC) && defined(DEBUG_XEMACS)
      if (gc_in_progress)
	kkcc_backtrace ();
#endif
#ifdef PDUMP
      if (in_pdump)
	pdump_backtrace ();
#endif
      count = 0; /* warning suppression */
      ABORT ();
    }
  count += delta;
  return count;
}
|
|
395
|
|
396 /* SDESC is a "description map" (basically, a list of offsets used for
|
|
397 successive indirections) and OBJ is the first object to indirect off of.
|
|
398 Return the description ultimately found. */
|
|
399
|
|
400 const struct sized_memory_description *
|
|
401 lispdesc_indirect_description_1 (const void *obj,
|
|
402 const struct sized_memory_description *sdesc)
|
|
403 {
|
|
404 int pos;
|
|
405
|
|
406 for (pos = 0; sdesc[pos].size >= 0; pos++)
|
|
407 obj = * (const void **) ((const char *) obj + sdesc[pos].size);
|
|
408
|
|
409 return (const struct sized_memory_description *) obj;
|
|
410 }
|
|
411
|
|
/* Compute the size of the data at RDATA, described by a single entry
   DESC1 in a description array.  OBJ and DESC are used for
   XD_INDIRECT references. */

static Bytecount
lispdesc_one_description_line_size (void *rdata,
				    const struct memory_description *desc1,
				    const void *obj,
				    const struct memory_description *desc)
{
 union_switcheroo:
  switch (desc1->type)
    {
    case XD_LISP_OBJECT_ARRAY:
      {
	/* Inline array of Lisp_Objects; the element count may be
	   indirect. */
	EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj);
	return (val * sizeof (Lisp_Object));
      }
    case XD_LISP_OBJECT:
    case XD_LO_LINK:
      return sizeof (Lisp_Object);
    case XD_OPAQUE_PTR:
      return sizeof (void *);
#ifdef NEW_GC
    case XD_LISP_OBJECT_BLOCK_PTR:
#endif /* NEW_GC */
    case XD_BLOCK_PTR:
      {
	/* Array of pointers; only the pointers themselves live here. */
	EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj);
	return val * sizeof (void *);
      }
    case XD_BLOCK_ARRAY:
      {
	/* Inline array of described blocks: count times per-block
	   size. */
	EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj);

	return (val *
		lispdesc_block_size
		(rdata,
		 lispdesc_indirect_description (obj, desc1->data2.descr)));
      }
    case XD_OPAQUE_DATA_PTR:
      return sizeof (void *);
    case XD_UNION_DYNAMIC_SIZE:
      {
	/* If an explicit size was given in the first-level structure
	   description, use it; else compute size based on current union
	   constant. */
	const struct sized_memory_description *sdesc =
	  lispdesc_indirect_description (obj, desc1->data2.descr);
	if (sdesc->size)
	  return sdesc->size;
	else
	  {
	    /* Resolve which union member is currently active and
	       re-dispatch on its description line. */
	    desc1 = lispdesc_process_xd_union (desc1, desc, obj);
	    if (desc1)
	      goto union_switcheroo;
	    break;
	  }
      }
    case XD_UNION:
      {
	/* If an explicit size was given in the first-level structure
	   description, use it; else compute size based on maximum of all
	   possible structures. */
	const struct sized_memory_description *sdesc =
	  lispdesc_indirect_description (obj, desc1->data2.descr);
	if (sdesc->size)
	  return sdesc->size;
	else
	  {
	    int count;
	    Bytecount max_size = -1, size;

	    desc1 = sdesc->description;

	    for (count = 0; desc1[count].type != XD_END; count++)
	      {
		size = lispdesc_one_description_line_size (rdata,
							   &desc1[count],
							   obj, desc);
		if (size > max_size)
		  max_size = size;
	      }
	    return max_size;
	  }
      }
    case XD_ASCII_STRING:
      return sizeof (void *);
    case XD_DOC_STRING:
      return sizeof (void *);
    case XD_INT_RESET:
      return sizeof (int);
    case XD_BYTECOUNT:
      return sizeof (Bytecount);
    case XD_ELEMCOUNT:
      return sizeof (Elemcount);
    case XD_HASHCODE:
      return sizeof (Hashcode);
    case XD_INT:
      return sizeof (int);
    case XD_LONG:
      return sizeof (long);
    default:
      stderr_out ("Unsupported dump type : %d\n", desc1->type);
      ABORT ();
    }

  /* Only reached via the XD_UNION_DYNAMIC_SIZE `break' when no union
     member could be resolved. */
  return 0;
}
|
|
521
|
|
522
|
|
523 /* Return the size of the memory block (NOT necessarily a structure!)
|
|
524 described by SDESC and pointed to by OBJ. If SDESC records an
|
|
525 explicit size (i.e. non-zero), it is simply returned; otherwise,
|
|
526 the size is calculated by the maximum offset and the size of the
|
|
527 object at that offset, rounded up to the maximum alignment. In
|
|
528 this case, we may need the object, for example when retrieving an
|
|
529 "indirect count" of an inlined array (the count is not constant,
|
|
530 but is specified by one of the elements of the memory block). (It
|
|
531 is generally not a problem if we return an overly large size -- we
|
|
532 will simply end up reserving more space than necessary; but if the
|
|
533 size is too small we could be in serious trouble, in particular
|
|
534 with nested inlined structures, where there may be alignment
|
|
535 padding in the middle of a block. #### In fact there is an (at
|
|
536 least theoretical) problem with an overly large size -- we may
|
|
537 trigger a protection fault when reading from invalid memory. We
|
|
538 need to handle this -- perhaps in a stupid but dependable way,
|
|
539 i.e. by trapping SIGSEGV and SIGBUS.) */
|
|
540
|
|
541 Bytecount
|
|
542 lispdesc_block_size_1 (const void *obj, Bytecount size,
|
|
543 const struct memory_description *desc)
|
|
544 {
|
|
545 EMACS_INT max_offset = -1;
|
|
546 int max_offset_pos = -1;
|
|
547 int pos;
|
|
548
|
|
549 if (size)
|
|
550 return size;
|
|
551
|
|
552 for (pos = 0; desc[pos].type != XD_END; pos++)
|
|
553 {
|
|
554 EMACS_INT offset = lispdesc_indirect_count (desc[pos].offset, desc, obj);
|
|
555 if (offset == max_offset)
|
|
556 {
|
|
557 stderr_out ("Two relocatable elements at same offset?\n");
|
|
558 ABORT ();
|
|
559 }
|
|
560 else if (offset > max_offset)
|
|
561 {
|
|
562 max_offset = offset;
|
|
563 max_offset_pos = pos;
|
|
564 }
|
|
565 }
|
|
566
|
|
567 if (max_offset_pos < 0)
|
|
568 return 0;
|
|
569
|
|
570 {
|
|
571 Bytecount size_at_max;
|
|
572 size_at_max =
|
|
573 lispdesc_one_description_line_size ((char *) obj + max_offset,
|
|
574 &desc[max_offset_pos], obj, desc);
|
|
575
|
|
576 /* We have no way of knowing the required alignment for this structure,
|
|
577 so just make it maximally aligned. */
|
|
578 return MAX_ALIGN_SIZE (max_offset + size_at_max);
|
|
579 }
|
|
580 }
|
|
581 #endif /* defined (USE_KKCC) || defined (PDUMP) */
|
|
582
|
3263
|
#ifdef NEW_GC
/* Assert (under GC error checking) that LHEADER does not belong to an
   already-freed object. */
#define GC_CHECK_NOT_FREE(lheader)			\
      gc_checking_assert (! LRECORD_FREE_P (lheader));
#else /* not NEW_GC */
/* Assert (under GC error checking) that LHEADER does not belong to an
   already-freed object; for non-basic objects also check the
   lcrecord-level free flag. */
#define GC_CHECK_NOT_FREE(lheader)					\
      gc_checking_assert (! LRECORD_FREE_P (lheader));			\
      gc_checking_assert (LHEADER_IMPLEMENTATION (lheader)->basic_p ||	\
			  ! ((struct old_lcrecord_header *) lheader)->free)
#endif /* not NEW_GC */
|
3092
|
592
|
|
#ifdef USE_KKCC
/* The following functions implement the new mark algorithm.
   They mark objects according to their descriptions.  They
   are modeled on the corresponding pdumper procedures. */

#if 0
/* Define this to process the mark stack in FIFO (queue) order rather
   than LIFO. */
# define KKCC_STACK_AS_QUEUE 1
#endif

#ifdef DEBUG_XEMACS
/* The backtrace for the KKCC mark functions. */
#define KKCC_INIT_BT_STACK_SIZE 4096

/* One backtrace frame: the object being traversed, its memory
   description, and the position within that description. */
typedef struct
{
  void *obj;
  const struct memory_description *desc;
  int pos;
} kkcc_bt_stack_entry;

/* Growable backtrace stack; DEPTH is the current number of frames. */
static kkcc_bt_stack_entry *kkcc_bt;
static int kkcc_bt_stack_size;
static int kkcc_bt_depth = 0;
|
|
616
|
|
617 static void
|
|
618 kkcc_bt_init (void)
|
|
619 {
|
|
620 kkcc_bt_depth = 0;
|
|
621 kkcc_bt_stack_size = KKCC_INIT_BT_STACK_SIZE;
|
|
622 kkcc_bt = (kkcc_bt_stack_entry *)
|
|
623 xmalloc_and_zero (kkcc_bt_stack_size * sizeof (kkcc_bt_stack_entry));
|
|
624 if (!kkcc_bt)
|
|
625 {
|
|
626 stderr_out ("KKCC backtrace stack init failed for size %d\n",
|
|
627 kkcc_bt_stack_size);
|
|
628 ABORT ();
|
|
629 }
|
|
630 }
|
|
631
|
|
632 void
|
|
633 kkcc_backtrace (void)
|
|
634 {
|
|
635 int i;
|
|
636 stderr_out ("KKCC mark stack backtrace :\n");
|
|
637 for (i = kkcc_bt_depth - 1; i >= 0; i--)
|
|
638 {
|
|
639 Lisp_Object obj = wrap_pointer_1 (kkcc_bt[i].obj);
|
|
640 stderr_out (" [%d]", i);
|
|
641 if ((XRECORD_LHEADER (obj)->type >= lrecord_type_last_built_in_type)
|
|
642 || (!LRECORDP (obj))
|
|
643 || (!XRECORD_LHEADER_IMPLEMENTATION (obj)))
|
|
644 {
|
|
645 stderr_out (" non Lisp Object");
|
|
646 }
|
|
647 else
|
|
648 {
|
|
649 stderr_out (" %s",
|
|
650 XRECORD_LHEADER_IMPLEMENTATION (obj)->name);
|
|
651 }
|
3519
|
652 stderr_out (" (addr: %p, desc: %p, ",
|
|
653 (void *) kkcc_bt[i].obj,
|
|
654 (void *) kkcc_bt[i].desc);
|
3092
|
655 if (kkcc_bt[i].pos >= 0)
|
|
656 stderr_out ("pos: %d)\n", kkcc_bt[i].pos);
|
|
657 else
|
|
658 if (kkcc_bt[i].pos == -1)
|
|
659 stderr_out ("root set)\n");
|
|
660 else if (kkcc_bt[i].pos == -2)
|
|
661 stderr_out ("dirty object)\n");
|
|
662 }
|
|
663 }
|
|
664
|
|
/* Double the KKCC backtrace stack, preserving its contents; aborts if
   the reallocation fails. */
static void
kkcc_bt_stack_realloc (void)
{
  kkcc_bt_stack_size *= 2;
  kkcc_bt = (kkcc_bt_stack_entry *)
    xrealloc (kkcc_bt, kkcc_bt_stack_size * sizeof (kkcc_bt_stack_entry));
  if (!kkcc_bt)
    {
      stderr_out ("KKCC backtrace stack realloc failed for size %d\n",
		  kkcc_bt_stack_size);
      ABORT ();
    }
}
|
|
678
|
|
/* Release the KKCC backtrace stack. */
static void
kkcc_bt_free (void)
{
  xfree_1 (kkcc_bt);
  kkcc_bt = 0;
  kkcc_bt_stack_size = 0;
}
|
|
686
|
|
687 static void
|
|
688 kkcc_bt_push (void *obj, const struct memory_description *desc,
|
|
689 int level, int pos)
|
|
690 {
|
|
691 kkcc_bt_depth = level;
|
|
692 kkcc_bt[kkcc_bt_depth].obj = obj;
|
|
693 kkcc_bt[kkcc_bt_depth].desc = desc;
|
|
694 kkcc_bt[kkcc_bt_depth].pos = pos;
|
|
695 kkcc_bt_depth++;
|
|
696 if (kkcc_bt_depth >= kkcc_bt_stack_size)
|
|
697 kkcc_bt_stack_realloc ();
|
|
698 }
|
|
699
|
|
#else /* not DEBUG_XEMACS */
/* Without debugging support the backtrace hooks compile to nothing. */
#define kkcc_bt_init()
#define kkcc_bt_push(obj, desc, level, pos)
#endif /* not DEBUG_XEMACS */
|
|
704
|
|
/* Object memory descriptions are in the lrecord_implementation structure.
   But copying them to a parallel array is much more cache-friendly. */
const struct memory_description *lrecord_memory_descriptions[countof (lrecord_implementations_table)];

/* the initial stack size in kkcc_gc_stack_entries */
#define KKCC_INIT_GC_STACK_SIZE 16384
|
|
711
|
|
/* One entry of the KKCC mark stack: the data to traverse plus the
   memory description that says how to traverse it. */
typedef struct
{
  void *data;
  const struct memory_description *desc;
#ifdef DEBUG_XEMACS
  int level;
  int pos;
#endif
} kkcc_gc_stack_entry;


/* The mark stack lives in a growable circular buffer delimited by
   FRONT (oldest entry) and REAR (newest entry). */
static kkcc_gc_stack_entry *kkcc_gc_stack_ptr;
static int kkcc_gc_stack_front;
static int kkcc_gc_stack_rear;
static int kkcc_gc_stack_size;

/* Arguments are fully parenthesized so the macros expand correctly
   for compound index expressions (the original expanded `i'
   unparenthesized inside `i + 1'). */
#define KKCC_INC(i) (((i) + 1) % kkcc_gc_stack_size)
#define KKCC_INC2(i) (((i) + 2) % kkcc_gc_stack_size)

/* One slot is kept unused so that a full buffer is distinguishable
   from an empty one. */
#define KKCC_GC_STACK_FULL (KKCC_INC2 (kkcc_gc_stack_rear) == kkcc_gc_stack_front)
#define KKCC_GC_STACK_EMPTY (KKCC_INC (kkcc_gc_stack_rear) == kkcc_gc_stack_front)
|
|
733
|
|
734 static void
|
|
735 kkcc_gc_stack_init (void)
|
|
736 {
|
|
737 kkcc_gc_stack_size = KKCC_INIT_GC_STACK_SIZE;
|
|
738 kkcc_gc_stack_ptr = (kkcc_gc_stack_entry *)
|
|
739 xmalloc_and_zero (kkcc_gc_stack_size * sizeof (kkcc_gc_stack_entry));
|
|
740 if (!kkcc_gc_stack_ptr)
|
|
741 {
|
|
742 stderr_out ("stack init failed for size %d\n", kkcc_gc_stack_size);
|
|
743 ABORT ();
|
|
744 }
|
|
745 kkcc_gc_stack_front = 0;
|
|
746 kkcc_gc_stack_rear = kkcc_gc_stack_size - 1;
|
|
747 }
|
|
748
|
|
/* Release the KKCC mark stack and reset its bookkeeping. */
static void
kkcc_gc_stack_free (void)
{
  xfree_1 (kkcc_gc_stack_ptr);
  kkcc_gc_stack_ptr = 0;
  kkcc_gc_stack_front = 0;
  kkcc_gc_stack_rear = 0;
  kkcc_gc_stack_size = 0;
}
|
|
758
|
|
/* Double the KKCC mark stack, copying the live entries of the old
   circular buffer to the start of the new one; aborts if the
   allocation fails. */
static void
kkcc_gc_stack_realloc (void)
{
  kkcc_gc_stack_entry *old_ptr = kkcc_gc_stack_ptr;
  int old_size = kkcc_gc_stack_size;
  kkcc_gc_stack_size *= 2;
  kkcc_gc_stack_ptr = (kkcc_gc_stack_entry *)
    xmalloc_and_zero (kkcc_gc_stack_size * sizeof (kkcc_gc_stack_entry));
  if (!kkcc_gc_stack_ptr)
    {
      stderr_out ("stack realloc failed for size %d\n", kkcc_gc_stack_size);
      ABORT ();
    }
  if (kkcc_gc_stack_rear >= kkcc_gc_stack_front)
    {
      /* The live entries are contiguous in the old buffer: one copy. */
      int number_elements = kkcc_gc_stack_rear - kkcc_gc_stack_front + 1;
      memcpy (kkcc_gc_stack_ptr, &old_ptr[kkcc_gc_stack_front],
	      number_elements * sizeof (kkcc_gc_stack_entry));
      kkcc_gc_stack_front = 0;
      kkcc_gc_stack_rear = number_elements - 1;
    }
  else
    {
      /* The live entries wrap around the end of the old buffer: copy
	 the tail piece, then the head piece after it. */
      int number_elements = old_size - kkcc_gc_stack_front;
      memcpy (kkcc_gc_stack_ptr, &old_ptr[kkcc_gc_stack_front],
	      number_elements * sizeof (kkcc_gc_stack_entry));
      memcpy (&kkcc_gc_stack_ptr[number_elements], &old_ptr[0],
	      (kkcc_gc_stack_rear + 1) * sizeof (kkcc_gc_stack_entry));
      kkcc_gc_stack_front = 0;
      kkcc_gc_stack_rear = kkcc_gc_stack_rear + number_elements;
    }
  xfree_1 (old_ptr);
}
|
|
792
|
|
/* Append one (data, description) pair to the KKCC mark stack, growing
   the buffer if necessary.  LEVEL/POS record the traversal position
   for the debug backtrace. */
static void
#ifdef DEBUG_XEMACS
kkcc_gc_stack_push_1 (void *data, const struct memory_description *desc,
		      int level, int pos)
#else
kkcc_gc_stack_push_1 (void *data, const struct memory_description *desc)
#endif
{
#ifdef NEW_GC
  GC_STAT_ENQUEUED;
#endif /* NEW_GC */
  if (KKCC_GC_STACK_FULL)
      kkcc_gc_stack_realloc();
  kkcc_gc_stack_rear = KKCC_INC (kkcc_gc_stack_rear);
  kkcc_gc_stack_ptr[kkcc_gc_stack_rear].data = data;
  kkcc_gc_stack_ptr[kkcc_gc_stack_rear].desc = desc;
#ifdef DEBUG_XEMACS
  kkcc_gc_stack_ptr[kkcc_gc_stack_rear].level = level;
  kkcc_gc_stack_ptr[kkcc_gc_stack_rear].pos = pos;
#endif
}

/* Wrapper that drops the debug-only arguments in non-debug builds. */
#ifdef DEBUG_XEMACS
#define kkcc_gc_stack_push(data, desc, level, pos)	\
  kkcc_gc_stack_push_1 (data, desc, level, pos)
#else
#define kkcc_gc_stack_push(data, desc, level, pos)	\
  kkcc_gc_stack_push_1 (data, desc)
#endif
|
|
822
|
|
/* Remove and return the next entry of the KKCC mark stack (LIFO by
   default, FIFO when KKCC_STACK_AS_QUEUE is defined), or 0 if the
   stack is empty.  The returned pointer stays valid only until the
   next push (which may realloc the buffer). */
static kkcc_gc_stack_entry *
kkcc_gc_stack_pop (void)
{
  if (KKCC_GC_STACK_EMPTY)
    return 0;
#ifdef NEW_GC
  GC_STAT_DEQUEUED;
#endif /* NEW_GC */
#ifndef KKCC_STACK_AS_QUEUE
  /* stack behaviour */
  return &kkcc_gc_stack_ptr[kkcc_gc_stack_rear--];
#else
  /* queue behaviour */
  {
    int old_front = kkcc_gc_stack_front;
    kkcc_gc_stack_front = KKCC_INC (kkcc_gc_stack_front);
    return &kkcc_gc_stack_ptr[old_front];
  }
#endif
}
|
|
843
|
|
/* Push the Lisp object OBJ onto the KKCC mark stack if it is a
   not-yet-marked record.  The object is marked (grey under NEW_GC)
   before pushing so it is enqueued at most once. */
void
#ifdef DEBUG_XEMACS
kkcc_gc_stack_push_lisp_object_1 (Lisp_Object obj, int level, int pos)
#else
kkcc_gc_stack_push_lisp_object_1 (Lisp_Object obj)
#endif
{
  if (XTYPE (obj) == Lisp_Type_Record)
    {
      struct lrecord_header *lheader = XRECORD_LHEADER (obj);
      const struct memory_description *desc;
      GC_CHECK_LHEADER_INVARIANTS (lheader);
      desc = RECORD_DESCRIPTION (lheader);
      if (! MARKED_RECORD_HEADER_P (lheader))
	{
#ifdef NEW_GC
	  MARK_GREY (lheader);
#else /* not NEW_GC */
	  MARK_RECORD_HEADER (lheader);
#endif /* not NEW_GC */
	  kkcc_gc_stack_push ((void *) lheader, desc, level, pos);
	}
    }
}
|
|
868
|
|
#ifdef NEW_GC
/* Wrapper that drops the debug-only arguments in non-debug builds. */
#ifdef DEBUG_XEMACS
#define kkcc_gc_stack_push_lisp_object(obj, level, pos) \
  kkcc_gc_stack_push_lisp_object_1 (obj, level, pos)
#else
#define kkcc_gc_stack_push_lisp_object(obj, level, pos) \
  kkcc_gc_stack_push_lisp_object_1 (obj)
#endif

/* Re-push a dirty object (one the write barrier caught being
   modified) so its contents are traversed again.  Unlike the normal
   push above, this does not test the mark bit first -- the object is
   already marked and must be scanned anyway. */
void
#ifdef DEBUG_XEMACS
kkcc_gc_stack_repush_dirty_object_1 (Lisp_Object obj, int level, int pos)
#else
kkcc_gc_stack_repush_dirty_object_1 (Lisp_Object obj)
#endif
{
  if (XTYPE (obj) == Lisp_Type_Record)
    {
      struct lrecord_header *lheader = XRECORD_LHEADER (obj);
      const struct memory_description *desc;
      GC_STAT_REPUSHED;
      GC_CHECK_LHEADER_INVARIANTS (lheader);
      desc = RECORD_DESCRIPTION (lheader);
      MARK_GREY (lheader);
      kkcc_gc_stack_push ((void*) lheader, desc, level, pos);
    }
}
#endif /* NEW_GC */
|
|
897
|
|
#ifdef ERROR_CHECK_GC
/* Assert that OBJ is not a freed record, unless ALLOW_FREE is
   nonzero. */
#define KKCC_DO_CHECK_FREE(obj, allow_free)			\
do								\
{								\
  if (!allow_free && XTYPE (obj) == Lisp_Type_Record)		\
    {								\
      struct lrecord_header *lheader = XRECORD_LHEADER (obj);	\
      GC_CHECK_NOT_FREE (lheader);				\
    }								\
} while (0)
#else
#define KKCC_DO_CHECK_FREE(obj, allow_free)
#endif
|
|
911
|
|
#ifdef ERROR_CHECK_GC
/* Verify that OBJ is not a freed record (unless ALLOW_FREE), then
   push it onto the KKCC mark stack. */
#ifdef DEBUG_XEMACS
static void
mark_object_maybe_checking_free_1 (Lisp_Object obj, int allow_free,
				   int level, int pos)
#else
static void
mark_object_maybe_checking_free_1 (Lisp_Object obj, int allow_free)
#endif
{
  KKCC_DO_CHECK_FREE (obj, allow_free);
  kkcc_gc_stack_push_lisp_object (obj, level, pos);
}

/* Wrapper that drops the debug-only arguments in non-debug builds. */
#ifdef DEBUG_XEMACS
#define mark_object_maybe_checking_free(obj, allow_free, level, pos)	\
  mark_object_maybe_checking_free_1 (obj, allow_free, level, pos)
#else
#define mark_object_maybe_checking_free(obj, allow_free, level, pos)	\
  mark_object_maybe_checking_free_1 (obj, allow_free)
#endif
#else /* not ERROR_CHECK_GC */
/* Without error checking, skip the free check and push directly. */
#define mark_object_maybe_checking_free(obj, allow_free, level, pos)	\
  kkcc_gc_stack_push_lisp_object (obj, level, pos)
#endif /* not ERROR_CHECK_GC */
|
|
937
|
|
938
|
|
/* Push each of the COUNT elements of the struct array starting at
   DATA (element layout given by SDESC) onto the KKCC mark stack so
   their contents get traversed. */
static void
#ifdef DEBUG_XEMACS
mark_struct_contents_1 (const void *data,
			const struct sized_memory_description *sdesc,
			int count, int level, int pos)
#else
mark_struct_contents_1 (const void *data,
			const struct sized_memory_description *sdesc,
			int count)
#endif
{
  int i;
  Bytecount elsize;
  /* All elements share one size, computed once from the description. */
  elsize = lispdesc_block_size (data, sdesc);

  for (i = 0; i < count; i++)
    {
      kkcc_gc_stack_push (((char *) data) + elsize * i, sdesc->description,
			  level, pos);
    }
}

/* Wrapper that drops the debug-only arguments in non-debug builds. */
#ifdef DEBUG_XEMACS
#define mark_struct_contents(data, sdesc, count, level, pos)	\
  mark_struct_contents_1 (data, sdesc, count, level, pos)
#else
#define mark_struct_contents(data, sdesc, count, level, pos)	\
  mark_struct_contents_1 (data, sdesc, count)
#endif
|
|
970
|
|
971
|
|
#ifdef NEW_GC
/* Like mark_struct_contents, but for an array of embedded Lisp
   objects: each element is wrapped into a Lisp_Object, and if it is
   an unmarked record it is marked grey and pushed on the KKCC mark
   stack with SDESC's element description. */
static void
#ifdef DEBUG_XEMACS
mark_lisp_object_block_contents_1 (const void *data,
				   const struct sized_memory_description *sdesc,
				   int count, int level, int pos)
#else
mark_lisp_object_block_contents_1 (const void *data,
				   const struct sized_memory_description *sdesc,
				   int count)
#endif
{
  int i;
  Bytecount elsize;
  elsize = lispdesc_block_size (data, sdesc);

  for (i = 0; i < count; i++)
    {
      const Lisp_Object obj = wrap_pointer_1 (((char *) data) + elsize * i);
      if (XTYPE (obj) == Lisp_Type_Record)
	{
	  struct lrecord_header *lheader = XRECORD_LHEADER (obj);
	  const struct memory_description *desc;
	  GC_CHECK_LHEADER_INVARIANTS (lheader);
	  desc = sdesc->description;
	  if (! MARKED_RECORD_HEADER_P (lheader))
	    {
	      /* Grey = reachable but contents not yet traversed. */
	      MARK_GREY (lheader);
	      kkcc_gc_stack_push ((void *) lheader, desc, level, pos);
	    }
	}
    }
}

/* DEBUG builds carry LEVEL/POS backtrace info; otherwise drop them. */
#ifdef DEBUG_XEMACS
#define mark_lisp_object_block_contents(data, sdesc, count, level, pos) \
  mark_lisp_object_block_contents_1 (data, sdesc, count, level, pos)
#else
#define mark_lisp_object_block_contents(data, sdesc, count, level, pos) \
  mark_lisp_object_block_contents_1 (data, sdesc, count)
#endif
#endif /* NEW_GC */
|
|
1016
|
|
/* This function implements the KKCC mark algorithm.
   Instead of calling mark_object, all the alive Lisp_Objects are pushed
   on the kkcc_gc_stack.  This function processes all elements on the stack
   according to their descriptions.

   CNT: under NEW_GC, a nonzero CNT bounds the number of stack entries
   processed (incremental marking); CNT == 0 drains the whole stack.
   Without NEW_GC the argument is unused and the stack is always
   drained. */
static void
kkcc_marking (
#ifdef NEW_GC
	      int cnt
#else /* not NEW_GC */
	      int UNUSED(cnt)
#endif /* not NEW_GC */
	      )
{
  kkcc_gc_stack_entry *stack_entry = 0;
  void *data = 0;
  const struct memory_description *desc = 0;
  int pos;
#ifdef NEW_GC
  int count = cnt;		/* remaining work budget; see loop tail */
#endif /* NEW_GC */
#ifdef DEBUG_XEMACS
  int level = 0;		/* traversal depth, for kkcc backtraces */
#endif

  while ((stack_entry = kkcc_gc_stack_pop ()) != 0)
    {
      data = stack_entry->data;
      desc = stack_entry->desc;
#ifdef DEBUG_XEMACS
      level = stack_entry->level + 1;
#endif
      kkcc_bt_push (data, desc, stack_entry->level, stack_entry->pos);

#ifdef NEW_GC
      /* Mark black if object is currently grey.  This first checks,
	 if the object is really allocated on the mc-heap.  If it is,
	 it can be marked black; if it is not, it cannot be marked. */
      maybe_mark_black (data);
#endif /* NEW_GC */

      if (!data) continue;

      gc_checking_assert (data);
      gc_checking_assert (desc);

      /* Walk the object's memory description entry by entry. */
      for (pos = 0; desc[pos].type != XD_END; pos++)
	{
	  const struct memory_description *desc1 = &desc[pos];
	  const void *rdata =
	    (const char *) data + lispdesc_indirect_count (desc1->offset,
							   desc, data);
	union_switcheroo:

	  /* If the flag says don't mark, then don't mark. */
	  if ((desc1->flags) & XD_FLAG_NO_KKCC)
	    continue;

	  switch (desc1->type)
	    {
	    /* Non-pointer / opaque fields: nothing to traverse. */
	    case XD_BYTECOUNT:
	    case XD_ELEMCOUNT:
	    case XD_HASHCODE:
	    case XD_INT:
	    case XD_LONG:
	    case XD_INT_RESET:
	    case XD_LO_LINK:
	    case XD_OPAQUE_PTR:
	    case XD_OPAQUE_DATA_PTR:
	    case XD_ASCII_STRING:
	    case XD_DOC_STRING:
	      break;
	    case XD_LISP_OBJECT:
	      {
		const Lisp_Object *stored_obj = (const Lisp_Object *) rdata;

		/* Because of the way that tagged objects work (pointers and
		   Lisp_Objects have the same representation), XD_LISP_OBJECT
		   can be used for untagged pointers.  They might be NULL,
		   though. */
		if (EQ (*stored_obj, Qnull_pointer))
		  break;
#ifdef NEW_GC
		mark_object_maybe_checking_free (*stored_obj, 0, level, pos);
#else /* not NEW_GC */
		mark_object_maybe_checking_free
		  (*stored_obj, (desc1->flags) & XD_FLAG_FREE_LISP_OBJECT,
		   level, pos);
#endif /* not NEW_GC */
		break;
	      }
	    case XD_LISP_OBJECT_ARRAY:
	      {
		int i;
		EMACS_INT count =
		  lispdesc_indirect_count (desc1->data1, desc, data);

		for (i = 0; i < count; i++)
		  {
		    const Lisp_Object *stored_obj =
		      (const Lisp_Object *) rdata + i;

		    /* A null pointer terminates traversal of the array. */
		    if (EQ (*stored_obj, Qnull_pointer))
		      break;
#ifdef NEW_GC
		    mark_object_maybe_checking_free
		      (*stored_obj, 0, level, pos);
#else /* not NEW_GC */
		    mark_object_maybe_checking_free
		      (*stored_obj, (desc1->flags) & XD_FLAG_FREE_LISP_OBJECT,
		       level, pos);
#endif /* not NEW_GC */
		  }
		break;
	      }
#ifdef NEW_GC
	    case XD_LISP_OBJECT_BLOCK_PTR:
	      {
		EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc,
							   data);
		const struct sized_memory_description *sdesc =
		  lispdesc_indirect_description (data, desc1->data2.descr);
		const char *dobj = * (const char **) rdata;
		if (dobj)
		  mark_lisp_object_block_contents
		    (dobj, sdesc, count, level, pos);
		break;
	      }
#endif /* NEW_GC */
	    case XD_BLOCK_PTR:
	      {
		EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc,
							   data);
		const struct sized_memory_description *sdesc =
		  lispdesc_indirect_description (data, desc1->data2.descr);
		const char *dobj = * (const char **) rdata;
		if (dobj)
		  mark_struct_contents (dobj, sdesc, count, level, pos);
		break;
	      }
	    case XD_BLOCK_ARRAY:
	      {
		EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc,
							   data);
		const struct sized_memory_description *sdesc =
		  lispdesc_indirect_description (data, desc1->data2.descr);

		mark_struct_contents (rdata, sdesc, count, level, pos);
		break;
	      }
	    case XD_UNION:
	    case XD_UNION_DYNAMIC_SIZE:
	      /* Resolve the union variant, then re-dispatch on it. */
	      desc1 = lispdesc_process_xd_union (desc1, desc, data);
	      if (desc1)
		goto union_switcheroo;
	      break;

	    default:
	      stderr_out ("Unsupported description type : %d\n", desc1->type);
	      kkcc_backtrace ();
	      ABORT ();
	    }
	}

#ifdef NEW_GC
      /* Incremental marking: stop once the work budget is used up. */
      if (cnt)
	if (!--count)
	  break;
#endif /* NEW_GC */
    }
}
|
|
1187 #endif /* USE_KKCC */
|
|
1188
|
|
1189 /* I hate duplicating all this crap! */
|
|
1190 int
|
|
1191 marked_p (Lisp_Object obj)
|
|
1192 {
|
|
1193 /* Checks we used to perform. */
|
|
1194 /* if (EQ (obj, Qnull_pointer)) return 1; */
|
|
1195 /* if (!POINTER_TYPE_P (XGCTYPE (obj))) return 1; */
|
|
1196 /* if (PURIFIED (XPNTR (obj))) return 1; */
|
|
1197
|
|
1198 if (XTYPE (obj) == Lisp_Type_Record)
|
|
1199 {
|
|
1200 struct lrecord_header *lheader = XRECORD_LHEADER (obj);
|
|
1201
|
|
1202 GC_CHECK_LHEADER_INVARIANTS (lheader);
|
|
1203
|
|
1204 return MARKED_RECORD_HEADER_P (lheader);
|
|
1205 }
|
|
1206 return 1;
|
|
1207 }
|
|
1208
|
|
1209
|
|
/* Mark reference to a Lisp_Object.  If the object referred to has not been
   seen yet, recursively mark all the references contained in it.

   Only used by the classic (non-KKCC) collector; when USE_KKCC is
   configured this function must never be called and aborts. */
void
mark_object (
#ifdef USE_KKCC
	     Lisp_Object UNUSED (obj)
#else
	     Lisp_Object obj
#endif
	     )
{
#ifdef USE_KKCC
  /* this code should never be reached when configured for KKCC */
  stderr_out ("KKCC: Invalid mark_object call.\n");
  stderr_out ("Replace mark_object with kkcc_gc_stack_push_lisp_object.\n");
  ABORT ();
#else /* not USE_KKCC */

 tail_recurse:

  /* Checks we used to perform */
  /* if (EQ (obj, Qnull_pointer)) return; */
  /* if (!POINTER_TYPE_P (XGCTYPE (obj))) return; */
  /* if (PURIFIED (XPNTR (obj))) return; */

  if (XTYPE (obj) == Lisp_Type_Record)
    {
      struct lrecord_header *lheader = XRECORD_LHEADER (obj);

      GC_CHECK_LHEADER_INVARIANTS (lheader);

      /* We handle this separately, above, so we can mark free objects */
      GC_CHECK_NOT_FREE (lheader);

      /* All c_readonly objects have their mark bit set,
	 so that we only need to check the mark bit here. */
      if (! MARKED_RECORD_HEADER_P (lheader))
	{
	  MARK_RECORD_HEADER (lheader);

	  if (RECORD_MARKER (lheader))
	    {
	      /* The type's mark method returns one further object to
		 mark; loop on it instead of recursing, to bound C
		 stack usage on long chains. */
	      obj = RECORD_MARKER (lheader) (obj);
	      if (!NILP (obj)) goto tail_recurse;
	    }
	}
    }
#endif /* not USE_KKCC */
}
|
|
1259
|
|
1260
|
|
1261 /************************************************************************/
|
|
1262 /* Hooks */
|
|
1263 /************************************************************************/
|
|
1264
|
|
1265 /* Nonzero when calling certain hooks or doing other things where a GC
|
|
1266 would be bad. It prevents infinite recursive calls to gc. */
|
|
1267 int gc_currently_forbidden;
|
|
1268
|
|
/* Dynamically bind gc_currently_forbidden to 1 so no GC can occur
   until the matching unbind.  Returns the specpdl count to pass to
   end_gc_forbidden() / unbind_to(). */
int
begin_gc_forbidden (void)
{
  return internal_bind_int (&gc_currently_forbidden, 1);
}
|
|
1274
|
|
/* Undo begin_gc_forbidden().  COUNT is the specbind depth it returned. */
void
end_gc_forbidden (int count)
{
  unbind_to (count);
}
|
|
1280
|
|
1281 /* Hooks. */
|
|
1282 Lisp_Object Vpre_gc_hook, Qpre_gc_hook;
|
|
1283 Lisp_Object Vpost_gc_hook, Qpost_gc_hook;
|
|
1284
|
|
1285 /* Maybe we want to use this when doing a "panic" gc after memory_full()? */
|
|
1286 static int gc_hooks_inhibited;
|
|
1287
|
|
1288 struct post_gc_action
|
|
1289 {
|
|
1290 void (*fun) (void *);
|
|
1291 void *arg;
|
|
1292 };
|
|
1293
|
|
1294 typedef struct post_gc_action post_gc_action;
|
|
1295
|
|
1296 typedef struct
|
|
1297 {
|
|
1298 Dynarr_declare (post_gc_action);
|
|
1299 } post_gc_action_dynarr;
|
|
1300
|
|
1301 static post_gc_action_dynarr *post_gc_actions;
|
|
1302
|
|
1303 /* Register an action to be called at the end of GC.
|
|
1304 gc_in_progress is 0 when this is called.
|
|
1305 This is used when it is discovered that an action needs to be taken,
|
|
1306 but it's during GC, so it's not safe. (e.g. in a finalize method.)
|
|
1307
|
|
1308 As a general rule, do not use Lisp objects here.
|
|
1309 And NEVER signal an error.
|
|
1310 */
|
|
1311
|
|
1312 void
|
|
1313 register_post_gc_action (void (*fun) (void *), void *arg)
|
|
1314 {
|
|
1315 post_gc_action action;
|
|
1316
|
|
1317 if (!post_gc_actions)
|
|
1318 post_gc_actions = Dynarr_new (post_gc_action);
|
|
1319
|
|
1320 action.fun = fun;
|
|
1321 action.arg = arg;
|
|
1322
|
|
1323 Dynarr_add (post_gc_actions, action);
|
|
1324 }
|
|
1325
|
|
1326 static void
|
|
1327 run_post_gc_actions (void)
|
|
1328 {
|
|
1329 int i;
|
|
1330
|
|
1331 if (post_gc_actions)
|
|
1332 {
|
|
1333 for (i = 0; i < Dynarr_length (post_gc_actions); i++)
|
|
1334 {
|
|
1335 post_gc_action action = Dynarr_at (post_gc_actions, i);
|
|
1336 (action.fun) (action.arg);
|
|
1337 }
|
|
1338
|
|
1339 Dynarr_reset (post_gc_actions);
|
|
1340 }
|
|
1341 }
|
|
1342
|
3263
|
1343 #ifdef NEW_GC
|
|
1344 /* Asynchronous finalization. */
|
|
1345 typedef struct finalize_elem
|
|
1346 {
|
|
1347 Lisp_Object obj;
|
|
1348 struct finalize_elem *next;
|
|
1349 } finalize_elem;
|
|
1350
|
|
1351 finalize_elem *Vall_finalizable_objs;
|
|
1352 Lisp_Object Vfinalizers_to_run;
|
|
1353
|
|
1354 void
|
|
1355 add_finalizable_obj (Lisp_Object obj)
|
|
1356 {
|
|
1357 finalize_elem *next = Vall_finalizable_objs;
|
|
1358 Vall_finalizable_objs =
|
|
1359 (finalize_elem *) xmalloc_and_zero (sizeof (finalize_elem));
|
|
1360 Vall_finalizable_objs->obj = obj;
|
|
1361 Vall_finalizable_objs->next = next;
|
|
1362 }
|
|
1363
|
|
1364 void
|
|
1365 register_for_finalization (void)
|
|
1366 {
|
|
1367 finalize_elem *rest = Vall_finalizable_objs;
|
|
1368
|
|
1369 if (!rest)
|
|
1370 return;
|
|
1371
|
|
1372 while (!marked_p (rest->obj))
|
|
1373 {
|
|
1374 finalize_elem *temp = rest;
|
|
1375 Vfinalizers_to_run = Fcons (rest->obj, Vfinalizers_to_run);
|
|
1376 Vall_finalizable_objs = rest->next;
|
|
1377 xfree (temp, finalize_elem *);
|
|
1378 rest = Vall_finalizable_objs;
|
|
1379 }
|
|
1380
|
|
1381 while (rest->next)
|
|
1382 {
|
|
1383 if (LRECORDP (rest->next->obj)
|
|
1384 && !marked_p (rest->next->obj))
|
|
1385 {
|
|
1386 finalize_elem *temp = rest->next;
|
|
1387 Vfinalizers_to_run = Fcons (rest->next->obj, Vfinalizers_to_run);
|
|
1388 rest->next = rest->next->next;
|
|
1389 xfree (temp, finalize_elem *);
|
|
1390 }
|
|
1391 else
|
|
1392 {
|
|
1393 rest = rest->next;
|
|
1394 }
|
|
1395 }
|
|
1396 /* Keep objects alive that need to be finalized by marking
|
|
1397 Vfinalizers_to_run transitively. */
|
|
1398 kkcc_gc_stack_push_lisp_object (Vfinalizers_to_run, 0, -1);
|
|
1399 kkcc_marking (0);
|
|
1400 }
|
|
1401
|
|
/* Call the C finalizer of every object queued on Vfinalizers_to_run
   by register_for_finalization(), then clear the queue. */
void
run_finalizers (void)
{
  Lisp_Object rest;
  for (rest = Vfinalizers_to_run; !NILP (rest); rest = XCDR (rest))
    {
      MC_ALLOC_CALL_FINALIZER (XPNTR (XCAR (rest)));
    }
  Vfinalizers_to_run = Qnil;
}
#endif /* NEW_GC */
|
3092
|
1413
|
|
1414
|
|
1415 /************************************************************************/
|
|
1416 /* Garbage Collection */
|
|
1417 /************************************************************************/
|
|
1418
|
|
1419 /* Enable/disable incremental garbage collection during runtime. */
|
|
1420 int allow_incremental_gc;
|
|
1421
|
|
1422 /* For profiling. */
|
|
1423 static Lisp_Object QSin_garbage_collection;
|
|
1424
|
|
1425 /* Nonzero means display messages at beginning and end of GC. */
|
|
1426 int garbage_collection_messages;
|
|
1427
|
|
1428 /* "Garbage collecting" */
|
|
1429 Lisp_Object Vgc_message;
|
|
1430 Lisp_Object Vgc_pointer_glyph;
|
|
1431 static const Ascbyte gc_default_message[] = "Garbage collecting";
|
|
1432 Lisp_Object Qgarbage_collecting;
|
|
1433
|
|
1434 /* "Locals" during GC. */
|
|
1435 struct frame *f;
|
|
1436 int speccount;
|
|
1437 int cursor_changed;
|
|
1438 Lisp_Object pre_gc_cursor;
|
|
1439
|
|
1440 /* PROFILE_DECLARE */
|
|
1441 int do_backtrace;
|
|
1442 struct backtrace backtrace;
|
|
1443
|
|
1444 /* Maximum amount of C stack to save when a GC happens. */
|
|
1445 #ifndef MAX_SAVE_STACK
|
|
1446 #define MAX_SAVE_STACK 0 /* 16000 */
|
|
1447 #endif
|
|
1448
|
|
1449 void
|
3267
|
1450 show_gc_cursor_and_message (void)
|
3092
|
1451 {
|
3267
|
1452 /* Now show the GC cursor/message. */
|
|
1453 pre_gc_cursor = Qnil;
|
|
1454 cursor_changed = 0;
|
3092
|
1455
|
|
1456 /* We used to call selected_frame() here.
|
|
1457
|
|
1458 The following functions cannot be called inside GC
|
|
1459 so we move to after the above tests. */
|
|
1460 {
|
|
1461 Lisp_Object frame;
|
|
1462 Lisp_Object device = Fselected_device (Qnil);
|
|
1463 if (NILP (device)) /* Could happen during startup, eg. if always_gc */
|
|
1464 return;
|
|
1465 frame = Fselected_frame (device);
|
|
1466 if (NILP (frame))
|
|
1467 invalid_state ("No frames exist on device", device);
|
|
1468 f = XFRAME (frame);
|
|
1469 }
|
|
1470
|
|
1471 if (!noninteractive)
|
|
1472 {
|
|
1473 if (FRAME_WIN_P (f))
|
|
1474 {
|
|
1475 Lisp_Object frame = wrap_frame (f);
|
|
1476 Lisp_Object cursor = glyph_image_instance (Vgc_pointer_glyph,
|
|
1477 FRAME_SELECTED_WINDOW (f),
|
|
1478 ERROR_ME_NOT, 1);
|
|
1479 pre_gc_cursor = f->pointer;
|
|
1480 if (POINTER_IMAGE_INSTANCEP (cursor)
|
|
1481 /* don't change if we don't know how to change back. */
|
|
1482 && POINTER_IMAGE_INSTANCEP (pre_gc_cursor))
|
|
1483 {
|
|
1484 cursor_changed = 1;
|
|
1485 Fset_frame_pointer (frame, cursor);
|
|
1486 }
|
|
1487 }
|
|
1488
|
|
1489 /* Don't print messages to the stream device. */
|
|
1490 if (!cursor_changed && !FRAME_STREAM_P (f))
|
|
1491 {
|
|
1492 if (garbage_collection_messages)
|
|
1493 {
|
|
1494 Lisp_Object args[2], whole_msg;
|
|
1495 args[0] = (STRINGP (Vgc_message) ? Vgc_message :
|
|
1496 build_msg_string (gc_default_message));
|
|
1497 args[1] = build_string ("...");
|
|
1498 whole_msg = Fconcat (2, args);
|
|
1499 echo_area_message (f, (Ibyte *) 0, whole_msg, 0, -1,
|
|
1500 Qgarbage_collecting);
|
|
1501 }
|
|
1502 }
|
|
1503 }
|
3267
|
1504 }
|
|
1505
|
|
/* Undo show_gc_cursor_and_message(): restore the saved mouse pointer
   if it was changed, otherwise print the "... done" message (only if
   the echo area still shows our GC message). */
void
remove_gc_cursor_and_message (void)
{
  /* Now remove the GC cursor/message */
  if (!noninteractive)
    {
      if (cursor_changed)
	Fset_frame_pointer (wrap_frame (f), pre_gc_cursor);
      else if (!FRAME_STREAM_P (f))
	{
	  /* Show "...done" only if the echo area would otherwise be empty. */
	  if (NILP (clear_echo_area (selected_frame (),
				     Qgarbage_collecting, 0)))
	    {
	      if (garbage_collection_messages)
		{
		  Lisp_Object args[2], whole_msg;
		  args[0] = (STRINGP (Vgc_message) ? Vgc_message :
			     build_msg_string (gc_default_message));
		  args[1] = build_msg_string ("... done");
		  whole_msg = Fconcat (2, args);
		  echo_area_message (selected_frame (), (Ibyte *) 0,
				     whole_msg, 0, -1,
				     Qgarbage_collecting);
		}
	    }
	}
    }
}
|
|
1535
|
|
/* First phase of a collection: assert GC preconditions, run
   pre-gc-hook, set gc_in_progress, optionally snapshot the C stack
   for debugging, and release ad-hoc resources before marking. */
void
gc_prepare (void)
{
#if MAX_SAVE_STACK > 0
  char stack_top_variable;
  extern char *stack_bottom;
#endif

#ifdef NEW_GC
  GC_STAT_START_NEW_GC;
  GC_SET_PHASE (INIT_GC);
#endif /* NEW_GC */

  do_backtrace = profiling_active || backtrace_with_internal_sections;

  assert (!gc_in_progress);
  assert (!in_display || gc_currently_forbidden);

  PROFILE_RECORD_ENTERING_SECTION (QSin_garbage_collection);

  need_to_signal_post_gc = 0;
  recompute_funcall_allocation_flag ();

  /* Run pre-gc-hook now, while Lisp code can still be executed
     safely; trap any problems so a buggy hook cannot wedge GC. */
  if (!gc_hooks_inhibited)
    run_hook_trapping_problems
      (Qgarbage_collecting, Qpre_gc_hook,
       INHIBIT_EXISTING_PERMANENT_DISPLAY_OBJECT_DELETION);

  /***** Now we actually start the garbage collection. */

  gc_in_progress = 1;
#ifndef NEW_GC
  inhibit_non_essential_conversion_operations = 1;
#endif /* not NEW_GC */

#if MAX_SAVE_STACK > 0

  /* Save a copy of the contents of the stack, for debugging. */
  if (!purify_flag)
    {
      /* Static buffer in which we save a copy of the C stack at each GC. */
      static char *stack_copy;
      static Bytecount stack_copy_size;

      ptrdiff_t stack_diff = &stack_top_variable - stack_bottom;
      Bytecount stack_size = (stack_diff > 0 ? stack_diff : -stack_diff);
      if (stack_size < MAX_SAVE_STACK)
	{
	  if (stack_copy_size < stack_size)
	    {
	      stack_copy = (char *) xrealloc (stack_copy, stack_size);
	      stack_copy_size = stack_size;
	    }

	  /* Stack may grow up or down; copy from the lower address. */
	  memcpy (stack_copy,
		  stack_diff > 0 ? stack_bottom : &stack_top_variable,
		  stack_size);
	}
    }
#endif /* MAX_SAVE_STACK > 0 */

  /* Do some totally ad-hoc resource clearing. */
  /* #### generalize this? */
  clear_event_resource ();
  cleanup_specifiers ();
  cleanup_buffer_undo_lists ();
}
|
|
1603
|
|
1604 void
|
|
1605 gc_mark_root_set (
|
|
1606 #ifdef NEW_GC
|
|
1607 enum gc_phase phase
|
|
1608 #else /* not NEW_GC */
|
|
1609 void
|
|
1610 #endif /* not NEW_GC */
|
|
1611 )
|
|
1612 {
|
|
1613 #ifdef NEW_GC
|
|
1614 GC_SET_PHASE (phase);
|
|
1615 #endif /* NEW_GC */
|
|
1616
|
|
1617 /* Mark all the special slots that serve as the roots of accessibility. */
|
|
1618
|
|
1619 #ifdef USE_KKCC
|
|
1620 # define mark_object(obj) kkcc_gc_stack_push_lisp_object (obj, 0, -1)
|
|
1621 #endif /* USE_KKCC */
|
|
1622
|
|
1623 { /* staticpro() */
|
|
1624 Lisp_Object **p = Dynarr_begin (staticpros);
|
|
1625 Elemcount count;
|
3486
|
1626 for (count = Dynarr_length (staticpros); count; count--, p++)
|
3092
|
1627 /* Need to check if the pointer in the staticpro array is not
|
|
1628 NULL. A gc can occur after variable is added to the staticpro
|
|
1629 array and _before_ it is correctly initialized. In this case
|
|
1630 its value is NULL, which we have to catch here. */
|
|
1631 if (*p)
|
3486
|
1632 mark_object (**p);
|
3092
|
1633 }
|
|
1634
|
|
1635 { /* staticpro_nodump() */
|
|
1636 Lisp_Object **p = Dynarr_begin (staticpros_nodump);
|
|
1637 Elemcount count;
|
3486
|
1638 for (count = Dynarr_length (staticpros_nodump); count; count--, p++)
|
3092
|
1639 /* Need to check if the pointer in the staticpro array is not
|
|
1640 NULL. A gc can occur after variable is added to the staticpro
|
|
1641 array and _before_ it is correctly initialized. In this case
|
|
1642 its value is NULL, which we have to catch here. */
|
|
1643 if (*p)
|
3486
|
1644 mark_object (**p);
|
3092
|
1645 }
|
|
1646
|
3263
|
1647 #ifdef NEW_GC
|
3092
|
1648 { /* mcpro () */
|
|
1649 Lisp_Object *p = Dynarr_begin (mcpros);
|
|
1650 Elemcount count;
|
|
1651 for (count = Dynarr_length (mcpros); count; count--)
|
|
1652 mark_object (*p++);
|
|
1653 }
|
3263
|
1654 #endif /* NEW_GC */
|
3092
|
1655
|
|
1656 { /* GCPRO() */
|
|
1657 struct gcpro *tail;
|
|
1658 int i;
|
|
1659 for (tail = gcprolist; tail; tail = tail->next)
|
|
1660 for (i = 0; i < tail->nvars; i++)
|
|
1661 mark_object (tail->var[i]);
|
|
1662 }
|
|
1663
|
|
1664 { /* specbind() */
|
|
1665 struct specbinding *bind;
|
|
1666 for (bind = specpdl; bind != specpdl_ptr; bind++)
|
|
1667 {
|
|
1668 mark_object (bind->symbol);
|
|
1669 mark_object (bind->old_value);
|
|
1670 }
|
|
1671 }
|
|
1672
|
|
1673 {
|
|
1674 struct catchtag *c;
|
|
1675 for (c = catchlist; c; c = c->next)
|
|
1676 {
|
|
1677 mark_object (c->tag);
|
|
1678 mark_object (c->val);
|
|
1679 mark_object (c->actual_tag);
|
|
1680 mark_object (c->backtrace);
|
|
1681 }
|
|
1682 }
|
|
1683
|
|
1684 {
|
|
1685 struct backtrace *backlist;
|
|
1686 for (backlist = backtrace_list; backlist; backlist = backlist->next)
|
|
1687 {
|
|
1688 int nargs = backlist->nargs;
|
|
1689 int i;
|
|
1690
|
|
1691 mark_object (*backlist->function);
|
|
1692 if (nargs < 0 /* nargs == UNEVALLED || nargs == MANY */
|
|
1693 /* might be fake (internal profiling entry) */
|
|
1694 && backlist->args)
|
|
1695 mark_object (backlist->args[0]);
|
|
1696 else
|
|
1697 for (i = 0; i < nargs; i++)
|
|
1698 mark_object (backlist->args[i]);
|
|
1699 }
|
|
1700 }
|
|
1701
|
|
1702 mark_profiling_info ();
|
|
1703 #ifdef USE_KKCC
|
|
1704 # undef mark_object
|
|
1705 #endif
|
|
1706 }
|
|
1707
|
|
1708 void
|
|
1709 gc_finish_mark (void)
|
|
1710 {
|
|
1711 #ifdef NEW_GC
|
|
1712 GC_SET_PHASE (FINISH_MARK);
|
|
1713 #endif /* NEW_GC */
|
|
1714 init_marking_ephemerons ();
|
|
1715
|
|
1716 while (finish_marking_weak_hash_tables () > 0 ||
|
|
1717 finish_marking_weak_lists () > 0 ||
|
|
1718 continue_marking_ephemerons () > 0)
|
|
1719 #ifdef USE_KKCC
|
|
1720 {
|
|
1721 kkcc_marking (0);
|
|
1722 }
|
|
1723 #else /* not USE_KKCC */
|
|
1724 ;
|
|
1725 #endif /* not USE_KKCC */
|
|
1726
|
|
1727 /* At this point, we know which objects need to be finalized: we
|
|
1728 still need to resurrect them */
|
|
1729
|
|
1730 while (finish_marking_ephemerons () > 0 ||
|
|
1731 finish_marking_weak_lists () > 0 ||
|
|
1732 finish_marking_weak_hash_tables () > 0)
|
|
1733 #ifdef USE_KKCC
|
|
1734 {
|
|
1735 kkcc_marking (0);
|
|
1736 }
|
|
1737 #else /* not USE_KKCC */
|
|
1738 ;
|
|
1739 #endif /* not USE_KKCC */
|
|
1740
|
|
1741 /* And prune (this needs to be called after everything else has been
|
|
1742 marked and before we do any sweeping). */
|
|
1743 /* #### this is somewhat ad-hoc and should probably be an object
|
|
1744 method */
|
|
1745 prune_weak_hash_tables ();
|
|
1746 prune_weak_lists ();
|
|
1747 prune_specifiers ();
|
|
1748 prune_syntax_tables ();
|
|
1749
|
|
1750 prune_ephemerons ();
|
|
1751 prune_weak_boxes ();
|
|
1752 }
|
|
1753
|
|
#ifdef NEW_GC
/* Finalization phase: queue unmarked finalizable objects on
   Vfinalizers_to_run and keep them alive until their finalizer runs
   (see register_for_finalization). */
void
gc_finalize (void)
{
  GC_SET_PHASE (FINALIZE);
  register_for_finalization ();
}

/* Sweep phase: reclaim unmarked objects on the mc-heap. */
void
gc_sweep (void)
{
  GC_SET_PHASE (SWEEP);
  mc_sweep ();
}
#endif /* NEW_GC */
|
|
1769
|
|
1770
|
|
/* Final phase of a collection: reset consing counters, re-enable
   operations disabled during GC, run queued post-GC actions, and
   arrange for post-gc-hook to be signalled later. */
void
gc_finish (void)
{
#ifdef NEW_GC
  GC_SET_PHASE (FINISH_GC);
#endif /* NEW_GC */
  consing_since_gc = 0;
#ifndef DEBUG_XEMACS
  /* Allow you to set it really fucking low if you really want ... */
  if (gc_cons_threshold < 10000)
    gc_cons_threshold = 10000;
#endif
  recompute_need_to_garbage_collect ();

#ifndef NEW_GC
  inhibit_non_essential_conversion_operations = 0;
#endif /* not NEW_GC */
  gc_in_progress = 0;

  run_post_gc_actions ();

  /******* End of garbage collection ********/

#ifndef NEW_GC
  /* Presumably a small reserve to free in out-of-memory situations;
     allocation failure here is deliberately ignored (best-effort). */
  if (!breathing_space)
    {
      breathing_space = malloc (4096 - MALLOC_OVERHEAD);
    }
#endif /* not NEW_GC */

  need_to_signal_post_gc = 1;
  funcall_allocation_flag = 1;

  PROFILE_RECORD_EXITING_SECTION (QSin_garbage_collection);

#ifdef NEW_GC
  GC_SET_PHASE (NONE);
#endif /* NEW_GC */
}
|
|
1810
|
|
1811 #ifdef NEW_GC
|
|
/* Pause an incremental mark phase: enable the write barrier and start
   recording dirty bits so mutator writes made while GC is suspended
   can be found again when marking resumes. */
void
gc_suspend_mark_phase (void)
{
  PROFILE_RECORD_EXITING_SECTION (QSin_garbage_collection);
  write_barrier_enabled = 1;
  consing_since_gc = 0;
  vdb_start_dirty_bits_recording ();
}

/* Resume a suspended mark phase: stop dirty-bit recording, disable
   the write barrier, and return the result of processing the dirty
   bits (used by callers as a count of repushed work). */
int
gc_resume_mark_phase (void)
{
  PROFILE_RECORD_ENTERING_SECTION (QSin_garbage_collection);
  assert (write_barrier_enabled);
  vdb_stop_dirty_bits_recording ();
  write_barrier_enabled = 0;
  return vdb_read_dirty_bits ();
}
|
|
1830
|
|
/* Run the mark phase.  Non-incremental: drain the whole mark stack.
   Incremental: traverse at most gc_incremental_traversal_threshold
   entries; if work remains, suspend and return 0.  Returns 1 when
   marking completed. */
int
gc_mark (int incremental)
{
  GC_SET_PHASE (MARK);
  if (!incremental)
    {
      kkcc_marking (0);
    }
  else
    {
      kkcc_marking (gc_incremental_traversal_threshold);
      if (!KKCC_GC_STACK_EMPTY)
	{
	  gc_suspend_mark_phase ();
	  return 0;
	}
    }
  return 1;
}
|
|
1850
|
|
/* Continue a previously suspended mark phase.  Non-incremental:
   finish it atomically (process dirty pages, re-mark the root set,
   drain the stack).  Incremental: do one more bounded slice of work;
   returns 0 and re-suspends if marking is still unfinished, 1 when
   the mark phase is complete. */
int
gc_resume_mark (int incremental)
{
  if (!incremental)
    {
      if (!KKCC_GC_STACK_EMPTY)
	{
	  GC_STAT_RESUME_GC;
	  /* An incremental garbage collection is already running ---
	     now wrap it up and resume it atomically. */
	  gc_resume_mark_phase ();
	  gc_mark_root_set (REPUSH_ROOT_SET);
	  kkcc_marking (0);
	}
    }
  else
    {
      int repushed_objects;
      int mark_work;
      GC_STAT_RESUME_GC;
      repushed_objects = gc_resume_mark_phase ();
      /* Do at least the configured slice of work, more if the write
	 barrier repushed more objects than that. */
      mark_work = (gc_incremental_traversal_threshold > repushed_objects) ?
	gc_incremental_traversal_threshold : repushed_objects;
      kkcc_marking (mark_work);
      if (KKCC_GC_STACK_EMPTY)
	{
	  /* Mark root set again and finish up marking. */
	  gc_mark_root_set (REPUSH_ROOT_SET);
	  kkcc_marking (0);
	}
      else
	{
	  gc_suspend_mark_phase ();
	  return 0;
	}
    }
  return 1;
}
|
|
1889
|
|
1890
|
|
/* Drive one garbage collection as a state machine over GC_PHASE.
   Every case deliberately falls through to the next phase; an
   incremental collection returns early (suspends) from the mark
   phases and re-enters here later at the recorded phase. */
void
gc_1 (int incremental)
{
  switch (GC_PHASE)
    {
    case NONE:
      gc_prepare ();
      kkcc_gc_stack_init();
#ifdef DEBUG_XEMACS
      kkcc_bt_init ();
#endif
      /* fallthrough */
    case INIT_GC:
      gc_mark_root_set (PUSH_ROOT_SET);
      /* fallthrough */
    case PUSH_ROOT_SET:
      if (!gc_mark (incremental))
	return; /* suspend gc */
      /* fallthrough */
    case MARK:
      if (!KKCC_GC_STACK_EMPTY)
	if (!gc_resume_mark (incremental))
	  return; /* suspend gc */
      gc_finish_mark ();
      /* fallthrough */
    case FINISH_MARK:
      gc_finalize ();
      kkcc_gc_stack_free ();
#ifdef DEBUG_XEMACS
      kkcc_bt_free ();
#endif
      /* fallthrough */
    case FINALIZE:
      gc_sweep ();
      /* fallthrough */
    case SWEEP:
      gc_finish ();
      /* fallthrough */
    case FINISH_GC:
      break;
    }
}
|
|
1926
|
|
/* Top-level NEW_GC entry point.  INCREMENTAL nonzero runs (or
   resumes) an incremental collection, zero a full one.  Refuses to
   collect while GC is forbidden, inside redisplay, or when preparing
   for armageddon. */
void gc (int incremental)
{
  if (gc_currently_forbidden
      || in_display
      || preparing_for_armageddon)
    return;

  /* Very important to prevent GC during any of the following
     stuff that might run Lisp code; otherwise, we'll likely
     have infinite GC recursion. */
  speccount = begin_gc_forbidden ();

  show_gc_cursor_and_message ();

  gc_1 (incremental);

  remove_gc_cursor_and_message ();

  /* now stop inhibiting GC */
  unbind_to (speccount);
}
|
|
1948
|
|
/* Run one complete, non-incremental garbage collection. */
void
gc_full (void)
{
  gc (0);
}

DEFUN ("gc-full", Fgc_full, 0, 0, "", /*
This function performs a full garbage collection. If an incremental
garbage collection is already running, it completes without any
further interruption. This function guarantees that unused objects
are freed when it returns. Garbage collection happens automatically if
the client allocates more than `gc-cons-threshold' bytes of Lisp data
since the previous garbage collection.
*/
       ())
{
  gc_full ();
  return Qt;
}
|
|
1968
|
|
/* Start or advance an incremental collection (falls back to a full
   collection when allow_incremental_gc is zero). */
void
gc_incremental (void)
{
  gc (allow_incremental_gc);
}

DEFUN ("gc-incremental", Fgc_incremental, 0, 0, "", /*
This function starts an incremental garbage collection. If an
incremental garbage collection is already running, the next cycle
starts. Note that this function has not necessarily freed any memory
when it returns. This function only guarantees, that the traversal of
the heap makes progress. The next cycle of incremental garbage
collection happens automatically if the client allocates more than
`gc-incremental-cons-threshold' bytes of Lisp data since previous
garbage collection.
*/
       ())
{
  gc_incremental ();
  return Qt;
}
|
|
1990 #else /* not NEW_GC */
|
|
/* Old (non-NEW_GC) collector entry point: one complete stop-the-world
   mark/sweep pass.  Phases: prepare -> mark root set -> (KKCC: drain
   the explicit mark stack) -> finish mark -> sweep -> finish.
   Silently does nothing if a GC is already in progress, GC is
   forbidden, we are inside redisplay, or we are shutting down. */
void garbage_collect_1 (void)
{
  if (gc_in_progress
      || gc_currently_forbidden
      || in_display
      || preparing_for_armageddon)
    return;

  /* Very important to prevent GC during any of the following
     stuff that might run Lisp code; otherwise, we'll likely
     have infinite GC recursion. */
  /* NOTE(review): speccount appears to be file-scope state, not a
     local -- confirm against the declarations earlier in this file. */
  speccount = begin_gc_forbidden ();

  /* User feedback: GC cursor or echo-area message. */
  show_gc_cursor_and_message ();

  gc_prepare ();
#ifdef USE_KKCC
  /* KKCC replaces recursive marking with an explicit mark stack. */
  kkcc_gc_stack_init();
#ifdef DEBUG_XEMACS
  kkcc_bt_init ();
#endif
#endif /* USE_KKCC */
  gc_mark_root_set ();
#ifdef USE_KKCC
  /* Drain the mark stack completely (argument 0: non-incremental). */
  kkcc_marking (0);
#endif /* USE_KKCC */
  gc_finish_mark ();
#ifdef USE_KKCC
  kkcc_gc_stack_free ();
#ifdef DEBUG_XEMACS
  kkcc_bt_free ();
#endif
#endif /* USE_KKCC */
  gc_sweep_1 ();
  gc_finish ();

  remove_gc_cursor_and_message ();

  /* now stop inhibiting GC */
  unbind_to (speccount);
}
|
|
2032 #endif /* not NEW_GC */
|
|
2033
|
|
2034
|
|
2035 /************************************************************************/
|
|
2036 /* Initializations */
|
|
2037 /************************************************************************/
|
|
2038
|
|
2039 /* Initialization */
|
|
2040 static void
|
|
2041 common_init_gc_early (void)
|
|
2042 {
|
|
2043 Vgc_message = Qzero;
|
|
2044
|
|
2045 gc_currently_forbidden = 0;
|
|
2046 gc_hooks_inhibited = 0;
|
|
2047
|
|
2048 need_to_garbage_collect = always_gc;
|
|
2049
|
|
2050 gc_cons_threshold = GC_CONS_THRESHOLD;
|
|
2051 gc_cons_percentage = 40; /* #### what is optimal? */
|
|
2052 total_gc_usage_set = 0;
|
|
2053 #ifdef NEW_GC
|
|
2054 gc_cons_incremental_threshold = GC_CONS_INCREMENTAL_THRESHOLD;
|
|
2055 gc_incremental_traversal_threshold = GC_INCREMENTAL_TRAVERSAL_THRESHOLD;
|
3263
|
2056 #endif /* NEW_GC */
|
3092
|
2057 }
|
|
2058
|
|
/* Per-startup GC initialization; under NEW_GC this clears state that
   must not survive loading a portable dump. */
void
init_gc_early (void)
{
#ifdef NEW_GC
  /* Reset the finalizers_to_run list after pdump_load. */
  Vfinalizers_to_run = Qnil;
#endif /* NEW_GC */
}
|
|
2067
|
|
/* Re-run the common early GC initialization.  NOTE(review): presumably
   called when restarting from a dumped image -- confirm against the
   startup code that invokes the reinit_* functions. */
void
reinit_gc_early (void)
{
  common_init_gc_early ();
}
|
|
2073
|
|
/* One-time early GC initialization; currently identical to
   reinit_gc_early () -- both just reset the common defaults. */
void
init_gc_once_early (void)
{
  common_init_gc_early ();
}
|
|
2079
|
|
/* Define the Lisp symbols and primitives exported by this module.
   (The trailing comment on the inner #endif below previously read
   "not ERROR_CHECK_GC", which mislabeled the #ifdef it closes.) */
void
syms_of_gc (void)
{
  DEFSYMBOL (Qpre_gc_hook);
  DEFSYMBOL (Qpost_gc_hook);
#ifdef NEW_GC
  DEFSUBR (Fgc_full);
  DEFSUBR (Fgc_incremental);
#ifdef ERROR_CHECK_GC
  DEFSUBR (Fgc_stats);
#endif /* ERROR_CHECK_GC */
#endif /* NEW_GC */
}
|
|
2093
|
|
2094 void
|
|
2095 vars_of_gc (void)
|
|
2096 {
|
|
2097 staticpro_nodump (&pre_gc_cursor);
|
|
2098
|
|
2099 QSin_garbage_collection = build_msg_string ("(in garbage collection)");
|
|
2100 staticpro (&QSin_garbage_collection);
|
|
2101
|
|
2102 DEFVAR_INT ("gc-cons-threshold", &gc_cons_threshold /*
|
|
2103 *Number of bytes of consing between full garbage collections.
|
|
2104 \"Consing\" is a misnomer in that this actually counts allocation
|
|
2105 of all different kinds of objects, not just conses.
|
|
2106 Garbage collection can happen automatically once this many bytes have been
|
|
2107 allocated since the last garbage collection. All data types count.
|
|
2108
|
|
2109 Garbage collection happens automatically when `eval' or `funcall' are
|
|
2110 called. (Note that `funcall' is called implicitly as part of evaluation.)
|
|
2111 By binding this temporarily to a large number, you can effectively
|
|
2112 prevent garbage collection during a part of the program.
|
|
2113
|
|
2114 Normally, you cannot set this value less than 10,000 (if you do, it is
|
|
2115 automatically reset during the next garbage collection). However, if
|
|
2116 XEmacs was compiled with DEBUG_XEMACS, this does not happen, allowing
|
|
2117 you to set this value very low to track down problems with insufficient
|
|
2118 GCPRO'ing. If you set this to a negative number, garbage collection will
|
|
2119 happen at *EVERY* call to `eval' or `funcall'. This is an extremely
|
|
2120 effective way to check GCPRO problems, but be warned that your XEmacs
|
|
2121 will be unusable! You almost certainly won't have the patience to wait
|
|
2122 long enough to be able to set it back.
|
|
2123
|
|
2124 See also `consing-since-gc' and `gc-cons-percentage'.
|
|
2125 */ );
|
|
2126
|
|
2127 DEFVAR_INT ("gc-cons-percentage", &gc_cons_percentage /*
|
|
2128 *Percentage of memory allocated between garbage collections.
|
|
2129
|
|
2130 Garbage collection will happen if this percentage of the total amount of
|
|
2131 memory used for data (see `lisp-object-memory-usage') has been allocated
|
|
2132 since the last garbage collection. However, it will not happen if less
|
|
2133 than `gc-cons-threshold' bytes have been allocated -- this sets an absolute
|
|
2134 minimum in case very little data has been allocated or the percentage is
|
|
2135 set very low. Set this to 0 to have garbage collection always happen after
|
|
2136 `gc-cons-threshold' bytes have been allocated, regardless of current memory
|
|
2137 usage.
|
|
2138
|
|
2139 See also `consing-since-gc' and `gc-cons-threshold'.
|
|
2140 */ );
|
|
2141
|
|
2142 #ifdef NEW_GC
|
|
2143 DEFVAR_INT ("gc-cons-incremental-threshold",
|
|
2144 &gc_cons_incremental_threshold /*
|
|
2145 *Number of bytes of consing between cycles of incremental garbage
|
|
2146 collections. \"Consing\" is a misnomer in that this actually counts
|
|
2147 allocation of all different kinds of objects, not just conses. The
|
|
2148 next garbage collection cycle can happen automatically once this many
|
|
2149 bytes have been allocated since the last garbage collection cycle.
|
|
2150 All data types count.
|
|
2151
|
|
2152 See also `gc-cons-threshold'.
|
|
2153 */ );
|
|
2154
|
|
2155 DEFVAR_INT ("gc-incremental-traversal-threshold",
|
|
2156 &gc_incremental_traversal_threshold /*
|
|
2157 *Number of elements processed in one cycle of incremental travesal.
|
|
2158 */ );
|
|
2159 #endif /* NEW_GC */
|
|
2160
|
|
2161 DEFVAR_BOOL ("purify-flag", &purify_flag /*
|
|
2162 Non-nil means loading Lisp code in order to dump an executable.
|
|
2163 This means that certain objects should be allocated in readonly space.
|
|
2164 */ );
|
|
2165
|
|
2166 DEFVAR_BOOL ("garbage-collection-messages", &garbage_collection_messages /*
|
|
2167 Non-nil means display messages at start and end of garbage collection.
|
|
2168 */ );
|
|
2169 garbage_collection_messages = 0;
|
|
2170
|
|
2171 DEFVAR_LISP ("pre-gc-hook", &Vpre_gc_hook /*
|
|
2172 Function or functions to be run just before each garbage collection.
|
|
2173 Interrupts, garbage collection, and errors are inhibited while this hook
|
|
2174 runs, so be extremely careful in what you add here. In particular, avoid
|
|
2175 consing, and do not interact with the user.
|
|
2176 */ );
|
|
2177 Vpre_gc_hook = Qnil;
|
|
2178
|
|
2179 DEFVAR_LISP ("post-gc-hook", &Vpost_gc_hook /*
|
|
2180 Function or functions to be run just after each garbage collection.
|
|
2181 Interrupts, garbage collection, and errors are inhibited while this hook
|
|
2182 runs. Each hook is called with one argument which is an alist with
|
|
2183 finalization data.
|
|
2184 */ );
|
|
2185 Vpost_gc_hook = Qnil;
|
|
2186
|
|
2187 DEFVAR_LISP ("gc-message", &Vgc_message /*
|
|
2188 String to print to indicate that a garbage collection is in progress.
|
|
2189 This is printed in the echo area. If the selected frame is on a
|
|
2190 window system and `gc-pointer-glyph' specifies a value (i.e. a pointer
|
|
2191 image instance) in the domain of the selected frame, the mouse pointer
|
|
2192 will change instead of this message being printed.
|
|
2193 */ );
|
|
2194 Vgc_message = build_string (gc_default_message);
|
|
2195
|
|
2196 DEFVAR_LISP ("gc-pointer-glyph", &Vgc_pointer_glyph /*
|
|
2197 Pointer glyph used to indicate that a garbage collection is in progress.
|
|
2198 If the selected window is on a window system and this glyph specifies a
|
|
2199 value (i.e. a pointer image instance) in the domain of the selected
|
|
2200 window, the pointer will be changed as specified during garbage collection.
|
|
2201 Otherwise, a message will be printed in the echo area, as controlled
|
|
2202 by `gc-message'.
|
|
2203 */ );
|
|
2204
|
|
2205 #ifdef NEW_GC
|
|
2206 DEFVAR_BOOL ("allow-incremental-gc", &allow_incremental_gc /*
|
|
2207 *Non-nil means to allow incremental garbage collection. Nil prevents
|
|
2208 *incremental garbage collection, the garbage collector then only does
|
|
2209 *full collects (even if (gc-incremental) is called).
|
|
2210 */ );
|
3263
|
2211
|
|
2212 Vfinalizers_to_run = Qnil;
|
|
2213 staticpro_nodump (&Vfinalizers_to_run);
|
3092
|
2214 #endif /* NEW_GC */
|
|
2215 }
|
|
2216
|
|
/* Late initialization: create the glyph object backing
   `gc-pointer-glyph' (declared in vars_of_gc () above). */
void
complex_vars_of_gc (void)
{
  Vgc_pointer_glyph = Fmake_glyph_internal (Qpointer);
}
|