xemacs-beta: annotate src/gc.c @ 5054:24372c7e0e8f
fix build problem with pdump
-------------------- ChangeLog entries follow: --------------------
src/ChangeLog addition:
2010-02-21 Ben Wing <ben@xemacs.org>
* gc.c (kkcc_marking): Fix compile error.
* mc-alloc.c:
#if 0 out some unused functions.
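The kkcc_marking compile fix shows up at source line 1025 below: the parameter
is now declared as "int USED_IF_NEW_GC (cnt)", because without NEW_GC the value
is only read inside #ifdef NEW_GC blocks and would otherwise draw an
unused-parameter warning. As a rough sketch of how such a macro is typically
defined (the real definition lives in XEmacs's compiler.h and may differ):

    /* Illustrative only; not the actual XEmacs definition. */
    #ifdef __GNUC__
    # define ATTRIBUTE_UNUSED __attribute__ ((unused))
    #else
    # define ATTRIBUTE_UNUSED
    #endif

    #ifdef NEW_GC
    # define USED_IF_NEW_GC(decl) decl                   /* parameter really used */
    #else
    # define USED_IF_NEW_GC(decl) decl ATTRIBUTE_UNUSED  /* silence the warning */
    #endif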
author | Ben Wing <ben@xemacs.org>
date | Sun, 21 Feb 2010 20:58:45 -0600
parents | d4f666cda5e6
children | a9c41067dd88

rev | line source
3092 | 1 /* New incremental garbage collector for XEmacs. |
2 Copyright (C) 2005 Marcus Crestani. | |
4934 | 3 Copyright (C) 2010 Ben Wing. |
3092 | 4 |
5 This file is part of XEmacs. | |
6 | |
7 XEmacs is free software; you can redistribute it and/or modify it | |
8 under the terms of the GNU General Public License as published by the | |
9 Free Software Foundation; either version 2, or (at your option) any | |
10 later version. | |
11 | |
12 XEmacs is distributed in the hope that it will be useful, but WITHOUT | |
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 for more details. | |
16 | |
17 You should have received a copy of the GNU General Public License | |
18 along with XEmacs; see the file COPYING. If not, write to | |
19 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
20 Boston, MA 02111-1307, USA. */ | |
21 | |
22 /* Synched up with: Not in FSF. */ | |
23 | |
24 #include <config.h> | |
25 #include "lisp.h" | |
26 | |
27 #include "backtrace.h" | |
28 #include "buffer.h" | |
29 #include "bytecode.h" | |
30 #include "chartab.h" | |
31 #include "console-stream.h" | |
32 #include "device.h" | |
33 #include "elhash.h" | |
34 #include "events.h" | |
35 #include "extents-impl.h" | |
36 #include "file-coding.h" | |
37 #include "frame-impl.h" | |
38 #include "gc.h" | |
39 #include "glyphs.h" | |
40 #include "opaque.h" | |
41 #include "lrecord.h" | |
42 #include "lstream.h" | |
43 #include "process.h" | |
44 #include "profile.h" | |
45 #include "redisplay.h" | |
46 #include "specifier.h" | |
47 #include "sysfile.h" | |
48 #include "sysdep.h" | |
49 #include "window.h" | |
50 #include "vdb.h" | |
51 | |
52 | |
53 #define GC_CONS_THRESHOLD 2000000 | |
54 #define GC_CONS_INCREMENTAL_THRESHOLD 200000 | |
55 #define GC_INCREMENTAL_TRAVERSAL_THRESHOLD 100000 | |
56 | |
57 /* Number of bytes of consing done since the last GC. */ | |
58 EMACS_INT consing_since_gc; | |
59 | |
60 /* Number of bytes of consing done since startup. */ | |
61 EMACS_UINT total_consing; | |
62 | |
63 /* Number of bytes of current allocated heap objects. */ | |
64 EMACS_INT total_gc_usage; | |
65 | |
66 /* If the above is set. */ | |
67 int total_gc_usage_set; | |
68 | |
69 /* Number of bytes of consing since gc before another gc should be done. */ | |
70 EMACS_INT gc_cons_threshold; | |
71 | |
72 /* Nonzero during gc */ | |
73 int gc_in_progress; | |
74 | |
75 /* Percentage of consing of total data size before another GC. */ | |
76 EMACS_INT gc_cons_percentage; | |
77 | |
78 #ifdef NEW_GC | |
79 /* Number of bytes of consing since gc before another cycle of the gc | |
80 should be done in incremental mode. */ | |
81 EMACS_INT gc_cons_incremental_threshold; | |
82 | |
83 /* Number of elements marked in one cycle of incremental GC. */ | |
84 EMACS_INT gc_incremental_traversal_threshold; | |
85 | |
86 /* Nonzero during write barrier */ | |
87 int write_barrier_enabled; | |
88 #endif /* NEW_GC */ | |
89 | |
90 | |
91 | |
92 #ifdef NEW_GC | |
93 /************************************************************************/ | |
94 /* Incremental State and Statistics */ | |
95 /************************************************************************/ | |
96 | |
97 enum gc_phase | |
98 { | |
99 NONE, | |
100 INIT_GC, | |
101 PUSH_ROOT_SET, | |
102 MARK, | |
103 REPUSH_ROOT_SET, | |
104 FINISH_MARK, | |
105 FINALIZE, | |
106 SWEEP, | |
107 FINISH_GC | |
108 }; | |
109 | |
110 #ifndef ERROR_CHECK_GC | |
4124 | 111 typedef struct gc_state_type |
3092 | 112 { |
113 enum gc_phase phase; | |
4124 | 114 } gc_state_type; |
3092 | 115 #else /* ERROR_CHECK_GC */ |
116 enum gc_stat_id | |
117 { | |
118 GC_STAT_TOTAL, | |
119 GC_STAT_IN_LAST_GC, | |
120 GC_STAT_IN_THIS_GC, | |
121 GC_STAT_IN_LAST_CYCLE, | |
122 GC_STAT_IN_THIS_CYCLE, | |
123 GC_STAT_COUNT /* has to be last */ | |
124 }; | |
125 | |
4124 | 126 typedef struct gc_state_type |
3092 | 127 { |
128 enum gc_phase phase; | |
3313 | 129 double n_gc[GC_STAT_COUNT]; |
130 double n_cycles[GC_STAT_COUNT]; | |
131 double enqueued[GC_STAT_COUNT]; | |
132 double dequeued[GC_STAT_COUNT]; | |
133 double repushed[GC_STAT_COUNT]; | |
134 double enqueued2[GC_STAT_COUNT]; | |
135 double dequeued2[GC_STAT_COUNT]; | |
136 double finalized[GC_STAT_COUNT]; | |
137 double freed[GC_STAT_COUNT]; | |
4124 | 138 } gc_state_type; |
3092 | 139 #endif /* ERROR_CHECK_GC */ |
140 | |
4124 | 141 gc_state_type gc_state; |
142 | |
3092 | 143 #define GC_PHASE gc_state.phase |
144 #define GC_SET_PHASE(p) GC_PHASE = p | |
145 | |
146 #ifdef ERROR_CHECK_GC | |
147 # define GC_STAT_START_NEW_GC gc_stat_start_new_gc () | |
148 # define GC_STAT_RESUME_GC gc_stat_resume_gc () | |
149 | |
150 #define GC_STAT_TICK(STAT) \ | |
151 gc_state.STAT[GC_STAT_TOTAL]++; \ | |
152 gc_state.STAT[GC_STAT_IN_THIS_GC]++; \ | |
153 gc_state.STAT[GC_STAT_IN_THIS_CYCLE]++ | |
154 | |
155 # define GC_STAT_ENQUEUED \ | |
156 if (GC_PHASE == REPUSH_ROOT_SET) \ | |
157 { \ | |
158 GC_STAT_TICK (enqueued2); \ | |
159 } \ | |
160 else \ | |
161 { \ | |
162 GC_STAT_TICK (enqueued); \ | |
163 } | |
164 | |
165 # define GC_STAT_DEQUEUED \ | |
166 if (gc_state.phase == REPUSH_ROOT_SET) \ | |
167 { \ | |
168 GC_STAT_TICK (dequeued2); \ | |
169 } \ | |
170 else \ | |
171 { \ | |
172 GC_STAT_TICK (dequeued); \ | |
173 } | |
174 # define GC_STAT_REPUSHED GC_STAT_TICK (repushed) | |
175 | |
176 #define GC_STAT_RESUME(stat) \ | |
177 gc_state.stat[GC_STAT_IN_LAST_CYCLE] = \ | |
178 gc_state.stat[GC_STAT_IN_THIS_CYCLE]; \ | |
179 gc_state.stat[GC_STAT_IN_THIS_CYCLE] = 0 | |
180 | |
181 #define GC_STAT_RESTART(stat) \ | |
182 gc_state.stat[GC_STAT_IN_LAST_GC] = \ | |
183 gc_state.stat[GC_STAT_IN_THIS_GC]; \ | |
184 gc_state.stat[GC_STAT_IN_THIS_GC] = 0; \ | |
185 GC_STAT_RESUME (stat) | |
186 | |
5046 | 187 static void |
3092 | 188 gc_stat_start_new_gc (void) |
189 { | |
190 gc_state.n_gc[GC_STAT_TOTAL]++; | |
191 gc_state.n_cycles[GC_STAT_TOTAL]++; | |
192 gc_state.n_cycles[GC_STAT_IN_LAST_GC] = gc_state.n_cycles[GC_STAT_IN_THIS_GC]; | |
193 gc_state.n_cycles[GC_STAT_IN_THIS_GC] = 1; | |
194 | |
195 GC_STAT_RESTART (enqueued); | |
196 GC_STAT_RESTART (dequeued); | |
197 GC_STAT_RESTART (repushed); | |
198 GC_STAT_RESTART (finalized); | |
199 GC_STAT_RESTART (enqueued2); | |
200 GC_STAT_RESTART (dequeued2); | |
201 GC_STAT_RESTART (freed); | |
202 } | |
203 | |
5046 | 204 static void |
3092 | 205 gc_stat_resume_gc (void) |
206 { | |
207 gc_state.n_cycles[GC_STAT_TOTAL]++; | |
208 gc_state.n_cycles[GC_STAT_IN_THIS_GC]++; | |
209 GC_STAT_RESUME (enqueued); | |
210 GC_STAT_RESUME (dequeued); | |
211 GC_STAT_RESUME (repushed); | |
212 GC_STAT_RESUME (finalized); | |
213 GC_STAT_RESUME (enqueued2); | |
214 GC_STAT_RESUME (dequeued2); | |
215 GC_STAT_RESUME (freed); | |
216 } | |
217 | |
218 void | |
219 gc_stat_finalized (void) | |
220 { | |
221 GC_STAT_TICK (finalized); | |
222 } | |
223 | |
224 void | |
225 gc_stat_freed (void) | |
226 { | |
227 GC_STAT_TICK (freed); | |
228 } | |
229 | |
230 DEFUN("gc-stats", Fgc_stats, 0, 0 ,"", /* | |
231 Return statistics about garbage collection cycles in a property list. | |
232 */ | |
233 ()) | |
234 { | |
235 Lisp_Object pl = Qnil; | |
236 #define PL(name,value) \ | |
3313 | 237 pl = cons3 (intern (name), make_float (gc_state.value), pl) |
3092 | 238 |
239 PL ("freed-in-this-cycle", freed[GC_STAT_IN_THIS_CYCLE]); | |
240 PL ("freed-in-this-gc", freed[GC_STAT_IN_THIS_GC]); | |
241 PL ("freed-in-last-cycle", freed[GC_STAT_IN_LAST_CYCLE]); | |
242 PL ("freed-in-last-gc", freed[GC_STAT_IN_LAST_GC]); | |
243 PL ("freed-total", freed[GC_STAT_TOTAL]); | |
244 PL ("finalized-in-this-cycle", finalized[GC_STAT_IN_THIS_CYCLE]); | |
245 PL ("finalized-in-this-gc", finalized[GC_STAT_IN_THIS_GC]); | |
246 PL ("finalized-in-last-cycle", finalized[GC_STAT_IN_LAST_CYCLE]); | |
247 PL ("finalized-in-last-gc", finalized[GC_STAT_IN_LAST_GC]); | |
248 PL ("finalized-total", finalized[GC_STAT_TOTAL]); | |
249 PL ("repushed-in-this-cycle", repushed[GC_STAT_IN_THIS_CYCLE]); | |
250 PL ("repushed-in-this-gc", repushed[GC_STAT_IN_THIS_GC]); | |
251 PL ("repushed-in-last-cycle", repushed[GC_STAT_IN_LAST_CYCLE]); | |
252 PL ("repushed-in-last-gc", repushed[GC_STAT_IN_LAST_GC]); | |
253 PL ("repushed-total", repushed[GC_STAT_TOTAL]); | |
254 PL ("dequeued2-in-this-cycle", dequeued2[GC_STAT_IN_THIS_CYCLE]); | |
255 PL ("dequeued2-in-this-gc", dequeued2[GC_STAT_IN_THIS_GC]); | |
256 PL ("dequeued2-in-last-cycle", dequeued2[GC_STAT_IN_LAST_CYCLE]); | |
257 PL ("dequeued2-in-last-gc", dequeued2[GC_STAT_IN_LAST_GC]); | |
258 PL ("dequeued2-total", dequeued2[GC_STAT_TOTAL]); | |
259 PL ("enqueued2-in-this-cycle", enqueued2[GC_STAT_IN_THIS_CYCLE]); | |
260 PL ("enqueued2-in-this-gc", enqueued2[GC_STAT_IN_THIS_GC]); | |
261 PL ("enqueued2-in-last-cycle", enqueued2[GC_STAT_IN_LAST_CYCLE]); | |
262 PL ("enqueued2-in-last-gc", enqueued2[GC_STAT_IN_LAST_GC]); | |
263 PL ("enqueued2-total", enqueued2[GC_STAT_TOTAL]); | |
264 PL ("dequeued-in-this-cycle", dequeued[GC_STAT_IN_THIS_CYCLE]); | |
265 PL ("dequeued-in-this-gc", dequeued[GC_STAT_IN_THIS_GC]); | |
266 PL ("dequeued-in-last-cycle", dequeued[GC_STAT_IN_LAST_CYCLE]); | |
267 PL ("dequeued-in-last-gc", dequeued[GC_STAT_IN_LAST_GC]); | |
268 PL ("dequeued-total", dequeued[GC_STAT_TOTAL]); | |
269 PL ("enqueued-in-this-cycle", enqueued[GC_STAT_IN_THIS_CYCLE]); | |
270 PL ("enqueued-in-this-gc", enqueued[GC_STAT_IN_THIS_GC]); | |
271 PL ("enqueued-in-last-cycle", enqueued[GC_STAT_IN_LAST_CYCLE]); | |
272 PL ("enqueued-in-last-gc", enqueued[GC_STAT_IN_LAST_GC]); | |
273 PL ("enqueued-total", enqueued[GC_STAT_TOTAL]); | |
274 PL ("n-cycles-in-this-gc", n_cycles[GC_STAT_IN_THIS_GC]); | |
275 PL ("n-cycles-in-last-gc", n_cycles[GC_STAT_IN_LAST_GC]); | |
276 PL ("n-cycles-total", n_cycles[GC_STAT_TOTAL]); | |
277 PL ("n-gc-total", n_gc[GC_STAT_TOTAL]); | |
278 PL ("phase", phase); | |
279 return pl; | |
280 } | |
281 #else /* not ERROR_CHECK_GC */ | |
282 # define GC_STAT_START_NEW_GC | |
283 # define GC_STAT_RESUME_GC | |
284 # define GC_STAT_ENQUEUED | |
285 # define GC_STAT_DEQUEUED | |
286 # define GC_STAT_REPUSHED | |
287 # define GC_STAT_REMOVED | |
288 #endif /* not ERROR_CHECK_GC */ | |
289 #endif /* NEW_GC */ | |
290 | |
291 | |
292 /************************************************************************/ | |
293 /* Recompute need to garbage collect */ | |
294 /************************************************************************/ | |
295 | |
296 int need_to_garbage_collect; | |
297 | |
298 #ifdef ERROR_CHECK_GC | |
299 int always_gc = 0; /* Debugging hack; equivalent to | |
300 (setq gc-cons-threshold -1) */ |
301 #else | |
302 #define always_gc 0 | |
303 #endif | |
304 | |
305 /* True if it's time to garbage collect now. */ | |
306 void | |
307 recompute_need_to_garbage_collect (void) | |
308 { | |
309 if (always_gc) | |
310 need_to_garbage_collect = 1; | |
311 else | |
312 need_to_garbage_collect = | |
313 #ifdef NEW_GC | |
314 write_barrier_enabled ? | |
315 (consing_since_gc > gc_cons_incremental_threshold) : | |
316 #endif /* NEW_GC */ | |
317 (consing_since_gc > gc_cons_threshold | |
318 && | |
319 #if 0 /* #### implement this better */ | |
4115 | 320 ((double)consing_since_gc) / total_data_usage()) >= |
321 ((double)gc_cons_percentage / 100) | |
3092 | 322 #else |
323 (!total_gc_usage_set || | |
4115 | 324 ((double)consing_since_gc / total_gc_usage) >= |
325 ((double)gc_cons_percentage / 100)) | |
3092 | 326 #endif |
327 ); | |
328 recompute_funcall_allocation_flag (); | |
329 } | |
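In other words, recompute_need_to_garbage_collect requests a GC once consing
since the last GC exceeds gc_cons_threshold (or gc_cons_incremental_threshold
while the NEW_GC write barrier is active), and, when total_gc_usage is known,
only if that consing also amounts to at least gc_cons_percentage percent of the
live heap. A standalone restatement of the non-incremental predicate, for
illustration only (gc_trigger_p is a hypothetical helper, not part of gc.c):

    /* Hypothetical helper restating the trigger condition above. */
    static int
    gc_trigger_p (EMACS_INT consed, EMACS_INT threshold,
                  int usage_known, EMACS_INT total_usage, EMACS_INT percentage)
    {
      if (consed <= threshold)
        return 0;             /* not enough new consing yet */
      if (!usage_known)
        return 1;             /* no heap estimate; the byte threshold alone decides */
      return ((double) consed / total_usage) >= ((double) percentage / 100);
    }

With the GC_CONS_THRESHOLD default of 2000000 above, a full collection is thus
considered only after roughly 2 MB of new consing.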
330 | |
331 | |
332 | |
333 /************************************************************************/ | |
334 /* Mark Phase */ | |
335 /************************************************************************/ | |
336 | |
337 static const struct memory_description lisp_object_description_1[] = { | |
338 { XD_LISP_OBJECT, 0 }, | |
339 { XD_END } | |
340 }; | |
341 | |
342 const struct sized_memory_description lisp_object_description = { | |
343 sizeof (Lisp_Object), | |
344 lisp_object_description_1 | |
345 }; | |
346 | |
347 #if defined (USE_KKCC) || defined (PDUMP) | |
348 | |
349 /* This function extracts the value of a count variable described somewhere | |
350 else in the description. The value is converted according to its type. */ |
351 EMACS_INT | |
352 lispdesc_indirect_count_1 (EMACS_INT code, | |
353 const struct memory_description *idesc, | |
354 const void *idata) | |
355 { | |
356 EMACS_INT count; | |
357 const void *irdata; | |
358 | |
359 int line = XD_INDIRECT_VAL (code); | |
360 int delta = XD_INDIRECT_DELTA (code); | |
361 | |
362 irdata = ((char *) idata) + | |
363 lispdesc_indirect_count (idesc[line].offset, idesc, idata); | |
364 switch (idesc[line].type) | |
365 { | |
366 case XD_BYTECOUNT: | |
367 count = * (Bytecount *) irdata; | |
368 break; | |
369 case XD_ELEMCOUNT: | |
370 count = * (Elemcount *) irdata; | |
371 break; | |
372 case XD_HASHCODE: | |
373 count = * (Hashcode *) irdata; | |
374 break; | |
375 case XD_INT: | |
376 count = * (int *) irdata; | |
377 break; | |
378 case XD_LONG: | |
379 count = * (long *) irdata; | |
380 break; | |
381 default: | |
382 stderr_out ("Unsupported count type : %d (line = %d, code = %ld)\n", | |
383 idesc[line].type, line, (long) code); | |
384 #if defined(USE_KKCC) && defined(DEBUG_XEMACS) | |
385 if (gc_in_progress) | |
386 kkcc_backtrace (); | |
387 #endif | |
388 #ifdef PDUMP | |
389 if (in_pdump) | |
390 pdump_backtrace (); | |
391 #endif | |
392 count = 0; /* warning suppression */ | |
393 ABORT (); | |
394 } | |
395 count += delta; | |
396 return count; | |
397 } | |
398 | |
399 /* SDESC is a "description map" (basically, a list of offsets used for | |
400 successive indirections) and OBJ is the first object to indirect off of. | |
401 Return the description ultimately found. */ | |
402 | |
403 const struct sized_memory_description * | |
404 lispdesc_indirect_description_1 (const void *obj, | |
405 const struct sized_memory_description *sdesc) | |
406 { | |
407 int pos; | |
408 | |
409 for (pos = 0; sdesc[pos].size >= 0; pos++) | |
410 obj = * (const void **) ((const char *) obj + sdesc[pos].size); | |
411 | |
412 return (const struct sized_memory_description *) obj; | |
413 } | |
414 | |
415 /* Compute the size of the data at RDATA, described by a single entry | |
416 DESC1 in a description array. OBJ and DESC are used for | |
417 XD_INDIRECT references. */ | |
418 | |
419 static Bytecount | |
420 lispdesc_one_description_line_size (void *rdata, | |
421 const struct memory_description *desc1, | |
422 const void *obj, | |
423 const struct memory_description *desc) | |
424 { | |
425 union_switcheroo: | |
426 switch (desc1->type) | |
427 { | |
428 case XD_LISP_OBJECT_ARRAY: | |
429 { | |
430 EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj); | |
431 return (val * sizeof (Lisp_Object)); | |
432 } | |
433 case XD_LISP_OBJECT: | |
434 case XD_LO_LINK: | |
435 return sizeof (Lisp_Object); | |
436 case XD_OPAQUE_PTR: | |
437 return sizeof (void *); | |
438 #ifdef NEW_GC | |
439 case XD_LISP_OBJECT_BLOCK_PTR: | |
440 #endif /* NEW_GC */ | |
441 case XD_BLOCK_PTR: | |
442 { | |
443 EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj); | |
444 return val * sizeof (void *); | |
445 } | |
446 case XD_BLOCK_ARRAY: | |
447 { | |
448 EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj); | |
449 | |
450 return (val * | |
451 lispdesc_block_size | |
452 (rdata, | |
453 lispdesc_indirect_description (obj, desc1->data2.descr))); | |
454 } | |
455 case XD_OPAQUE_DATA_PTR: | |
456 return sizeof (void *); | |
457 case XD_UNION_DYNAMIC_SIZE: | |
458 { | |
459 /* If an explicit size was given in the first-level structure | |
460 description, use it; else compute size based on current union | |
461 constant. */ | |
462 const struct sized_memory_description *sdesc = | |
463 lispdesc_indirect_description (obj, desc1->data2.descr); | |
464 if (sdesc->size) | |
465 return sdesc->size; | |
466 else | |
467 { | |
468 desc1 = lispdesc_process_xd_union (desc1, desc, obj); | |
469 if (desc1) | |
470 goto union_switcheroo; | |
471 break; | |
472 } | |
473 } | |
474 case XD_UNION: | |
475 { | |
476 /* If an explicit size was given in the first-level structure | |
477 description, use it; else compute size based on maximum of all | |
478 possible structures. */ | |
479 const struct sized_memory_description *sdesc = | |
480 lispdesc_indirect_description (obj, desc1->data2.descr); | |
481 if (sdesc->size) | |
482 return sdesc->size; | |
483 else | |
484 { | |
485 int count; | |
486 Bytecount max_size = -1, size; | |
487 | |
488 desc1 = sdesc->description; | |
489 | |
490 for (count = 0; desc1[count].type != XD_END; count++) | |
491 { | |
492 size = lispdesc_one_description_line_size (rdata, | |
493 &desc1[count], | |
494 obj, desc); | |
495 if (size > max_size) | |
496 max_size = size; | |
497 } | |
498 return max_size; | |
499 } | |
500 } | |
501 case XD_ASCII_STRING: | |
502 return sizeof (void *); | |
503 case XD_DOC_STRING: | |
504 return sizeof (void *); | |
505 case XD_INT_RESET: | |
506 return sizeof (int); | |
507 case XD_BYTECOUNT: | |
508 return sizeof (Bytecount); | |
509 case XD_ELEMCOUNT: | |
510 return sizeof (Elemcount); | |
511 case XD_HASHCODE: | |
512 return sizeof (Hashcode); | |
513 case XD_INT: | |
514 return sizeof (int); | |
515 case XD_LONG: | |
516 return sizeof (long); | |
517 default: | |
518 stderr_out ("Unsupported dump type : %d\n", desc1->type); | |
519 ABORT (); | |
520 } | |
521 | |
522 return 0; | |
523 } | |
524 | |
525 | |
526 /* Return the size of the memory block (NOT necessarily a structure!) | |
527 described by SDESC and pointed to by OBJ. If SDESC records an | |
528 explicit size (i.e. non-zero), it is simply returned; otherwise, | |
529 the size is calculated by the maximum offset and the size of the | |
530 object at that offset, rounded up to the maximum alignment. In | |
531 this case, we may need the object, for example when retrieving an | |
532 "indirect count" of an inlined array (the count is not constant, | |
533 but is specified by one of the elements of the memory block). (It | |
534 is generally not a problem if we return an overly large size -- we | |
535 will simply end up reserving more space than necessary; but if the | |
536 size is too small we could be in serious trouble, in particular | |
537 with nested inlined structures, where there may be alignment | |
538 padding in the middle of a block. #### In fact there is an (at | |
539 least theoretical) problem with an overly large size -- we may | |
540 trigger a protection fault when reading from invalid memory. We | |
541 need to handle this -- perhaps in a stupid but dependable way, | |
542 i.e. by trapping SIGSEGV and SIGBUS.) */ | |
543 | |
544 Bytecount | |
545 lispdesc_block_size_1 (const void *obj, Bytecount size, | |
546 const struct memory_description *desc) | |
547 { | |
548 EMACS_INT max_offset = -1; | |
549 int max_offset_pos = -1; | |
550 int pos; | |
551 | |
552 if (size) | |
553 return size; | |
554 | |
555 for (pos = 0; desc[pos].type != XD_END; pos++) | |
556 { | |
557 EMACS_INT offset = lispdesc_indirect_count (desc[pos].offset, desc, obj); | |
558 if (offset == max_offset) | |
559 { | |
560 stderr_out ("Two relocatable elements at same offset?\n"); | |
561 ABORT (); | |
562 } | |
563 else if (offset > max_offset) | |
564 { | |
565 max_offset = offset; | |
566 max_offset_pos = pos; | |
567 } | |
568 } | |
569 | |
570 if (max_offset_pos < 0) | |
571 return 0; | |
572 | |
573 { | |
574 Bytecount size_at_max; | |
575 size_at_max = | |
576 lispdesc_one_description_line_size ((char *) obj + max_offset, | |
577 &desc[max_offset_pos], obj, desc); | |
578 | |
579 /* We have no way of knowing the required alignment for this structure, | |
580 so just make it maximally aligned. */ | |
581 return MAX_ALIGN_SIZE (max_offset + size_at_max); | |
582 } | |
583 } | |
584 #endif /* defined (USE_KKCC) || defined (PDUMP) */ | |
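For readers unfamiliar with these description arrays: an XD_INDIRECT code
stored in a data1 or offset slot names another line of the same description,
and lispdesc_indirect_count_1 above fetches the runtime count from that line.
A hedged example of the kind of description these walkers consume; the struct
is hypothetical, and the initializer layout follows the style used elsewhere
in XEmacs (lrecord.h is authoritative and may differ in detail):

    /* Hypothetical struct: nitems (description line 0) holds the length of
       the heap block pointed to by items, so the XD_BLOCK_PTR line obtains
       its count via XD_INDIRECT (0, 0), i.e. "line 0, delta 0". */
    struct my_block
    {
      int nitems;
      Lisp_Object *items;
    };

    static const struct memory_description my_block_description[] = {
      { XD_INT,       offsetof (struct my_block, nitems) },
      { XD_BLOCK_PTR, offsetof (struct my_block, items),
        XD_INDIRECT (0, 0), { &lisp_object_description } },
      { XD_END }
    };

lispdesc_block_size_1 would then size such a block from its largest offset plus
the size of the entry found there, exactly as the long comment above describes.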
585 | |
3263 | 586 #ifdef NEW_GC |
3092 | 587 #define GC_CHECK_NOT_FREE(lheader) \ |
588 gc_checking_assert (! LRECORD_FREE_P (lheader)); | |
3263 | 589 #else /* not NEW_GC */ |
3092 | 590 #define GC_CHECK_NOT_FREE(lheader) \ |
591 gc_checking_assert (! LRECORD_FREE_P (lheader)); \ | |
592 gc_checking_assert (LHEADER_IMPLEMENTATION (lheader)->basic_p || \ | |
593 ! ((struct old_lcrecord_header *) lheader)->free) | |
3263 | 594 #endif /* not NEW_GC */ |
3092 | 595 |
596 #ifdef USE_KKCC | |
597 /* The following functions implement the new mark algorithm. | |
598 They mark objects according to their descriptions. They | |
599 are modeled on the corresponding pdumper procedures. */ | |
600 | |
601 #if 0 | |
602 # define KKCC_STACK_AS_QUEUE 1 | |
603 #endif | |
604 | |
605 #ifdef DEBUG_XEMACS | |
606 /* The backtrace for the KKCC mark functions. */ | |
607 #define KKCC_INIT_BT_STACK_SIZE 4096 | |
608 | |
609 typedef struct | |
610 { | |
611 void *obj; | |
612 const struct memory_description *desc; | |
613 int pos; | |
614 } kkcc_bt_stack_entry; | |
615 | |
616 static kkcc_bt_stack_entry *kkcc_bt; | |
617 static int kkcc_bt_stack_size; | |
618 static int kkcc_bt_depth = 0; | |
619 | |
620 static void | |
621 kkcc_bt_init (void) | |
622 { | |
623 kkcc_bt_depth = 0; | |
624 kkcc_bt_stack_size = KKCC_INIT_BT_STACK_SIZE; | |
625 kkcc_bt = (kkcc_bt_stack_entry *) | |
626 xmalloc_and_zero (kkcc_bt_stack_size * sizeof (kkcc_bt_stack_entry)); | |
627 if (!kkcc_bt) | |
628 { | |
629 stderr_out ("KKCC backtrace stack init failed for size %d\n", | |
630 kkcc_bt_stack_size); | |
631 ABORT (); | |
632 } | |
633 } | |
634 | |
635 void | |
636 kkcc_backtrace (void) | |
637 { | |
638 int i; | |
639 stderr_out ("KKCC mark stack backtrace :\n"); | |
640 for (i = kkcc_bt_depth - 1; i >= 0; i--) | |
641 { | |
642 Lisp_Object obj = wrap_pointer_1 (kkcc_bt[i].obj); | |
643 stderr_out (" [%d]", i); | |
644 if ((XRECORD_LHEADER (obj)->type >= lrecord_type_last_built_in_type) | |
645 || (!LRECORDP (obj)) | |
646 || (!XRECORD_LHEADER_IMPLEMENTATION (obj))) | |
647 { | |
648 stderr_out (" non Lisp Object"); | |
649 } | |
650 else | |
651 { | |
652 stderr_out (" %s", | |
653 XRECORD_LHEADER_IMPLEMENTATION (obj)->name); | |
654 } | |
3519 | 655 stderr_out (" (addr: %p, desc: %p, ", |
656 (void *) kkcc_bt[i].obj, | |
657 (void *) kkcc_bt[i].desc); | |
3092 | 658 if (kkcc_bt[i].pos >= 0) |
659 stderr_out ("pos: %d)\n", kkcc_bt[i].pos); | |
660 else | |
661 if (kkcc_bt[i].pos == -1) | |
662 stderr_out ("root set)\n"); | |
663 else if (kkcc_bt[i].pos == -2) | |
664 stderr_out ("dirty object)\n"); | |
665 } | |
666 } | |
667 | |
668 static void | |
669 kkcc_bt_stack_realloc (void) | |
670 { | |
671 kkcc_bt_stack_size *= 2; | |
672 kkcc_bt = (kkcc_bt_stack_entry *) | |
673 xrealloc (kkcc_bt, kkcc_bt_stack_size * sizeof (kkcc_bt_stack_entry)); | |
674 if (!kkcc_bt) | |
675 { | |
676 stderr_out ("KKCC backtrace stack realloc failed for size %d\n", | |
677 kkcc_bt_stack_size); | |
678 ABORT (); | |
679 } | |
680 } | |
681 | |
682 static void | |
683 kkcc_bt_free (void) | |
684 { | |
685 xfree_1 (kkcc_bt); | |
686 kkcc_bt = 0; | |
687 kkcc_bt_stack_size = 0; | |
688 } | |
689 | |
690 static void | |
691 kkcc_bt_push (void *obj, const struct memory_description *desc, | |
692 int level, int pos) | |
693 { | |
694 kkcc_bt_depth = level; | |
695 kkcc_bt[kkcc_bt_depth].obj = obj; | |
696 kkcc_bt[kkcc_bt_depth].desc = desc; | |
697 kkcc_bt[kkcc_bt_depth].pos = pos; | |
698 kkcc_bt_depth++; | |
699 if (kkcc_bt_depth >= kkcc_bt_stack_size) | |
700 kkcc_bt_stack_realloc (); | |
701 } | |
702 | |
703 #else /* not DEBUG_XEMACS */ | |
704 #define kkcc_bt_init() | |
705 #define kkcc_bt_push(obj, desc, level, pos) | |
706 #endif /* not DEBUG_XEMACS */ | |
707 | |
708 /* Object memory descriptions are in the lrecord_implementation structure. | |
709 But copying them to a parallel array is much more cache-friendly. */ | |
710 const struct memory_description *lrecord_memory_descriptions[countof (lrecord_implementations_table)]; | |
711 | |
712 /* the initial stack size in kkcc_gc_stack_entries */ | |
713 #define KKCC_INIT_GC_STACK_SIZE 16384 | |
714 | |
715 typedef struct | |
716 { | |
717 void *data; | |
718 const struct memory_description *desc; | |
719 #ifdef DEBUG_XEMACS | |
720 int level; | |
721 int pos; | |
722 #endif | |
723 } kkcc_gc_stack_entry; | |
724 | |
725 | |
726 static kkcc_gc_stack_entry *kkcc_gc_stack_ptr; | |
727 static int kkcc_gc_stack_front; | |
728 static int kkcc_gc_stack_rear; | |
729 static int kkcc_gc_stack_size; | |
730 | |
731 #define KKCC_INC(i) ((i + 1) % kkcc_gc_stack_size) | |
732 #define KKCC_INC2(i) ((i + 2) % kkcc_gc_stack_size) | |
733 | |
734 #define KKCC_GC_STACK_FULL (KKCC_INC2 (kkcc_gc_stack_rear) == kkcc_gc_stack_front) | |
735 #define KKCC_GC_STACK_EMPTY (KKCC_INC (kkcc_gc_stack_rear) == kkcc_gc_stack_front) | |
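The mark stack lives in a circular buffer: kkcc_gc_stack_rear points at the
most recently pushed entry, kkcc_gc_stack_front at the oldest, and one slot is
sacrificed so that "empty" (rear one slot behind front) stays distinguishable
from "full" (rear two slots behind front). A minimal standalone model of the
same convention, for illustration only and independent of the XEmacs types:

    /* Illustrative model of the ring-buffer convention above; not part of
       gc.c.  The initial state matches kkcc_gc_stack_init: front = 0 and
       rear = SIZE - 1, so INC (rear) == front and the buffer starts empty. */
    #include <assert.h>

    #define SIZE    8
    #define INC(i)  (((i) + 1) % SIZE)
    #define INC2(i) (((i) + 2) % SIZE)

    static int buf[SIZE];
    static int front = 0, rear = SIZE - 1;

    static void
    push (int x)
    {
      assert (INC2 (rear) != front);   /* !FULL; the real code reallocs here */
      rear = INC (rear);
      buf[rear] = x;
    }

    static int
    pop (void)                         /* LIFO, as when KKCC_STACK_AS_QUEUE is undefined */
    {
      assert (INC (rear) != front);    /* !EMPTY */
      return buf[rear--];
    }

Defining KKCC_STACK_AS_QUEUE (the #if 0 at source line 601 above) makes
kkcc_gc_stack_pop take entries from the front instead, turning the depth-first
traversal into a breadth-first one.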
736 | |
737 static void | |
738 kkcc_gc_stack_init (void) | |
739 { | |
740 kkcc_gc_stack_size = KKCC_INIT_GC_STACK_SIZE; | |
741 kkcc_gc_stack_ptr = (kkcc_gc_stack_entry *) | |
742 xmalloc_and_zero (kkcc_gc_stack_size * sizeof (kkcc_gc_stack_entry)); | |
743 if (!kkcc_gc_stack_ptr) | |
744 { | |
745 stderr_out ("stack init failed for size %d\n", kkcc_gc_stack_size); | |
746 ABORT (); | |
747 } | |
748 kkcc_gc_stack_front = 0; | |
749 kkcc_gc_stack_rear = kkcc_gc_stack_size - 1; | |
750 } | |
751 | |
752 static void | |
753 kkcc_gc_stack_free (void) | |
754 { | |
755 xfree_1 (kkcc_gc_stack_ptr); | |
756 kkcc_gc_stack_ptr = 0; | |
757 kkcc_gc_stack_front = 0; | |
758 kkcc_gc_stack_rear = 0; | |
759 kkcc_gc_stack_size = 0; | |
760 } | |
761 | |
762 static void | |
763 kkcc_gc_stack_realloc (void) | |
764 { | |
765 kkcc_gc_stack_entry *old_ptr = kkcc_gc_stack_ptr; | |
766 int old_size = kkcc_gc_stack_size; | |
767 kkcc_gc_stack_size *= 2; | |
768 kkcc_gc_stack_ptr = (kkcc_gc_stack_entry *) | |
769 xmalloc_and_zero (kkcc_gc_stack_size * sizeof (kkcc_gc_stack_entry)); | |
770 if (!kkcc_gc_stack_ptr) | |
771 { | |
772 stderr_out ("stack realloc failed for size %d\n", kkcc_gc_stack_size); | |
773 ABORT (); | |
774 } | |
775 if (kkcc_gc_stack_rear >= kkcc_gc_stack_front) | |
776 { | |
777 int number_elements = kkcc_gc_stack_rear - kkcc_gc_stack_front + 1; | |
778 memcpy (kkcc_gc_stack_ptr, &old_ptr[kkcc_gc_stack_front], | |
779 number_elements * sizeof (kkcc_gc_stack_entry)); | |
780 kkcc_gc_stack_front = 0; | |
781 kkcc_gc_stack_rear = number_elements - 1; | |
782 } | |
783 else | |
784 { | |
785 int number_elements = old_size - kkcc_gc_stack_front; | |
786 memcpy (kkcc_gc_stack_ptr, &old_ptr[kkcc_gc_stack_front], | |
787 number_elements * sizeof (kkcc_gc_stack_entry)); | |
788 memcpy (&kkcc_gc_stack_ptr[number_elements], &old_ptr[0], | |
789 (kkcc_gc_stack_rear + 1) * sizeof (kkcc_gc_stack_entry)); | |
790 kkcc_gc_stack_front = 0; | |
791 kkcc_gc_stack_rear = kkcc_gc_stack_rear + number_elements; | |
792 } | |
793 xfree_1 (old_ptr); | |
794 } | |
795 | |
796 static void | |
797 #ifdef DEBUG_XEMACS | |
798 kkcc_gc_stack_push_1 (void *data, const struct memory_description *desc, | |
799 int level, int pos) | |
800 #else | |
801 kkcc_gc_stack_push_1 (void *data, const struct memory_description *desc) | |
802 #endif | |
803 { | |
804 #ifdef NEW_GC | |
805 GC_STAT_ENQUEUED; | |
806 #endif /* NEW_GC */ | |
807 if (KKCC_GC_STACK_FULL) | |
808 kkcc_gc_stack_realloc(); | |
809 kkcc_gc_stack_rear = KKCC_INC (kkcc_gc_stack_rear); | |
810 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].data = data; | |
811 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].desc = desc; | |
812 #ifdef DEBUG_XEMACS | |
813 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].level = level; | |
814 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].pos = pos; | |
815 #endif | |
816 } | |
817 | |
818 #ifdef DEBUG_XEMACS | |
819 #define kkcc_gc_stack_push(data, desc, level, pos) \ | |
820 kkcc_gc_stack_push_1 (data, desc, level, pos) | |
821 #else | |
822 #define kkcc_gc_stack_push(data, desc, level, pos) \ | |
823 kkcc_gc_stack_push_1 (data, desc) | |
824 #endif | |
825 | |
826 static kkcc_gc_stack_entry * | |
827 kkcc_gc_stack_pop (void) | |
828 { | |
829 if (KKCC_GC_STACK_EMPTY) | |
830 return 0; | |
831 #ifdef NEW_GC | |
832 GC_STAT_DEQUEUED; | |
833 #endif /* NEW_GC */ | |
834 #ifndef KKCC_STACK_AS_QUEUE | |
835 /* stack behaviour */ | |
836 return &kkcc_gc_stack_ptr[kkcc_gc_stack_rear--]; | |
837 #else | |
838 /* queue behaviour */ | |
839 { | |
840 int old_front = kkcc_gc_stack_front; | |
841 kkcc_gc_stack_front = KKCC_INC (kkcc_gc_stack_front); | |
842 return &kkcc_gc_stack_ptr[old_front]; | |
843 } | |
844 #endif | |
845 } | |
846 | |
847 void | |
848 #ifdef DEBUG_XEMACS | |
849 kkcc_gc_stack_push_lisp_object_1 (Lisp_Object obj, int level, int pos) | |
850 #else | |
851 kkcc_gc_stack_push_lisp_object_1 (Lisp_Object obj) | |
852 #endif | |
853 { | |
854 if (XTYPE (obj) == Lisp_Type_Record) | |
855 { | |
856 struct lrecord_header *lheader = XRECORD_LHEADER (obj); | |
857 const struct memory_description *desc; | |
858 GC_CHECK_LHEADER_INVARIANTS (lheader); | |
859 desc = RECORD_DESCRIPTION (lheader); | |
860 if (! MARKED_RECORD_HEADER_P (lheader)) | |
861 { | |
862 #ifdef NEW_GC | |
863 MARK_GREY (lheader); | |
864 #else /* not NEW_GC */ | |
865 MARK_RECORD_HEADER (lheader); | |
866 #endif /* not NEW_GC */ | |
867 kkcc_gc_stack_push ((void *) lheader, desc, level, pos); | |
868 } | |
869 } | |
870 } | |
871 | |
872 #ifdef NEW_GC | |
873 #ifdef DEBUG_XEMACS | |
874 #define kkcc_gc_stack_push_lisp_object(obj, level, pos) \ | |
875 kkcc_gc_stack_push_lisp_object_1 (obj, level, pos) | |
876 #else | |
877 #define kkcc_gc_stack_push_lisp_object(obj, level, pos) \ | |
878 kkcc_gc_stack_push_lisp_object_1 (obj) | |
879 #endif | |
880 | |
881 void | |
882 #ifdef DEBUG_XEMACS | |
883 kkcc_gc_stack_repush_dirty_object_1 (Lisp_Object obj, int level, int pos) | |
884 #else | |
885 kkcc_gc_stack_repush_dirty_object_1 (Lisp_Object obj) | |
886 #endif | |
887 { | |
888 if (XTYPE (obj) == Lisp_Type_Record) | |
889 { | |
890 struct lrecord_header *lheader = XRECORD_LHEADER (obj); | |
891 const struct memory_description *desc; | |
892 GC_STAT_REPUSHED; | |
893 GC_CHECK_LHEADER_INVARIANTS (lheader); | |
894 desc = RECORD_DESCRIPTION (lheader); | |
895 MARK_GREY (lheader); | |
896 kkcc_gc_stack_push ((void*) lheader, desc, level, pos); | |
897 } | |
898 } | |
899 #endif /* NEW_GC */ | |
900 | |
901 #ifdef ERROR_CHECK_GC | |
902 #define KKCC_DO_CHECK_FREE(obj, allow_free) \ | |
903 do \ | |
904 { \ | |
905 if (!allow_free && XTYPE (obj) == Lisp_Type_Record) \ | |
906 { \ | |
907 struct lrecord_header *lheader = XRECORD_LHEADER (obj); \ | |
908 GC_CHECK_NOT_FREE (lheader); \ | |
909 } \ | |
910 } while (0) | |
911 #else | |
912 #define KKCC_DO_CHECK_FREE(obj, allow_free) | |
913 #endif | |
914 | |
915 #ifdef ERROR_CHECK_GC | |
916 #ifdef DEBUG_XEMACS | |
917 static void | |
918 mark_object_maybe_checking_free_1 (Lisp_Object obj, int allow_free, | |
919 int level, int pos) | |
920 #else | |
921 static void | |
922 mark_object_maybe_checking_free_1 (Lisp_Object obj, int allow_free) | |
923 #endif | |
924 { | |
925 KKCC_DO_CHECK_FREE (obj, allow_free); | |
926 kkcc_gc_stack_push_lisp_object (obj, level, pos); | |
927 } | |
928 | |
929 #ifdef DEBUG_XEMACS | |
930 #define mark_object_maybe_checking_free(obj, allow_free, level, pos) \ | |
931 mark_object_maybe_checking_free_1 (obj, allow_free, level, pos) | |
932 #else | |
933 #define mark_object_maybe_checking_free(obj, allow_free, level, pos) \ | |
934 mark_object_maybe_checking_free_1 (obj, allow_free) | |
935 #endif | |
936 #else /* not ERROR_CHECK_GC */ | |
937 #define mark_object_maybe_checking_free(obj, allow_free, level, pos) \ | |
938 kkcc_gc_stack_push_lisp_object (obj, level, pos) | |
939 #endif /* not ERROR_CHECK_GC */ | |
940 | |
941 | |
942 /* This function loops over all elements of a struct pointer and |
943 pushes each element on the KKCC mark stack. */ |
944 static void | |
945 #ifdef DEBUG_XEMACS | |
946 mark_struct_contents_1 (const void *data, | |
947 const struct sized_memory_description *sdesc, | |
948 int count, int level, int pos) | |
949 #else | |
950 mark_struct_contents_1 (const void *data, | |
951 const struct sized_memory_description *sdesc, | |
952 int count) | |
953 #endif | |
954 { | |
955 int i; | |
956 Bytecount elsize; | |
957 elsize = lispdesc_block_size (data, sdesc); | |
958 | |
959 for (i = 0; i < count; i++) | |
960 { | |
961 kkcc_gc_stack_push (((char *) data) + elsize * i, sdesc->description, | |
962 level, pos); | |
963 } | |
964 } | |
965 | |
966 #ifdef DEBUG_XEMACS | |
967 #define mark_struct_contents(data, sdesc, count, level, pos) \ | |
968 mark_struct_contents_1 (data, sdesc, count, level, pos) | |
969 #else | |
970 #define mark_struct_contents(data, sdesc, count, level, pos) \ | |
971 mark_struct_contents_1 (data, sdesc, count) | |
972 #endif | |
973 | |
974 | |
975 #ifdef NEW_GC | |
976 /* This function loops over all elements of a Lisp-object block and |
977 pushes each element that is not yet marked on the KKCC mark stack. */ |
978 static void | |
979 #ifdef DEBUG_XEMACS | |
980 mark_lisp_object_block_contents_1 (const void *data, | |
981 const struct sized_memory_description *sdesc, | |
982 int count, int level, int pos) | |
983 #else | |
984 mark_lisp_object_block_contents_1 (const void *data, | |
985 const struct sized_memory_description *sdesc, | |
986 int count) | |
987 #endif | |
988 { | |
989 int i; | |
990 Bytecount elsize; | |
991 elsize = lispdesc_block_size (data, sdesc); | |
992 | |
993 for (i = 0; i < count; i++) | |
994 { | |
995 const Lisp_Object obj = wrap_pointer_1 (((char *) data) + elsize * i); | |
996 if (XTYPE (obj) == Lisp_Type_Record) | |
997 { | |
998 struct lrecord_header *lheader = XRECORD_LHEADER (obj); | |
999 const struct memory_description *desc; | |
1000 GC_CHECK_LHEADER_INVARIANTS (lheader); | |
1001 desc = sdesc->description; | |
1002 if (! MARKED_RECORD_HEADER_P (lheader)) | |
1003 { | |
1004 MARK_GREY (lheader); | |
1005 kkcc_gc_stack_push ((void *) lheader, desc, level, pos); | |
1006 } | |
1007 } | |
1008 } | |
1009 } | |
1010 | |
1011 #ifdef DEBUG_XEMACS | |
1012 #define mark_lisp_object_block_contents(data, sdesc, count, level, pos) \ | |
1013 mark_lisp_object_block_contents_1 (data, sdesc, count, level, pos) | |
1014 #else | |
1015 #define mark_lisp_object_block_contents(data, sdesc, count, level, pos) \ | |
1016 mark_lisp_object_block_contents_1 (data, sdesc, count) | |
1017 #endif | |
1018 #endif /* NEW_GC */ |
1019 | |
1020 /* This function implements the KKCC mark algorithm. | |
1021 Instead of calling mark_object, all the alive Lisp_Objects are pushed | |
1022 on the kkcc_gc_stack. This function processes all elements on the stack | |
1023 according to their descriptions. */ | |
1024 static void | |
5054 | 1025 kkcc_marking (int USED_IF_NEW_GC (cnt)) |
3092 | 1026 { |
1027 kkcc_gc_stack_entry *stack_entry = 0; | |
1028 void *data = 0; | |
1029 const struct memory_description *desc = 0; | |
1030 int pos; | |
1031 #ifdef NEW_GC | |
5046 | 1032 int obj_count = cnt; |
3092 | 1033 #endif /* NEW_GC */ |
1034 #ifdef DEBUG_XEMACS | |
1035 int level = 0; | |
1036 #endif | |
1037 | |
1038 while ((stack_entry = kkcc_gc_stack_pop ()) != 0) | |
1039 { | |
1040 data = stack_entry->data; | |
1041 desc = stack_entry->desc; | |
1042 #ifdef DEBUG_XEMACS | |
1043 level = stack_entry->level + 1; | |
1044 #endif | |
1045 kkcc_bt_push (data, desc, stack_entry->level, stack_entry->pos); | |
1046 | |
1047 #ifdef NEW_GC | |
1048 /* Mark black if object is currently grey. This first checks |
1049 whether the object is really allocated on the mc-heap. If it is, |
1050 it can be marked black; if it is not, it cannot be marked. */ | |
1051 maybe_mark_black (data); | |
1052 #endif /* NEW_GC */ | |
1053 | |
1054 if (!data) continue; | |
1055 | |
1056 gc_checking_assert (data); | |
1057 gc_checking_assert (desc); | |
1058 | |
1059 for (pos = 0; desc[pos].type != XD_END; pos++) | |
1060 { | |
1061 const struct memory_description *desc1 = &desc[pos]; | |
1062 const void *rdata = | |
1063 (const char *) data + lispdesc_indirect_count (desc1->offset, | |
1064 desc, data); | |
1065 union_switcheroo: | |
1066 | |
1067 /* If the flag says don't mark, then don't mark. */ | |
1068 if ((desc1->flags) & XD_FLAG_NO_KKCC) | |
1069 continue; | |
1070 | |
1071 switch (desc1->type) | |
1072 { | |
1073 case XD_BYTECOUNT: | |
1074 case XD_ELEMCOUNT: | |
1075 case XD_HASHCODE: | |
1076 case XD_INT: | |
1077 case XD_LONG: | |
1078 case XD_INT_RESET: | |
1079 case XD_LO_LINK: | |
1080 case XD_OPAQUE_PTR: | |
1081 case XD_OPAQUE_DATA_PTR: | |
1082 case XD_ASCII_STRING: | |
1083 case XD_DOC_STRING: | |
1084 break; | |
1085 case XD_LISP_OBJECT: | |
1086 { | |
1087 const Lisp_Object *stored_obj = (const Lisp_Object *) rdata; | |
1088 | |
1089 /* Because of the way that tagged objects work (pointers and | |
1090 Lisp_Objects have the same representation), XD_LISP_OBJECT | |
1091 can be used for untagged pointers. They might be NULL, | |
1092 though. */ | |
1093 if (EQ (*stored_obj, Qnull_pointer)) | |
1094 break; | |
3263 | 1095 #ifdef NEW_GC |
3092 | 1096 mark_object_maybe_checking_free (*stored_obj, 0, level, pos); |
3263 | 1097 #else /* not NEW_GC */ |
3092 | 1098 mark_object_maybe_checking_free |
1099 (*stored_obj, (desc1->flags) & XD_FLAG_FREE_LISP_OBJECT, | |
1100 level, pos); | |
3263 | 1101 #endif /* not NEW_GC */ |
3092 | 1102 break; |
1103 } | |
1104 case XD_LISP_OBJECT_ARRAY: | |
1105 { | |
1106 int i; | |
1107 EMACS_INT count = | |
1108 lispdesc_indirect_count (desc1->data1, desc, data); | |
1109 | |
1110 for (i = 0; i < count; i++) | |
1111 { | |
1112 const Lisp_Object *stored_obj = | |
1113 (const Lisp_Object *) rdata + i; | |
1114 | |
1115 if (EQ (*stored_obj, Qnull_pointer)) | |
1116 break; | |
3263 | 1117 #ifdef NEW_GC |
3092 | 1118 mark_object_maybe_checking_free |
1119 (*stored_obj, 0, level, pos); | |
3263 | 1120 #else /* not NEW_GC */ |
3092 | 1121 mark_object_maybe_checking_free |
1122 (*stored_obj, (desc1->flags) & XD_FLAG_FREE_LISP_OBJECT, | |
1123 level, pos); | |
3263 | 1124 #endif /* not NEW_GC */ |
3092 | 1125 } |
1126 break; | |
1127 } | |
1128 #ifdef NEW_GC | |
1129 case XD_LISP_OBJECT_BLOCK_PTR: | |
1130 { | |
1131 EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc, | |
1132 data); | |
1133 const struct sized_memory_description *sdesc = | |
1134 lispdesc_indirect_description (data, desc1->data2.descr); | |
1135 const char *dobj = * (const char **) rdata; | |
1136 if (dobj) | |
1137 mark_lisp_object_block_contents | |
1138 (dobj, sdesc, count, level, pos); | |
1139 break; | |
1140 } | |
1141 #endif /* NEW_GC */ | |
1142 case XD_BLOCK_PTR: | |
1143 { | |
1144 EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc, | |
1145 data); | |
1146 const struct sized_memory_description *sdesc = | |
1147 lispdesc_indirect_description (data, desc1->data2.descr); | |
1148 const char *dobj = * (const char **) rdata; | |
1149 if (dobj) | |
1150 mark_struct_contents (dobj, sdesc, count, level, pos); | |
1151 break; | |
1152 } | |
1153 case XD_BLOCK_ARRAY: | |
1154 { | |
1155 EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc, | |
1156 data); | |
1157 const struct sized_memory_description *sdesc = | |
1158 lispdesc_indirect_description (data, desc1->data2.descr); | |
1159 | |
1160 mark_struct_contents (rdata, sdesc, count, level, pos); | |
1161 break; | |
1162 } | |
1163 case XD_UNION: | |
1164 case XD_UNION_DYNAMIC_SIZE: | |
1165 desc1 = lispdesc_process_xd_union (desc1, desc, data); | |
1166 if (desc1) | |
1167 goto union_switcheroo; | |
1168 break; | |
1169 | |
1170 default: | |
1171 stderr_out ("Unsupported description type : %d\n", desc1->type); | |
1172 kkcc_backtrace (); | |
1173 ABORT (); | |
1174 } | |
1175 } | |
1176 | |
1177 #ifdef NEW_GC | |
1178 if (cnt) | |
5046 | 1179 if (!--obj_count) |
3092 | 1180 break; |
1181 #endif /* NEW_GC */ | |
1182 } | |
1183 } | |
1184 #endif /* USE_KKCC */ | |
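Taken together, the USE_KKCC protocol is: push every root with
kkcc_gc_stack_push_lisp_object, then let kkcc_marking drain the stack. A cnt
of 0 means a complete traversal; under NEW_GC a positive cnt bounds how many
objects one incremental step processes (presumably
gc_incremental_traversal_threshold). A hedged usage sketch follows; the real
drivers are gc_mark_root_set and gc_finish_mark below, and Vsome_root is a
made-up placeholder:

    #ifdef USE_KKCC
      kkcc_gc_stack_init ();
      /* push the roots; 0 = nesting level, -1 tags a root-set entry */
      kkcc_gc_stack_push_lisp_object (Vsome_root, 0, -1);
      kkcc_marking (0);   /* full traversal: run until the stack is empty */
      /* an incremental step under NEW_GC would instead be something like
         kkcc_marking (gc_incremental_traversal_threshold); */
      kkcc_gc_stack_free ();
    #endif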
1185 | |
1186 /* I hate duplicating all this crap! */ | |
1187 int | |
1188 marked_p (Lisp_Object obj) | |
1189 { | |
1190 /* Checks we used to perform. */ | |
1191 /* if (EQ (obj, Qnull_pointer)) return 1; */ | |
1192 /* if (!POINTER_TYPE_P (XGCTYPE (obj))) return 1; */ | |
1193 /* if (PURIFIED (XPNTR (obj))) return 1; */ | |
1194 | |
1195 if (XTYPE (obj) == Lisp_Type_Record) | |
1196 { | |
1197 struct lrecord_header *lheader = XRECORD_LHEADER (obj); | |
1198 | |
1199 GC_CHECK_LHEADER_INVARIANTS (lheader); | |
1200 | |
1201 return MARKED_RECORD_HEADER_P (lheader); | |
1202 } | |
1203 return 1; | |
1204 } | |
1205 | |
1206 | |
1207 /* Mark reference to a Lisp_Object. If the object referred to has not been | |
1208 seen yet, recursively mark all the references contained in it. */ | |
1209 void | |
1210 mark_object ( | |
1211 #ifdef USE_KKCC | |
1212 Lisp_Object UNUSED (obj) | |
1213 #else | |
1214 Lisp_Object obj | |
1215 #endif | |
1216 ) | |
1217 { | |
1218 #ifdef USE_KKCC | |
1219 /* this code should never be reached when configured for KKCC */ | |
1220 stderr_out ("KKCC: Invalid mark_object call.\n"); | |
1221 stderr_out ("Replace mark_object with kkcc_gc_stack_push_lisp_object.\n"); | |
1222 ABORT (); | |
1223 #else /* not USE_KKCC */ | |
1224 | |
1225 tail_recurse: | |
1226 | |
1227 /* Checks we used to perform */ | |
1228 /* if (EQ (obj, Qnull_pointer)) return; */ | |
1229 /* if (!POINTER_TYPE_P (XGCTYPE (obj))) return; */ | |
1230 /* if (PURIFIED (XPNTR (obj))) return; */ | |
1231 | |
1232 if (XTYPE (obj) == Lisp_Type_Record) | |
1233 { | |
1234 struct lrecord_header *lheader = XRECORD_LHEADER (obj); | |
1235 | |
1236 GC_CHECK_LHEADER_INVARIANTS (lheader); | |
1237 | |
1238 /* We handle this separately, above, so we can mark free objects */ | |
1239 GC_CHECK_NOT_FREE (lheader); | |
1240 | |
1241 /* All c_readonly objects have their mark bit set, | |
1242 so that we only need to check the mark bit here. */ | |
1243 if (! MARKED_RECORD_HEADER_P (lheader)) | |
1244 { | |
1245 MARK_RECORD_HEADER (lheader); | |
1246 | |
1247 if (RECORD_MARKER (lheader)) | |
1248 { | |
1249 obj = RECORD_MARKER (lheader) (obj); | |
1250 if (!NILP (obj)) goto tail_recurse; | |
1251 } | |
1252 } | |
1253 } | |
1254 #endif /* not KKCC */ | |
1255 } | |
1256 | |
1257 | |
1258 /************************************************************************/ | |
1259 /* Hooks */ | |
1260 /************************************************************************/ | |
1261 | |
1262 /* Nonzero when calling certain hooks or doing other things where a GC | |
1263 would be bad. It prevents infinite recursive calls to gc. */ | |
1264 int gc_currently_forbidden; | |
1265 | |
1266 int | |
1267 begin_gc_forbidden (void) | |
1268 { | |
1269 return internal_bind_int (&gc_currently_forbidden, 1); | |
1270 } | |
1271 | |
1272 void | |
1273 end_gc_forbidden (int count) | |
1274 { | |
1275 unbind_to (count); | |
1276 } | |
1277 | |
1278 /* Hooks. */ | |
1279 Lisp_Object Vpre_gc_hook, Qpre_gc_hook; | |
1280 Lisp_Object Vpost_gc_hook, Qpost_gc_hook; | |
1281 | |
1282 /* Maybe we want to use this when doing a "panic" gc after memory_full()? */ | |
1283 static int gc_hooks_inhibited; | |
1284 | |
1285 struct post_gc_action | |
1286 { | |
1287 void (*fun) (void *); | |
1288 void *arg; | |
1289 }; | |
1290 | |
1291 typedef struct post_gc_action post_gc_action; | |
1292 | |
1293 typedef struct | |
1294 { | |
1295 Dynarr_declare (post_gc_action); | |
1296 } post_gc_action_dynarr; | |
1297 | |
1298 static post_gc_action_dynarr *post_gc_actions; | |
1299 | |
1300 /* Register an action to be called at the end of GC. | |
1301 gc_in_progress is 0 when this is called. | |
1302 This is used when it is discovered that an action needs to be taken, | |
1303 but it's during GC, so it's not safe. (e.g. in a finalize method.) | |
1304 | |
1305 As a general rule, do not use Lisp objects here. | |
1306 And NEVER signal an error. | |
1307 */ | |
1308 | |
1309 void | |
1310 register_post_gc_action (void (*fun) (void *), void *arg) | |
1311 { | |
1312 post_gc_action action; | |
1313 | |
1314 if (!post_gc_actions) | |
1315 post_gc_actions = Dynarr_new (post_gc_action); | |
1316 | |
1317 action.fun = fun; | |
1318 action.arg = arg; | |
1319 | |
1320 Dynarr_add (post_gc_actions, action); | |
1321 } | |
1322 | |
1323 static void | |
1324 run_post_gc_actions (void) | |
1325 { | |
1326 int i; | |
1327 | |
1328 if (post_gc_actions) | |
1329 { | |
1330 for (i = 0; i < Dynarr_length (post_gc_actions); i++) | |
1331 { | |
1332 post_gc_action action = Dynarr_at (post_gc_actions, i); | |
1333 (action.fun) (action.arg); | |
1334 } | |
1335 | |
1336 Dynarr_reset (post_gc_actions); | |
1337 } | |
1338 } | |
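A typical use, as the comment above suggests, is deferring cleanup discovered
inside a finalize method until the collector has finished. An illustrative and
entirely hypothetical registration (my_handle and close_my_handle are made-up
names, not XEmacs APIs):

    /* Runs after GC completes, outside gc_in_progress. */
    static void
    close_my_handle (void *arg)
    {
      struct my_handle *h = (struct my_handle *) arg;
      /* ... release the OS resource and free h here ... */
    }

    /* In the object's finalize method (no Lisp objects, never signal):
         register_post_gc_action (close_my_handle, (void *) h);          */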
1339 | |
3263 | 1340 #ifdef NEW_GC |
1341 /* Asynchronous finalization. */ | |
1342 typedef struct finalize_elem | |
1343 { | |
1344 Lisp_Object obj; | |
1345 struct finalize_elem *next; | |
1346 } finalize_elem; | |
1347 | |
1348 finalize_elem *Vall_finalizable_objs; | |
1349 Lisp_Object Vfinalizers_to_run; | |
1350 | |
1351 void | |
1352 add_finalizable_obj (Lisp_Object obj) | |
1353 { | |
1354 finalize_elem *next = Vall_finalizable_objs; | |
1355 Vall_finalizable_objs = | |
1356 (finalize_elem *) xmalloc_and_zero (sizeof (finalize_elem)); | |
1357 Vall_finalizable_objs->obj = obj; | |
1358 Vall_finalizable_objs->next = next; | |
1359 } | |
1360 | |
1361 void | |
1362 register_for_finalization (void) | |
1363 { | |
1364 finalize_elem *rest = Vall_finalizable_objs; | |
1365 | |
1366 if (!rest) | |
1367 return; | |
1368 | |
1369 while (!marked_p (rest->obj)) | |
1370 { | |
1371 finalize_elem *temp = rest; | |
1372 Vfinalizers_to_run = Fcons (rest->obj, Vfinalizers_to_run); | |
1373 Vall_finalizable_objs = rest->next; | |
4976 | 1374 xfree (temp); |
3263 | 1375 rest = Vall_finalizable_objs; |
1376 } | |
1377 | |
1378 while (rest->next) | |
1379 { | |
1380 if (LRECORDP (rest->next->obj) | |
1381 && !marked_p (rest->next->obj)) | |
1382 { | |
1383 finalize_elem *temp = rest->next; | |
1384 Vfinalizers_to_run = Fcons (rest->next->obj, Vfinalizers_to_run); | |
1385 rest->next = rest->next->next; | |
4976 | 1386 xfree (temp); |
3263 | 1387 } |
1388 else | |
1389 { | |
1390 rest = rest->next; | |
1391 } | |
1392 } | |
1393 /* Keep objects alive that need to be finalized by marking | |
1394 Vfinalizers_to_run transitively. */ | |
1395 kkcc_gc_stack_push_lisp_object (Vfinalizers_to_run, 0, -1); | |
1396 kkcc_marking (0); | |
1397 } | |
1398 | |
1399 void | |
1400 run_finalizers (void) | |
1401 { | |
1402 Lisp_Object rest; | |
1403 for (rest = Vfinalizers_to_run; !NILP (rest); rest = XCDR (rest)) | |
1404 { | |
1405 MC_ALLOC_CALL_FINALIZER (XPNTR (XCAR (rest))); | |
1406 } | |
1407 Vfinalizers_to_run = Qnil; | |
1408 } | |
1409 #endif /* NEW_GC */ |
3092 | 1410 |
1411 | |
1412 /************************************************************************/ | |
1413 /* Garbage Collection */ | |
1414 /************************************************************************/ | |
1415 | |
1416 /* Enable/disable incremental garbage collection during runtime. */ | |
1417 int allow_incremental_gc; | |
1418 | |
1419 /* For profiling. */ | |
1420 static Lisp_Object QSin_garbage_collection; | |
1421 | |
1422 /* Nonzero means display messages at beginning and end of GC. */ | |
1423 int garbage_collection_messages; | |
1424 | |
1425 /* "Garbage collecting" */ | |
1426 Lisp_Object Vgc_message; | |
1427 Lisp_Object Vgc_pointer_glyph; | |
1428 static const Ascbyte gc_default_message[] = "Garbage collecting"; | |
1429 Lisp_Object Qgarbage_collecting; | |
1430 | |
1431 /* "Locals" during GC. */ | |
1432 struct frame *f; | |
1433 int speccount; | |
1434 int cursor_changed; | |
1435 Lisp_Object pre_gc_cursor; | |
1436 | |
1437 /* PROFILE_DECLARE */ | |
1438 int do_backtrace; | |
1439 struct backtrace backtrace; | |
1440 | |
1441 /* Maximum amount of C stack to save when a GC happens. */ | |
1442 #ifndef MAX_SAVE_STACK | |
1443 #define MAX_SAVE_STACK 0 /* 16000 */ | |
1444 #endif | |
1445 | |
5016 | 1446 static void |
3267 | 1447 show_gc_cursor_and_message (void) |
3092 | 1448 { |
3267 | 1449 /* Now show the GC cursor/message. */ |
1450 pre_gc_cursor = Qnil; | |
1451 cursor_changed = 0; | |
3092 | 1452 |
1453 /* We used to call selected_frame() here. | |
1454 | |
1455 The following functions cannot be called inside GC | |
1456 so this was moved to after the above tests. */ |
1457 { | |
1458 Lisp_Object frame; | |
1459 Lisp_Object device = Fselected_device (Qnil); | |
1460 if (NILP (device)) /* Could happen during startup, eg. if always_gc */ | |
1461 return; | |
1462 frame = Fselected_frame (device); | |
1463 if (NILP (frame)) | |
1464 invalid_state ("No frames exist on device", device); | |
1465 f = XFRAME (frame); | |
1466 } | |
1467 | |
1468 if (!noninteractive) | |
1469 { | |
1470 if (FRAME_WIN_P (f)) | |
1471 { | |
1472 Lisp_Object frame = wrap_frame (f); | |
1473 Lisp_Object cursor = glyph_image_instance (Vgc_pointer_glyph, | |
1474 FRAME_SELECTED_WINDOW (f), | |
1475 ERROR_ME_NOT, 1); | |
1476 pre_gc_cursor = f->pointer; | |
1477 if (POINTER_IMAGE_INSTANCEP (cursor) | |
1478 /* don't change if we don't know how to change back. */ | |
1479 && POINTER_IMAGE_INSTANCEP (pre_gc_cursor)) | |
1480 { | |
1481 cursor_changed = 1; | |
1482 Fset_frame_pointer (frame, cursor); | |
1483 } | |
1484 } | |
1485 | |
1486 /* Don't print messages to the stream device. */ | |
1487 if (!cursor_changed && !FRAME_STREAM_P (f)) | |
1488 { | |
1489 if (garbage_collection_messages) | |
1490 { | |
1491 Lisp_Object args[2], whole_msg; | |
1492 args[0] = (STRINGP (Vgc_message) ? Vgc_message : | |
1493 build_msg_string (gc_default_message)); | |
4952 | 1494 args[1] = build_ascstring ("..."); |
3092 | 1495 whole_msg = Fconcat (2, args); |
1496 echo_area_message (f, (Ibyte *) 0, whole_msg, 0, -1, | |
1497 Qgarbage_collecting); | |
1498 } | |
1499 } | |
1500 } | |
3267 | 1501 } |
1502 | |
5016 | 1503 static void |
3267 | 1504 remove_gc_cursor_and_message (void) |
1505 { | |
1506 /* Now remove the GC cursor/message */ | |
1507 if (!noninteractive) | |
1508 { | |
1509 if (cursor_changed) | |
1510 Fset_frame_pointer (wrap_frame (f), pre_gc_cursor); | |
1511 else if (!FRAME_STREAM_P (f)) | |
1512 { | |
1513 /* Show "...done" only if the echo area would otherwise be empty. */ | |
1514 if (NILP (clear_echo_area (selected_frame (), | |
1515 Qgarbage_collecting, 0))) | |
1516 { | |
1517 if (garbage_collection_messages) | |
1518 { | |
1519 Lisp_Object args[2], whole_msg; | |
1520 args[0] = (STRINGP (Vgc_message) ? Vgc_message : | |
1521 build_msg_string (gc_default_message)); | |
1522 args[1] = build_msg_string ("... done"); | |
1523 whole_msg = Fconcat (2, args); | |
1524 echo_area_message (selected_frame (), (Ibyte *) 0, | |
1525 whole_msg, 0, -1, | |
1526 Qgarbage_collecting); | |
1527 } | |
1528 } | |
1529 } | |
1530 } | |
1531 } | |
1532 | |
5016 | 1533 static void |
3267 | 1534 gc_prepare (void) |
1535 { | |
1536 #if MAX_SAVE_STACK > 0 | |
1537 char stack_top_variable; | |
1538 extern char *stack_bottom; | |
1539 #endif | |
1540 | |
1541 #ifdef NEW_GC | |
1542 GC_STAT_START_NEW_GC; | |
1543 GC_SET_PHASE (INIT_GC); | |
1544 #endif /* NEW_GC */ | |
1545 | |
1546 do_backtrace = profiling_active || backtrace_with_internal_sections; | |
1547 | |
1548 assert (!gc_in_progress); | |
1549 assert (!in_display || gc_currently_forbidden); | |
1550 | |
1551 PROFILE_RECORD_ENTERING_SECTION (QSin_garbage_collection); | |
1552 | |
1553 need_to_signal_post_gc = 0; | |
1554 recompute_funcall_allocation_flag (); | |
1555 | |
1556 if (!gc_hooks_inhibited) | |
1557 run_hook_trapping_problems | |
1558 (Qgarbage_collecting, Qpre_gc_hook, | |
1559 INHIBIT_EXISTING_PERMANENT_DISPLAY_OBJECT_DELETION); | |
3092 | 1560 |
1561 /***** Now we actually start the garbage collection. */ | |
1562 | |
1563 gc_in_progress = 1; | |
1564 #ifndef NEW_GC | |
5014 | 1565 inhibit_non_essential_conversion_operations++; |
3263 | 1566 #endif /* not NEW_GC */ |
3092 | 1567 |
1568 #if MAX_SAVE_STACK > 0 | |
1569 | |
1570 /* Save a copy of the contents of the stack, for debugging. */ | |
1571 if (!purify_flag) | |
1572 { | |
1573 /* Static buffer in which we save a copy of the C stack at each GC. */ | |
1574 static char *stack_copy; | |
1575 static Bytecount stack_copy_size; | |
1576 | |
1577 ptrdiff_t stack_diff = &stack_top_variable - stack_bottom; | |
1578 Bytecount stack_size = (stack_diff > 0 ? stack_diff : -stack_diff); | |
1579 if (stack_size < MAX_SAVE_STACK) | |
1580 { | |
1581 if (stack_copy_size < stack_size) | |
1582 { | |
1583 stack_copy = (char *) xrealloc (stack_copy, stack_size); | |
1584 stack_copy_size = stack_size; | |
1585 } | |
1586 | |
1587 memcpy (stack_copy, | |
1588 stack_diff > 0 ? stack_bottom : &stack_top_variable, | |
1589 stack_size); | |
1590 } | |
1591 } | |
1592 #endif /* MAX_SAVE_STACK > 0 */ | |
1593 | |
1594 /* Do some totally ad-hoc resource clearing. */ | |
1595 /* #### generalize this? */ | |
1596 clear_event_resource (); | |
1597 cleanup_specifiers (); | |
1598 cleanup_buffer_undo_lists (); | |
1599 } | |
1600 | |
5016 | 1601 static void |
3092 | 1602 gc_mark_root_set ( |
1603 #ifdef NEW_GC | |
1604 enum gc_phase phase | |
1605 #else /* not NEW_GC */ | |
1606 void | |
1607 #endif /* not NEW_GC */ | |
1608 ) | |
1609 { | |
1610 #ifdef NEW_GC | |
1611 GC_SET_PHASE (phase); | |
1612 #endif /* NEW_GC */ | |
1613 | |
1614 /* Mark all the special slots that serve as the roots of accessibility. */ | |
1615 | |
1616 #ifdef USE_KKCC | |
1617 # define mark_object(obj) kkcc_gc_stack_push_lisp_object (obj, 0, -1) | |
1618 #endif /* USE_KKCC */ | |
1619 | |
1620 { /* staticpro() */ | |
1621 Lisp_Object **p = Dynarr_begin (staticpros); | |
4921 | 1622 Elemcount len = Dynarr_length (staticpros); |
3092 | 1623 Elemcount count; |
4921 | 1624 for (count = 0; count < len; count++, p++) |
3092 | 1625 /* Need to check if the pointer in the staticpro array is not |
1626 NULL. A gc can occur after variable is added to the staticpro | |
1627 array and _before_ it is correctly initialized. In this case | |
1628 its value is NULL, which we have to catch here. */ | |
1629 if (*p) | |
3486 | 1630 mark_object (**p); |
3092 | 1631 } |
1632 | |
1633 { /* staticpro_nodump() */ | |
1634 Lisp_Object **p = Dynarr_begin (staticpros_nodump); | |
4921 | 1635 Elemcount len = Dynarr_length (staticpros_nodump); |
3092 | 1636 Elemcount count; |
4921 | 1637 for (count = 0; count < len; count++, p++) |
3092 | 1638 /* Need to check if the pointer in the staticpro array is not |
1639 NULL. A gc can occur after variable is added to the staticpro | |
1640 array and _before_ it is correctly initialized. In this case | |
1641 its value is NULL, which we have to catch here. */ | |
1642 if (*p) | |
3486 | 1643 mark_object (**p); |
3092 | 1644 } |
1645 | |
3263 | 1646 #ifdef NEW_GC |
3092 | 1647 { /* mcpro () */ |
1648 Lisp_Object *p = Dynarr_begin (mcpros); | |
4921 | 1649 Elemcount len = Dynarr_length (mcpros); |
3092 | 1650 Elemcount count; |
4921 | 1651 for (count = 0; count < len; count++, p++) |
1652 mark_object (*p); |
3092 | 1653 } |
3263 | 1654 #endif /* NEW_GC */ |
3092 | 1655 |
1656 { /* GCPRO() */ | |
1657 struct gcpro *tail; | |
1658 int i; | |
1659 for (tail = gcprolist; tail; tail = tail->next) | |
1660 for (i = 0; i < tail->nvars; i++) | |
1661 mark_object (tail->var[i]); | |
1662 } | |
1663 | |
1664 { /* specbind() */ | |
1665 struct specbinding *bind; | |
1666 for (bind = specpdl; bind != specpdl_ptr; bind++) | |
1667 { | |
1668 mark_object (bind->symbol); | |
1669 mark_object (bind->old_value); | |
1670 } | |
1671 } | |
1672 | |
1673 { | |
1674 struct catchtag *c; | |
1675 for (c = catchlist; c; c = c->next) | |
1676 { | |
1677 mark_object (c->tag); | |
1678 mark_object (c->val); | |
1679 mark_object (c->actual_tag); | |
1680 mark_object (c->backtrace); | |
1681 } | |
1682 } | |
1683 | |
1684 { | |
1685 struct backtrace *backlist; | |
1686 for (backlist = backtrace_list; backlist; backlist = backlist->next) | |
1687 { | |
1688 int nargs = backlist->nargs; | |
1689 int i; | |
1690 | |
1691 mark_object (*backlist->function); | |
1692 if (nargs < 0 /* nargs == UNEVALLED || nargs == MANY */ | |
1693 /* might be fake (internal profiling entry) */ | |
1694 && backlist->args) | |
1695 mark_object (backlist->args[0]); | |
1696 else | |
1697 for (i = 0; i < nargs; i++) | |
1698 mark_object (backlist->args[i]); | |
1699 } | |
1700 } | |
1701 | |
1702 mark_profiling_info (); | |
1703 #ifdef USE_KKCC | |
1704 # undef mark_object | |
1705 #endif | |
1706 } | |
1707 | |
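
The GCPRO() block above walks gcprolist, the chain of stack-local protection records. A minimal sketch of the idiom that puts entries on that chain, assuming the standard GCPRO1/UNGCPRO macros from lisp.h; the function itself is hypothetical:

static Lisp_Object
example_list_two (Lisp_Object a, Lisp_Object b)
{
  Lisp_Object val = Qnil;
  struct gcpro gcpro1;

  GCPRO1 (val);            /* link &val into gcprolist */
  val = Fcons (b, Qnil);   /* allocation here may trigger a GC */
  val = Fcons (a, val);    /* val stays reachable via the root-set walk */
  UNGCPRO;                 /* unlink before returning */
  return val;
}
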
5016 | 1708 static void |
3092 | 1709 gc_finish_mark (void) |
1710 { | |
1711 #ifdef NEW_GC | |
1712 GC_SET_PHASE (FINISH_MARK); | |
1713 #endif /* NEW_GC */ | |
1714 init_marking_ephemerons (); | |
1715 | |
1716 while (finish_marking_weak_hash_tables () > 0 || | |
1717 finish_marking_weak_lists () > 0 || | |
1718 continue_marking_ephemerons () > 0) | |
1719 #ifdef USE_KKCC | |
1720 { | |
1721 kkcc_marking (0); | |
1722 } | |
1723 #else /* not USE_KKCC */ | |
1724 ; | |
1725 #endif /* not USE_KKCC */ | |
1726 | |
1727 /* At this point, we know which objects need to be finalized: we | |
1728 still need to resurrect them */ | |
1729 | |
1730 while (finish_marking_ephemerons () > 0 || | |
1731 finish_marking_weak_lists () > 0 || | |
1732 finish_marking_weak_hash_tables () > 0) | |
1733 #ifdef USE_KKCC | |
1734 { | |
1735 kkcc_marking (0); | |
1736 } | |
1737 #else /* not USE_KKCC */ | |
1738 ; | |
1739 #endif /* not USE_KKCC */ | |
1740 | |
1741 /* And prune (this needs to be called after everything else has been | |
1742 marked and before we do any sweeping). */ | |
1743 /* #### this is somewhat ad-hoc and should probably be an object | |
1744 method */ | |
1745 prune_weak_hash_tables (); | |
1746 prune_weak_lists (); | |
1747 prune_specifiers (); | |
1748 prune_syntax_tables (); | |
1749 | |
1750 prune_ephemerons (); | |
1751 prune_weak_boxes (); | |
1752 } | |
1753 | |
1754 #ifdef NEW_GC | |
5016 | 1755 static void |
3092 | 1756 gc_finalize (void) |
1757 { | |
1758 GC_SET_PHASE (FINALIZE); | |
3263 | 1759 register_for_finalization (); |
3092 | 1760 } |
1761 | |
5016 | 1762 static void |
3092 | 1763 gc_sweep (void) |
1764 { | |
1765 GC_SET_PHASE (SWEEP); | |
1766 mc_sweep (); | |
1767 } | |
1768 #endif /* NEW_GC */ | |
1769 | |
1770 | |
5016 | 1771 static void |
3092 | 1772 gc_finish (void) |
1773 { | |
1774 #ifdef NEW_GC | |
1775 GC_SET_PHASE (FINISH_GC); | |
1776 #endif /* NEW_GC */ | |
1777 consing_since_gc = 0; | |
1778 #ifndef DEBUG_XEMACS | |
1779 /* Allow you to set it really fucking low if you really want ... */ | |
1780 if (gc_cons_threshold < 10000) | |
1781 gc_cons_threshold = 10000; | |
1782 #endif | |
1783 recompute_need_to_garbage_collect (); | |
1784 | |
1785 #ifndef NEW_GC | |
5014 | 1786 inhibit_non_essential_conversion_operations--; |
3092 | 1787 #endif /* not NEW_GC */ |
1788 gc_in_progress = 0; | |
1789 | |
1790 run_post_gc_actions (); | |
1791 | |
1792 /******* End of garbage collection ********/ | |
1793 | |
3263 | 1794 #ifndef NEW_GC |
3092 | 1795 if (!breathing_space) |
1796 { | |
1797 breathing_space = malloc (4096 - MALLOC_OVERHEAD); | |
1798 } | |
3263 | 1799 #endif /* not NEW_GC */ |
3092 | 1800 |
1801 need_to_signal_post_gc = 1; | |
1802 funcall_allocation_flag = 1; | |
1803 | |
1804 PROFILE_RECORD_EXITING_SECTION (QSin_garbage_collection); | |
1805 | |
1806 #ifdef NEW_GC | |
1807 GC_SET_PHASE (NONE); | |
1808 #endif /* NEW_GC */ | |
1809 } | |
1810 | |
1811 #ifdef NEW_GC | |
5016 | 1812 static void |
3092 | 1813 gc_suspend_mark_phase (void) |
1814 { | |
1815 PROFILE_RECORD_EXITING_SECTION (QSin_garbage_collection); | |
1816 write_barrier_enabled = 1; | |
1817 consing_since_gc = 0; | |
1818 vdb_start_dirty_bits_recording (); | |
1819 } | |
1820 | |
5016 | 1821 static int |
3092 | 1822 gc_resume_mark_phase (void) |
1823 { | |
1824 PROFILE_RECORD_ENTERING_SECTION (QSin_garbage_collection); | |
1825 assert (write_barrier_enabled); | |
1826 vdb_stop_dirty_bits_recording (); | |
1827 write_barrier_enabled = 0; | |
1828 return vdb_read_dirty_bits (); | |
1829 } | |
1830 | |
5016 | 1831 static int |
3092 | 1832 gc_mark (int incremental) |
1833 { | |
1834 GC_SET_PHASE (MARK); | |
1835 if (!incremental) | |
1836 { | |
1837 kkcc_marking (0); | |
1838 } | |
1839 else | |
1840 { | |
1841 kkcc_marking (gc_incremental_traversal_threshold); | |
1842 if (!KKCC_GC_STACK_EMPTY) | |
1843 { | |
1844 gc_suspend_mark_phase (); | |
1845 return 0; | |
1846 } | |
1847 } | |
1848 return 1; | |
1849 } | |
1850 | |
5016 | 1851 static int |
3092 | 1852 gc_resume_mark (int incremental) |
1853 { | |
1854 if (!incremental) | |
1855 { | |
1856 if (!KKCC_GC_STACK_EMPTY) | |
1857 { | |
1858 GC_STAT_RESUME_GC; | |
1859 /* An incremental garbage collection is already running --- | |
1860 now wrap it up and resume it atomically. */ | |
1861 gc_resume_mark_phase (); | |
1862 gc_mark_root_set (REPUSH_ROOT_SET); | |
1863 kkcc_marking (0); | |
1864 } | |
1865 } | |
1866 else | |
1867 { | |
1868 int repushed_objects; | |
1869 int mark_work; | |
1870 GC_STAT_RESUME_GC; | |
1871 repushed_objects = gc_resume_mark_phase (); | |
1872 mark_work = (gc_incremental_traversal_threshold > repushed_objects) ? | |
1873 gc_incremental_traversal_threshold : repushed_objects; | |
1874 kkcc_marking (mark_work); | |
1875 if (KKCC_GC_STACK_EMPTY) | |
1876 { | |
1877 /* Mark root set again and finish up marking. */ | |
1878 gc_mark_root_set (REPUSH_ROOT_SET); | |
1879 kkcc_marking (0); | |
1880 } | |
1881 else | |
1882 { | |
1883 gc_suspend_mark_phase (); | |
1884 return 0; | |
1885 } | |
1886 } | |
1887 return 1; | |
1888 } | |
1889 | |
1890 | |
5046 | 1891 static void |
3092 | 1892 gc_1 (int incremental) |
1893 { | |
1894 switch (GC_PHASE) | |
1895 { | |
1896 case NONE: | |
1897 gc_prepare (); | |
1898 kkcc_gc_stack_init(); | |
1899 #ifdef DEBUG_XEMACS | |
1900 kkcc_bt_init (); | |
1901 #endif | |
1902 case INIT_GC: | |
1903 gc_mark_root_set (PUSH_ROOT_SET); | |
1904 case PUSH_ROOT_SET: | |
1905 if (!gc_mark (incremental)) | |
1906 return; /* suspend gc */ | |
1907 case MARK: | |
1908 if (!KKCC_GC_STACK_EMPTY) | |
1909 if (!gc_resume_mark (incremental)) | |
1910 return; /* suspend gc */ | |
1911 gc_finish_mark (); | |
3263 | 1912 case FINISH_MARK: |
1913 gc_finalize (); | |
3092 | 1914 kkcc_gc_stack_free (); |
1915 #ifdef DEBUG_XEMACS | |
1916 kkcc_bt_free (); | |
1917 #endif | |
1918 case FINALIZE: | |
1919 gc_sweep (); | |
1920 case SWEEP: | |
1921 gc_finish (); | |
1922 case FINISH_GC: | |
1923 break; | |
1924 } | |
1925 } | |
1926 | |
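
The switch in gc_1 () above relies on deliberate case fall-through: one call runs every remaining phase unless gc_mark () or gc_resume_mark () suspends the collector, in which case the phase is left at MARK with the write barrier enabled and the next call re-enters the switch there. A minimal sketch of what that looks like to a caller (example_pump_gc () is hypothetical):

static void
example_pump_gc (void)
{
  gc_incremental ();  /* may suspend inside MARK and return early */
  /* ... Lisp code runs; dirty-bit recording tracks its writes ... */
  gc_incremental ();  /* re-enters gc_1 () at MARK and resumes the
                         suspended traversal */
}
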
5046 | 1927 static void |
1928 gc (int incremental) | |
3092 | 1929 { |
1930 if (gc_currently_forbidden | |
1931 || in_display | |
1932 || preparing_for_armageddon) | |
1933 return; | |
1934 | |
1935 /* Very important to prevent GC during any of the following | |
1936 stuff that might run Lisp code; otherwise, we'll likely | |
1937 have infinite GC recursion. */ | |
1938 speccount = begin_gc_forbidden (); | |
1939 | |
3267 | 1940 show_gc_cursor_and_message (); |
1941 | |
3092 | 1942 gc_1 (incremental); |
1943 | |
3267 | 1944 remove_gc_cursor_and_message (); |
1945 | |
3092 | 1946 /* now stop inhibiting GC */ |
1947 unbind_to (speccount); | |
1948 } | |
1949 | |
1950 void | |
1951 gc_full (void) | |
1952 { | |
1953 gc (0); | |
1954 } | |
1955 | |
1956 DEFUN ("gc-full", Fgc_full, 0, 0, "", /* | |
1957 This function performs a full garbage collection. If an incremental | |
1958 garbage collection is already running, it completes without any | |
1959 further interruption. This function guarantees that unused objects | |
1960 are freed when it returns. Garbage collection happens automatically if | |
1961 the client allocates more than `gc-cons-threshold' bytes of Lisp data | |
1962 since the previous garbage collection. | |
1963 */ | |
1964 ()) | |
1965 { | |
1966 gc_full (); | |
1967 return Qt; | |
1968 } | |
1969 | |
1970 void | |
1971 gc_incremental (void) | |
1972 { | |
1973 gc (allow_incremental_gc); | |
1974 } | |
1975 | |
1976 DEFUN ("gc-incremental", Fgc_incremental, 0, 0, "", /* | |
1977 This function starts an incremental garbage collection. If an | |
1978 incremental garbage collection is already running, the next cycle | |
1979 starts. Note that this function has not necessarily freed any memory | |
1980 when it returns. It only guarantees that the traversal of | |
1981 the heap makes progress. The next cycle of incremental garbage | |
1982 collection happens automatically if the client allocates more than | |
1983 `gc-cons-incremental-threshold' bytes of Lisp data since the previous | |
1984 garbage collection. | |
1985 */ | |
1986 ()) | |
1987 { | |
1988 gc_incremental (); | |
1989 return Qt; | |
1990 } | |
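
The two DEFUNs above differ only in their guarantees: `gc-full' promises that unused objects are freed before it returns, while `gc-incremental' only promises further traversal progress. A minimal sketch of choosing between the C entry points accordingly (need_memory_now () is a hypothetical predicate):

static void
example_collect (void)
{
  if (need_memory_now ())
    gc_full ();         /* unused objects are guaranteed freed on return */
  else
    gc_incremental ();  /* only guarantees one more cycle of traversal */
}
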
1991 #else /* not NEW_GC */ | |
1992 void garbage_collect_1 (void) | |
1993 { | |
1994 if (gc_in_progress | |
1995 || gc_currently_forbidden | |
1996 || in_display | |
1997 || preparing_for_armageddon) | |
1998 return; | |
1999 | |
2000 /* Very important to prevent GC during any of the following | |
2001 stuff that might run Lisp code; otherwise, we'll likely | |
2002 have infinite GC recursion. */ | |
2003 speccount = begin_gc_forbidden (); | |
2004 | |
3267 | 2005 show_gc_cursor_and_message (); |
2006 | |
3092 | 2007 gc_prepare (); |
2008 #ifdef USE_KKCC | |
2009 kkcc_gc_stack_init(); | |
2010 #ifdef DEBUG_XEMACS | |
2011 kkcc_bt_init (); | |
2012 #endif | |
2013 #endif /* USE_KKCC */ | |
2014 gc_mark_root_set (); | |
2015 #ifdef USE_KKCC | |
2016 kkcc_marking (0); | |
2017 #endif /* USE_KKCC */ | |
2018 gc_finish_mark (); | |
2019 #ifdef USE_KKCC | |
2020 kkcc_gc_stack_free (); | |
2021 #ifdef DEBUG_XEMACS | |
2022 kkcc_bt_free (); | |
2023 #endif | |
2024 #endif /* USE_KKCC */ | |
2025 gc_sweep_1 (); | |
2026 gc_finish (); | |
2027 | |
3267 | 2028 remove_gc_cursor_and_message (); |
2029 | |
3092 | 2030 /* now stop inhibiting GC */ |
2031 unbind_to (speccount); | |
2032 } | |
2033 #endif /* not NEW_GC */ | |
2034 | |
2035 | |
2036 /************************************************************************/ | |
2037 /* Initializations */ | |
2038 /************************************************************************/ | |
2039 | |
2040 /* Initialization */ | |
2041 static void | |
2042 common_init_gc_early (void) | |
2043 { | |
2044 Vgc_message = Qzero; | |
2045 | |
2046 gc_currently_forbidden = 0; | |
2047 gc_hooks_inhibited = 0; | |
2048 | |
2049 need_to_garbage_collect = always_gc; | |
2050 | |
2051 gc_cons_threshold = GC_CONS_THRESHOLD; | |
2052 gc_cons_percentage = 40; /* #### what is optimal? */ | |
2053 total_gc_usage_set = 0; | |
2054 #ifdef NEW_GC | |
2055 gc_cons_incremental_threshold = GC_CONS_INCREMENTAL_THRESHOLD; | |
2056 gc_incremental_traversal_threshold = GC_INCREMENTAL_TRAVERSAL_THRESHOLD; | |
3263 | 2057 #endif /* NEW_GC */ |
3092 | 2058 } |
2059 | |
2060 void | |
2061 init_gc_early (void) | |
2062 { | |
3263 | 2063 #ifdef NEW_GC |
2064 /* Reset the finalizers_to_run list after pdump_load. */ | |
2065 Vfinalizers_to_run = Qnil; | |
2066 #endif /* NEW_GC */ | |
3092 | 2067 } |
2068 | |
2069 void | |
2070 reinit_gc_early (void) | |
2071 { | |
2072 common_init_gc_early (); | |
2073 } | |
2074 | |
2075 void | |
2076 init_gc_once_early (void) | |
2077 { | |
2078 common_init_gc_early (); | |
2079 } | |
2080 | |
2081 void | |
2082 syms_of_gc (void) | |
2083 { | |
2084 DEFSYMBOL (Qpre_gc_hook); | |
2085 DEFSYMBOL (Qpost_gc_hook); | |
2086 #ifdef NEW_GC | |
2087 DEFSUBR (Fgc_full); | |
2088 DEFSUBR (Fgc_incremental); | |
2089 #ifdef ERROR_CHECK_GC | |
2090 DEFSUBR (Fgc_stats); | |
2091 #endif /* ERROR_CHECK_GC */ | |
2092 #endif /* NEW_GC */ | |
2093 } | |
2094 | |
2095 void | |
2096 vars_of_gc (void) | |
2097 { | |
2098 staticpro_nodump (&pre_gc_cursor); | |
2099 | |
4952 | 2100 QSin_garbage_collection = build_defer_string ("(in garbage collection)"); |
3092 | 2101 staticpro (&QSin_garbage_collection); |
2102 | |
2103 DEFVAR_INT ("gc-cons-threshold", &gc_cons_threshold /* | |
2104 *Number of bytes of consing between full garbage collections. | |
2105 \"Consing\" is a misnomer in that this actually counts allocation | |
2106 of all different kinds of objects, not just conses. | |
2107 Garbage collection can happen automatically once this many bytes have been | |
2108 allocated since the last garbage collection. All data types count. | |
2109 | |
2110 Garbage collection happens automatically when `eval' or `funcall' are | |
2111 called. (Note that `funcall' is called implicitly as part of evaluation.) | |
2112 By binding this temporarily to a large number, you can effectively | |
2113 prevent garbage collection during a part of the program. | |
2114 | |
2115 Normally, you cannot set this value less than 10,000 (if you do, it is | |
2116 automatically reset during the next garbage collection). However, if | |
2117 XEmacs was compiled with DEBUG_XEMACS, this does not happen, allowing | |
2118 you to set this value very low to track down problems with insufficient | |
2119 GCPRO'ing. If you set this to a negative number, garbage collection will | |
2120 happen at *EVERY* call to `eval' or `funcall'. This is an extremely | |
2121 effective way to check GCPRO problems, but be warned that your XEmacs | |
2122 will be unusable! You almost certainly won't have the patience to wait | |
2123 long enough to be able to set it back. | |
2124 | |
2125 See also `consing-since-gc' and `gc-cons-percentage'. | |
2126 */ ); | |
2127 | |
2128 DEFVAR_INT ("gc-cons-percentage", &gc_cons_percentage /* | |
2129 *Percentage of memory allocated between garbage collections. | |
2130 | |
2131 Garbage collection will happen if this percentage of the total amount of | |
2132 memory used for data (see `lisp-object-memory-usage') has been allocated | |
2133 since the last garbage collection. However, it will not happen if less | |
2134 than `gc-cons-threshold' bytes have been allocated -- this sets an absolute | |
2135 minimum in case very little data has been allocated or the percentage is | |
2136 set very low. Set this to 0 to have garbage collection always happen after | |
2137 `gc-cons-threshold' bytes have been allocated, regardless of current memory | |
2138 usage. | |
2139 | |
2140 See also `consing-since-gc' and `gc-cons-threshold'. | |
2141 */ ); | |
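
A minimal restatement of the trigger rule described by this docstring and the previous one; the real decision lives in recompute_need_to_garbage_collect (), not shown here, and example_should_gc () is purely illustrative:

static int
example_should_gc (EMACS_INT consed, EMACS_INT total_usage)
{
  if (consed < gc_cons_threshold)
    return 0;                    /* absolute minimum not yet reached */
  if (gc_cons_percentage <= 0)
    return 1;                    /* threshold alone decides */
  return consed >= total_usage * gc_cons_percentage / 100;
}
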
2142 | |
2143 #ifdef NEW_GC | |
2144 DEFVAR_INT ("gc-cons-incremental-threshold", | |
2145 &gc_cons_incremental_threshold /* | |
2146 *Number of bytes of consing between cycles of incremental garbage | |
2147 collections. \"Consing\" is a misnomer in that this actually counts | |
2148 allocation of all different kinds of objects, not just conses. The | |
2149 next garbage collection cycle can happen automatically once this many | |
2150 bytes have been allocated since the last garbage collection cycle. | |
2151 All data types count. | |
2152 | |
2153 See also `gc-cons-threshold'. | |
2154 */ ); | |
2155 | |
2156 DEFVAR_INT ("gc-incremental-traversal-threshold", | |
2157 &gc_incremental_traversal_threshold /* | |
2158 *Number of elements processed in one cycle of incremental traversal. | |
2159 */ ); | |
2160 #endif /* NEW_GC */ | |
2161 | |
2162 DEFVAR_BOOL ("purify-flag", &purify_flag /* | |
2163 Non-nil means loading Lisp code in order to dump an executable. | |
2164 This means that certain objects should be allocated in readonly space. | |
2165 */ ); | |
2166 | |
2167 DEFVAR_BOOL ("garbage-collection-messages", &garbage_collection_messages /* | |
4502 | 2168 *Non-nil means display messages at start and end of garbage collection. |
3092 | 2169 */ ); |
2170 garbage_collection_messages = 0; | |
2171 | |
2172 DEFVAR_LISP ("pre-gc-hook", &Vpre_gc_hook /* | |
2173 Function or functions to be run just before each garbage collection. | |
2174 Interrupts, garbage collection, and errors are inhibited while this hook | |
2175 runs, so be extremely careful in what you add here. In particular, avoid | |
2176 consing, and do not interact with the user. | |
2177 */ ); | |
2178 Vpre_gc_hook = Qnil; | |
2179 | |
2180 DEFVAR_LISP ("post-gc-hook", &Vpost_gc_hook /* | |
2181 Function or functions to be run just after each garbage collection. | |
2182 Interrupts, garbage collection, and errors are inhibited while this hook | |
2183 runs. Each hook is called with one argument which is an alist with | |
2184 finalization data. | |
2185 */ ); | |
2186 Vpost_gc_hook = Qnil; | |
2187 | |
2188 DEFVAR_LISP ("gc-message", &Vgc_message /* | |
2189 String to print to indicate that a garbage collection is in progress. | |
2190 This is printed in the echo area. If the selected frame is on a | |
2191 window system and `gc-pointer-glyph' specifies a value (i.e. a pointer | |
2192 image instance) in the domain of the selected frame, the mouse pointer | |
2193 will change instead of this message being printed. | |
2194 */ ); | |
4952 | 2195 Vgc_message = build_defer_string (gc_default_message); |
3092 | 2196 |
2197 DEFVAR_LISP ("gc-pointer-glyph", &Vgc_pointer_glyph /* | |
2198 Pointer glyph used to indicate that a garbage collection is in progress. | |
2199 If the selected window is on a window system and this glyph specifies a | |
2200 value (i.e. a pointer image instance) in the domain of the selected | |
2201 window, the pointer will be changed as specified during garbage collection. | |
2202 Otherwise, a message will be printed in the echo area, as controlled | |
2203 by `gc-message'. | |
2204 */ ); | |
2205 | |
2206 #ifdef NEW_GC | |
2207 DEFVAR_BOOL ("allow-incremental-gc", &allow_incremental_gc /* | |
2208 *Non-nil means to allow incremental garbage collection. Nil prevents | |
2209 incremental garbage collection; the garbage collector then only does | |
2210 full collections (even if `gc-incremental' is called). | |
2211 */ ); | |
3263 | 2212 |
2213 Vfinalizers_to_run = Qnil; | |
2214 staticpro_nodump (&Vfinalizers_to_run); | |
3092 | 2215 #endif /* NEW_GC */ |
2216 } | |
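
vars_of_gc () above registers its own globals with staticpro ()/staticpro_nodump () so that gc_mark_root_set () can reach them. A minimal sketch of the same pattern in another module (the module, function, and variable names are hypothetical):

static Lisp_Object Vexample_cache;

void
vars_of_example_module (void)
{
  Vexample_cache = Qnil;
  staticpro (&Vexample_cache);   /* marked on every root-set walk from now on */
}
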
2217 | |
2218 void | |
2219 complex_vars_of_gc (void) | |
2220 { | |
2221 Vgc_pointer_glyph = Fmake_glyph_internal (Qpointer); | |
2222 } |