xemacs-beta: annotate src/gc.c @ 5168:cf900a2f1fa3
extract gap array from extents.c, use in range tables
-------------------- ChangeLog entries follow: --------------------
src/ChangeLog addition:
2010-03-22 Ben Wing <ben@xemacs.org>
* Makefile.in.in (objs):
* array.c:
* array.c (gap_array_adjust_markers):
* array.c (gap_array_move_gap):
* array.c (gap_array_make_gap):
* array.c (gap_array_insert_els):
* array.c (gap_array_delete_els):
* array.c (gap_array_make_marker):
* array.c (gap_array_delete_marker):
* array.c (gap_array_delete_all_markers):
* array.c (gap_array_clone):
* array.h:
* depend:
* emacs.c (main_1):
* extents.c:
* extents.c (EXTENT_GAP_ARRAY_AT):
* extents.c (extent_list_num_els):
* extents.c (extent_list_locate):
* extents.c (extent_list_at):
* extents.c (extent_list_delete_all):
* extents.c (allocate_extent_list):
* extents.c (syms_of_extents):
* extents.h:
* extents.h (XEXTENT_LIST_MARKER):
* lisp.h:
* rangetab.c:
* rangetab.c (mark_range_table):
* rangetab.c (print_range_table):
* rangetab.c (range_table_equal):
* rangetab.c (range_table_hash):
* rangetab.c (verify_range_table):
* rangetab.c (get_range_table_pos):
* rangetab.c (Fmake_range_table):
* rangetab.c (Fcopy_range_table):
* rangetab.c (Fget_range_table):
* rangetab.c (put_range_table):
* rangetab.c (Fclear_range_table):
* rangetab.c (Fmap_range_table):
* rangetab.c (unified_range_table_bytes_needed):
* rangetab.c (unified_range_table_copy_data):
* rangetab.c (unified_range_table_lookup):
* rangetab.h:
* rangetab.h (struct range_table_entry):
* rangetab.h (struct Lisp_Range_Table):
* rangetab.h (rangetab_gap_array_at):
* symsinit.h:
Rename dynarr.c to array.c. Move gap array from extents.c to array.c.
Extract dynarr, gap array and stack-like malloc into new file array.h.
Rename GAP_ARRAY_NUM_ELS -> gap_array_length(). Add gap_array_at(),
gap_array_atp().
Rewrite range table code to use gap arrays. Make put_range_table()
smarter so that its operation is O(log n) for adding a localized
range.
* gc.c (lispdesc_block_size_1):
Don't ABORT() when two elements are located at the same place.
This will happen with a size-0 gap array -- both parts of the array
(before and after gap) are in the same place.
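The gap-array change described above is what motivates the gc.c tweak below (the new `#if 0` around the ABORT in lispdesc_block_size_1). As a quick orientation, here is a minimal sketch of the data structure; it assumes nothing about the real array.c interface beyond what the ChangeLog lists, and all names in it are hypothetical.

/* Minimal gap-array sketch (hypothetical names -- not the real array.c
   interface).  All elements live in one flat buffer that contains a
   movable gap; insertions near the gap only shuffle the gap, not the
   whole array, and a sorted gap array can be binary-searched, which is
   presumably what makes the localized put_range_table () case O(log n). */
#include <stddef.h>

typedef struct
{
  char  *els;      /* numels + gapsize slots of elsize bytes each */
  size_t elsize;   /* size of one element */
  size_t gap;      /* element index at which the gap starts */
  size_t gapsize;  /* number of free slots inside the gap */
  size_t numels;   /* number of live elements */
} sketch_gap_array;

/* Address of live element N, skipping over the gap. */
static void *
sketch_gap_array_at (sketch_gap_array *ga, size_t n)
{
  size_t slot = (n < ga->gap) ? n : n + ga->gapsize;
  return ga->els + slot * ga->elsize;
}

/* Degenerate case: numels == 0 and gapsize == 0.  The "before gap" and
   "after gap" halves are both empty and start at the same offset, so two
   relocatable parts of the object legitimately coincide -- which is why
   lispdesc_block_size_1 () below no longer ABORT()s on equal offsets. */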
author | Ben Wing <ben@xemacs.org> |
date | Mon, 22 Mar 2010 19:12:15 -0500 |
parents | 9e0b43d3095c |
children | 6c6d78781d59 |

rev | line source |
---|---|
3092 | 1 /* New incremental garbage collector for XEmacs. |
2 Copyright (C) 2005 Marcus Crestani. | |
4934 | 3 Copyright (C) 2010 Ben Wing. |
3092 | 4 |
5 This file is part of XEmacs. | |
6 | |
7 XEmacs is free software; you can redistribute it and/or modify it | |
8 under the terms of the GNU General Public License as published by the | |
9 Free Software Foundation; either version 2, or (at your option) any | |
10 later version. | |
11 | |
12 XEmacs is distributed in the hope that it will be useful, but WITHOUT | |
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 for more details. | |
16 | |
17 You should have received a copy of the GNU General Public License | |
18 along with XEmacs; see the file COPYING. If not, write to | |
19 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
20 Boston, MA 02111-1307, USA. */ | |
21 | |
22 /* Synched up with: Not in FSF. */ | |
23 | |
24 #include <config.h> | |
25 #include "lisp.h" | |
26 | |
27 #include "backtrace.h" | |
28 #include "buffer.h" | |
29 #include "bytecode.h" | |
30 #include "chartab.h" | |
31 #include "console-stream.h" | |
32 #include "device.h" | |
33 #include "elhash.h" | |
34 #include "events.h" | |
35 #include "extents-impl.h" | |
36 #include "file-coding.h" | |
37 #include "frame-impl.h" | |
38 #include "gc.h" | |
39 #include "glyphs.h" | |
40 #include "opaque.h" | |
41 #include "lrecord.h" | |
42 #include "lstream.h" | |
43 #include "process.h" | |
44 #include "profile.h" | |
45 #include "redisplay.h" | |
46 #include "specifier.h" | |
47 #include "sysfile.h" | |
48 #include "sysdep.h" | |
49 #include "window.h" | |
50 #include "vdb.h" | |
51 | |
52 | |
53 #define GC_CONS_THRESHOLD 2000000 | |
54 #define GC_CONS_INCREMENTAL_THRESHOLD 200000 | |
55 #define GC_INCREMENTAL_TRAVERSAL_THRESHOLD 100000 | |
56 | |
57 /* Number of bytes of consing done since the last GC. */ | |
58 EMACS_INT consing_since_gc; | |
59 | |
60 /* Number of bytes of consing done since startup. */ | |
61 EMACS_UINT total_consing; | |
62 | |
63 /* Number of bytes of current allocated heap objects. */ | |
64 EMACS_INT total_gc_usage; | |
65 | |
66 /* If the above is set. */ | |
67 int total_gc_usage_set; | |
68 | |
69 /* Number of bytes of consing since gc before another gc should be done. */ | |
70 EMACS_INT gc_cons_threshold; | |
71 | |
72 /* Nonzero during gc */ | |
73 int gc_in_progress; | |
74 | |
75 /* Percentage of consing of total data size before another GC. */ | |
76 EMACS_INT gc_cons_percentage; | |
77 | |
78 #ifdef NEW_GC | |
79 /* Number of bytes of consing since gc before another cycle of the gc | |
80 should be done in incremental mode. */ | |
81 EMACS_INT gc_cons_incremental_threshold; | |
82 | |
83 /* Number of elements marked in one cycle of incremental GC. */ | |
84 EMACS_INT gc_incremental_traversal_threshold; | |
85 | |
86 /* Nonzero during write barrier */ | |
87 int write_barrier_enabled; | |
88 #endif /* NEW_GC */ | |
89 | |
90 | |
91 | |
92 #ifdef NEW_GC | |
93 /************************************************************************/ | |
94 /* Incremental State and Statistics */ | |
95 /************************************************************************/ | |
96 | |
97 enum gc_phase | |
98 { | |
99 NONE, | |
100 INIT_GC, | |
101 PUSH_ROOT_SET, | |
102 MARK, | |
103 REPUSH_ROOT_SET, | |
104 FINISH_MARK, | |
105 FINALIZE, | |
106 SWEEP, | |
107 FINISH_GC | |
108 }; | |
109 | |
110 #ifndef ERROR_CHECK_GC | |
4124 | 111 typedef struct gc_state_type |
3092 | 112 { |
113 enum gc_phase phase; | |
4124 | 114 } gc_state_type; |
3092 | 115 #else /* ERROR_CHECK_GC */ |
116 enum gc_stat_id | |
117 { | |
118 GC_STAT_TOTAL, | |
119 GC_STAT_IN_LAST_GC, | |
120 GC_STAT_IN_THIS_GC, | |
121 GC_STAT_IN_LAST_CYCLE, | |
122 GC_STAT_IN_THIS_CYCLE, | |
123 GC_STAT_COUNT /* has to be last */ | |
124 }; | |
125 | |
4124 | 126 typedef struct gc_state_type |
3092 | 127 { |
128 enum gc_phase phase; | |
3313 | 129 double n_gc[GC_STAT_COUNT]; |
130 double n_cycles[GC_STAT_COUNT]; | |
131 double enqueued[GC_STAT_COUNT]; | |
132 double dequeued[GC_STAT_COUNT]; | |
133 double repushed[GC_STAT_COUNT]; | |
134 double enqueued2[GC_STAT_COUNT]; | |
135 double dequeued2[GC_STAT_COUNT]; | |
136 double finalized[GC_STAT_COUNT]; | |
137 double freed[GC_STAT_COUNT]; | |
4124 | 138 } gc_state_type; |
3092 | 139 #endif /* ERROR_CHECK_GC */ |
140 | |
4124 | 141 gc_state_type gc_state; |
142 | |
3092 | 143 #define GC_PHASE gc_state.phase |
144 #define GC_SET_PHASE(p) GC_PHASE = p | |
145 | |
146 #ifdef ERROR_CHECK_GC | |
147 # define GC_STAT_START_NEW_GC gc_stat_start_new_gc () | |
148 # define GC_STAT_RESUME_GC gc_stat_resume_gc () | |
149 | |
150 #define GC_STAT_TICK(STAT) \ | |
151 gc_state.STAT[GC_STAT_TOTAL]++; \ | |
152 gc_state.STAT[GC_STAT_IN_THIS_GC]++; \ | |
153 gc_state.STAT[GC_STAT_IN_THIS_CYCLE]++ | |
154 | |
155 # define GC_STAT_ENQUEUED \ | |
156 if (GC_PHASE == REPUSH_ROOT_SET) \ | |
157 { \ | |
158 GC_STAT_TICK (enqueued2); \ | |
159 } \ | |
160 else \ | |
161 { \ | |
162 GC_STAT_TICK (enqueued); \ | |
163 } | |
164 | |
165 # define GC_STAT_DEQUEUED \ | |
166 if (gc_state.phase == REPUSH_ROOT_SET) \ | |
167 { \ | |
168 GC_STAT_TICK (dequeued2); \ | |
169 } \ | |
170 else \ | |
171 { \ | |
172 GC_STAT_TICK (dequeued); \ | |
173 } | |
174 # define GC_STAT_REPUSHED GC_STAT_TICK (repushed) | |
175 | |
176 #define GC_STAT_RESUME(stat) \ | |
177 gc_state.stat[GC_STAT_IN_LAST_CYCLE] = \ | |
178 gc_state.stat[GC_STAT_IN_THIS_CYCLE]; \ | |
179 gc_state.stat[GC_STAT_IN_THIS_CYCLE] = 0 | |
180 | |
181 #define GC_STAT_RESTART(stat) \ | |
182 gc_state.stat[GC_STAT_IN_LAST_GC] = \ | |
183 gc_state.stat[GC_STAT_IN_THIS_GC]; \ | |
184 gc_state.stat[GC_STAT_IN_THIS_GC] = 0; \ | |
185 GC_STAT_RESUME (stat) | |
186 | |
5046 | 187 static void |
3092 | 188 gc_stat_start_new_gc (void) |
189 { | |
190 gc_state.n_gc[GC_STAT_TOTAL]++; | |
191 gc_state.n_cycles[GC_STAT_TOTAL]++; | |
192 gc_state.n_cycles[GC_STAT_IN_LAST_GC] = gc_state.n_cycles[GC_STAT_IN_THIS_GC]; | |
193 gc_state.n_cycles[GC_STAT_IN_THIS_GC] = 1; | |
194 | |
195 GC_STAT_RESTART (enqueued); | |
196 GC_STAT_RESTART (dequeued); | |
197 GC_STAT_RESTART (repushed); | |
198 GC_STAT_RESTART (finalized); | |
199 GC_STAT_RESTART (enqueued2); | |
200 GC_STAT_RESTART (dequeued2); | |
201 GC_STAT_RESTART (freed); | |
202 } | |
203 | |
5046 | 204 static void |
3092 | 205 gc_stat_resume_gc (void) |
206 { | |
207 gc_state.n_cycles[GC_STAT_TOTAL]++; | |
208 gc_state.n_cycles[GC_STAT_IN_THIS_GC]++; | |
209 GC_STAT_RESUME (enqueued); | |
210 GC_STAT_RESUME (dequeued); | |
211 GC_STAT_RESUME (repushed); | |
212 GC_STAT_RESUME (finalized); | |
213 GC_STAT_RESUME (enqueued2); | |
214 GC_STAT_RESUME (dequeued2); | |
215 GC_STAT_RESUME (freed); | |
216 } | |
217 | |
218 void | |
219 gc_stat_finalized (void) | |
220 { | |
221 GC_STAT_TICK (finalized); | |
222 } | |
223 | |
224 void | |
225 gc_stat_freed (void) | |
226 { | |
227 GC_STAT_TICK (freed); | |
228 } | |
229 | |
230 DEFUN("gc-stats", Fgc_stats, 0, 0 ,"", /* | |
231 Return statistics about garbage collection cycles in a property list. | |
232 */ | |
233 ()) | |
234 { | |
235 Lisp_Object pl = Qnil; | |
236 #define PL(name,value) \ | |
3313 | 237 pl = cons3 (intern (name), make_float (gc_state.value), pl) |
3092 | 238 |
239 PL ("freed-in-this-cycle", freed[GC_STAT_IN_THIS_CYCLE]); | |
240 PL ("freed-in-this-gc", freed[GC_STAT_IN_THIS_GC]); | |
241 PL ("freed-in-last-cycle", freed[GC_STAT_IN_LAST_CYCLE]); | |
242 PL ("freed-in-last-gc", freed[GC_STAT_IN_LAST_GC]); | |
243 PL ("freed-total", freed[GC_STAT_TOTAL]); | |
244 PL ("finalized-in-this-cycle", finalized[GC_STAT_IN_THIS_CYCLE]); | |
245 PL ("finalized-in-this-gc", finalized[GC_STAT_IN_THIS_GC]); | |
246 PL ("finalized-in-last-cycle", finalized[GC_STAT_IN_LAST_CYCLE]); | |
247 PL ("finalized-in-last-gc", finalized[GC_STAT_IN_LAST_GC]); | |
248 PL ("finalized-total", finalized[GC_STAT_TOTAL]); | |
249 PL ("repushed-in-this-cycle", repushed[GC_STAT_IN_THIS_CYCLE]); | |
250 PL ("repushed-in-this-gc", repushed[GC_STAT_IN_THIS_GC]); | |
251 PL ("repushed-in-last-cycle", repushed[GC_STAT_IN_LAST_CYCLE]); | |
252 PL ("repushed-in-last-gc", repushed[GC_STAT_IN_LAST_GC]); | |
253 PL ("repushed-total", repushed[GC_STAT_TOTAL]); | |
254 PL ("dequeued2-in-this-cycle", dequeued2[GC_STAT_IN_THIS_CYCLE]); | |
255 PL ("dequeued2-in-this-gc", dequeued2[GC_STAT_IN_THIS_GC]); | |
256 PL ("dequeued2-in-last-cycle", dequeued2[GC_STAT_IN_LAST_CYCLE]); | |
257 PL ("dequeued2-in-last-gc", dequeued2[GC_STAT_IN_LAST_GC]); | |
258 PL ("dequeued2-total", dequeued2[GC_STAT_TOTAL]); | |
259 PL ("enqueued2-in-this-cycle", enqueued2[GC_STAT_IN_THIS_CYCLE]); | |
260 PL ("enqueued2-in-this-gc", enqueued2[GC_STAT_IN_THIS_GC]); | |
261 PL ("enqueued2-in-last-cycle", enqueued2[GC_STAT_IN_LAST_CYCLE]); | |
262 PL ("enqueued2-in-last-gc", enqueued2[GC_STAT_IN_LAST_GC]); | |
263 PL ("enqueued2-total", enqueued2[GC_STAT_TOTAL]); | |
264 PL ("dequeued-in-this-cycle", dequeued[GC_STAT_IN_THIS_CYCLE]); | |
265 PL ("dequeued-in-this-gc", dequeued[GC_STAT_IN_THIS_GC]); | |
266 PL ("dequeued-in-last-cycle", dequeued[GC_STAT_IN_LAST_CYCLE]); | |
267 PL ("dequeued-in-last-gc", dequeued[GC_STAT_IN_LAST_GC]); | |
268 PL ("dequeued-total", dequeued[GC_STAT_TOTAL]); | |
269 PL ("enqueued-in-this-cycle", enqueued[GC_STAT_IN_THIS_CYCLE]); | |
270 PL ("enqueued-in-this-gc", enqueued[GC_STAT_IN_THIS_GC]); | |
271 PL ("enqueued-in-last-cycle", enqueued[GC_STAT_IN_LAST_CYCLE]); | |
272 PL ("enqueued-in-last-gc", enqueued[GC_STAT_IN_LAST_GC]); | |
273 PL ("enqueued-total", enqueued[GC_STAT_TOTAL]); | |
274 PL ("n-cycles-in-this-gc", n_cycles[GC_STAT_IN_THIS_GC]); | |
275 PL ("n-cycles-in-last-gc", n_cycles[GC_STAT_IN_LAST_GC]); | |
276 PL ("n-cycles-total", n_cycles[GC_STAT_TOTAL]); | |
277 PL ("n-gc-total", n_gc[GC_STAT_TOTAL]); | |
278 PL ("phase", phase); | |
279 return pl; | |
280 } | |
281 #else /* not ERROR_CHECK_GC */ | |
282 # define GC_STAT_START_NEW_GC | |
283 # define GC_STAT_RESUME_GC | |
284 # define GC_STAT_ENQUEUED | |
285 # define GC_STAT_DEQUEUED | |
286 # define GC_STAT_REPUSHED | |
287 # define GC_STAT_REMOVED | |
288 #endif /* not ERROR_CHECK_GC */ | |
289 #endif /* NEW_GC */ | |
290 | |
291 | |
292 /************************************************************************/ | |
293 /* Recompute need to garbage collect */ | |
294 /************************************************************************/ | |
295 | |
296 int need_to_garbage_collect; | |
297 | |
298 #ifdef ERROR_CHECK_GC | |
299 int always_gc = 0; /* Debugging hack; equivalent to | |
300 (setq gc-cons-threshold -1) */ | |
301 #else | |
302 #define always_gc 0 | |
303 #endif | |
304 | |
305 /* True if it's time to garbage collect now. */ | |
306 void | |
307 recompute_need_to_garbage_collect (void) | |
308 { | |
309 if (always_gc) | |
310 need_to_garbage_collect = 1; | |
311 else | |
312 need_to_garbage_collect = | |
313 #ifdef NEW_GC | |
314 write_barrier_enabled ? | |
315 (consing_since_gc > gc_cons_incremental_threshold) : | |
316 #endif /* NEW_GC */ | |
317 (consing_since_gc > gc_cons_threshold | |
318 && | |
319 #if 0 /* #### implement this better */ | |
4115 | 320 ((double)consing_since_gc) / total_data_usage()) >= |
321 ((double)gc_cons_percentage / 100) | |
3092 | 322 #else |
323 (!total_gc_usage_set || | |
4115 | 324 ((double)consing_since_gc / total_gc_usage) >= |
325 ((double)gc_cons_percentage / 100)) | |
3092 | 326 #endif |
327 ); | |
328 recompute_funcall_allocation_flag (); | |
329 } | |
330 | |
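A worked illustration of the test above may help; the numbers are made up (only the 2000000 matches GC_CONS_THRESHOLD defined earlier in this file), and the helper is a hypothetical stand-in, not XEmacs code.

/* Hypothetical stand-in for the non-incremental test above.  With
   gc_cons_threshold = 2000000 (GC_CONS_THRESHOLD) and a made-up
   gc_cons_percentage of 40:
     consed = 1500000                    -> 0  (below the byte threshold)
     consed = 3000000, heap = 10000000   -> 0  (30% of the heap < 40%)
     consed = 5000000, heap = 10000000   -> 1  (50% of the heap >= 40%) */
static int
sketch_need_gc (EMACS_INT consed, EMACS_INT threshold,
                EMACS_INT heap, EMACS_INT percentage)
{
  return consed > threshold
    && (heap <= 0   /* mirrors the !total_gc_usage_set escape hatch */
        || (double) consed / heap >= (double) percentage / 100);
}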
331 | |
332 | |
333 /************************************************************************/ | |
334 /* Mark Phase */ | |
335 /************************************************************************/ | |
336 | |
337 static const struct memory_description lisp_object_description_1[] = { | |
338 { XD_LISP_OBJECT, 0 }, | |
339 { XD_END } | |
340 }; | |
341 | |
342 const struct sized_memory_description lisp_object_description = { | |
343 sizeof (Lisp_Object), | |
344 lisp_object_description_1 | |
345 }; | |
346 | |
347 #if defined (USE_KKCC) || defined (PDUMP) | |
348 | |
349 /* This function extracts the value of a count variable described somewhere |
350 else in the description. The value is converted according to its type. */ |
351 EMACS_INT | |
352 lispdesc_indirect_count_1 (EMACS_INT code, | |
353 const struct memory_description *idesc, | |
354 const void *idata) | |
355 { | |
356 EMACS_INT count; | |
357 const void *irdata; | |
358 | |
359 int line = XD_INDIRECT_VAL (code); | |
360 int delta = XD_INDIRECT_DELTA (code); | |
361 | |
362 irdata = ((char *) idata) + | |
363 lispdesc_indirect_count (idesc[line].offset, idesc, idata); | |
364 switch (idesc[line].type) | |
365 { | |
366 case XD_BYTECOUNT: | |
367 count = * (Bytecount *) irdata; | |
368 break; | |
369 case XD_ELEMCOUNT: | |
370 count = * (Elemcount *) irdata; | |
371 break; | |
372 case XD_HASHCODE: | |
373 count = * (Hashcode *) irdata; | |
374 break; | |
375 case XD_INT: | |
376 count = * (int *) irdata; | |
377 break; | |
378 case XD_LONG: | |
379 count = * (long *) irdata; | |
380 break; | |
381 default: | |
382 stderr_out ("Unsupported count type : %d (line = %d, code = %ld)\n", | |
383 idesc[line].type, line, (long) code); | |
384 #if defined(USE_KKCC) && defined(DEBUG_XEMACS) | |
385 if (gc_in_progress) | |
386 kkcc_backtrace (); | |
387 #endif | |
388 #ifdef PDUMP | |
389 if (in_pdump) | |
390 pdump_backtrace (); | |
391 #endif | |
392 count = 0; /* warning suppression */ | |
393 ABORT (); | |
394 } | |
395 count += delta; | |
396 return count; | |
397 } | |
398 | |
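To make the indirection concrete, here is a self-contained analogue using hypothetical types; the real struct memory_description layout differs, but the lookup follows the same pattern: the referring entry names the description line that covers the count field, plus a delta.

/* Self-contained analogue of an indirect count (hypothetical types, not
   the real memory_description machinery).  Entry 1 does not store its
   array length; it says "my count is the field covered by line 0". */
#include <stddef.h>

struct sketch_desc { size_t offset; int count_line; int count_delta; };
struct sketch_obj  { int nitems; int *items; };

static const struct sketch_desc sketch_obj_desc[] = {
  { offsetof (struct sketch_obj, nitems), -1, 0 },  /* line 0: the count */
  { offsetof (struct sketch_obj, items),   0, 0 },  /* line 1: the array */
};

/* The core of what lispdesc_indirect_count_1 () does, minus the type
   dispatch and the nested offset resolution. */
static long
sketch_indirect_count (const struct sketch_desc *ref,
                       const struct sketch_desc *desc, const void *obj)
{
  const struct sketch_desc *src = &desc[ref->count_line];
  return *(const int *) ((const char *) obj + src->offset) + ref->count_delta;
}
/* sketch_indirect_count (&sketch_obj_desc[1], sketch_obj_desc, &o)
   returns o.nitems. */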
399 /* SDESC is a "description map" (basically, a list of offsets used for | |
400 successive indirections) and OBJ is the first object to indirect off of. | |
401 Return the description ultimately found. */ | |
402 | |
403 const struct sized_memory_description * | |
404 lispdesc_indirect_description_1 (const void *obj, | |
405 const struct sized_memory_description *sdesc) | |
406 { | |
407 int pos; | |
408 | |
409 for (pos = 0; sdesc[pos].size >= 0; pos++) | |
410 obj = * (const void **) ((const char *) obj + sdesc[pos].size); | |
411 | |
412 return (const struct sized_memory_description *) obj; | |
413 } | |
414 | |
415 /* Compute the size of the data at RDATA, described by a single entry | |
416 DESC1 in a description array. OBJ and DESC are used for | |
417 XD_INDIRECT references. */ | |
418 | |
419 static Bytecount | |
420 lispdesc_one_description_line_size (void *rdata, | |
421 const struct memory_description *desc1, | |
422 const void *obj, | |
423 const struct memory_description *desc) | |
424 { | |
425 union_switcheroo: | |
426 switch (desc1->type) | |
427 { | |
428 case XD_LISP_OBJECT_ARRAY: | |
429 { | |
430 EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj); | |
431 return (val * sizeof (Lisp_Object)); | |
432 } | |
433 case XD_LISP_OBJECT: | |
434 case XD_LO_LINK: | |
435 return sizeof (Lisp_Object); | |
436 case XD_OPAQUE_PTR: | |
437 return sizeof (void *); | |
438 #ifdef NEW_GC | |
439 case XD_LISP_OBJECT_BLOCK_PTR: | |
440 #endif /* NEW_GC */ | |
441 case XD_BLOCK_PTR: | |
442 { | |
443 EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj); | |
444 return val * sizeof (void *); | |
445 } | |
446 case XD_BLOCK_ARRAY: | |
447 { | |
448 EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj); | |
449 | |
450 return (val * | |
451 lispdesc_block_size | |
452 (rdata, | |
453 lispdesc_indirect_description (obj, desc1->data2.descr))); | |
454 } | |
455 case XD_OPAQUE_DATA_PTR: | |
456 return sizeof (void *); | |
457 case XD_UNION_DYNAMIC_SIZE: | |
458 { | |
459 /* If an explicit size was given in the first-level structure | |
460 description, use it; else compute size based on current union | |
461 constant. */ | |
462 const struct sized_memory_description *sdesc = | |
463 lispdesc_indirect_description (obj, desc1->data2.descr); | |
464 if (sdesc->size) | |
465 return sdesc->size; | |
466 else | |
467 { | |
468 desc1 = lispdesc_process_xd_union (desc1, desc, obj); | |
469 if (desc1) | |
470 goto union_switcheroo; | |
471 break; | |
472 } | |
473 } | |
474 case XD_UNION: | |
475 { | |
476 /* If an explicit size was given in the first-level structure | |
477 description, use it; else compute size based on maximum of all | |
478 possible structures. */ | |
479 const struct sized_memory_description *sdesc = | |
480 lispdesc_indirect_description (obj, desc1->data2.descr); | |
481 if (sdesc->size) | |
482 return sdesc->size; | |
483 else | |
484 { | |
485 int count; | |
486 Bytecount max_size = -1, size; | |
487 | |
488 desc1 = sdesc->description; | |
489 | |
490 for (count = 0; desc1[count].type != XD_END; count++) | |
491 { | |
492 size = lispdesc_one_description_line_size (rdata, | |
493 &desc1[count], | |
494 obj, desc); | |
495 if (size > max_size) | |
496 max_size = size; | |
497 } | |
498 return max_size; | |
499 } | |
500 } | |
501 case XD_ASCII_STRING: | |
502 return sizeof (void *); | |
503 case XD_DOC_STRING: | |
504 return sizeof (void *); | |
505 case XD_INT_RESET: | |
506 return sizeof (int); | |
507 case XD_BYTECOUNT: | |
508 return sizeof (Bytecount); | |
509 case XD_ELEMCOUNT: | |
510 return sizeof (Elemcount); | |
511 case XD_HASHCODE: | |
512 return sizeof (Hashcode); | |
513 case XD_INT: | |
514 return sizeof (int); | |
515 case XD_LONG: | |
516 return sizeof (long); | |
517 default: | |
518 stderr_out ("Unsupported dump type : %d\n", desc1->type); | |
519 ABORT (); | |
520 } | |
521 | |
522 return 0; | |
523 } | |
524 | |
525 | |
526 /* Return the size of the memory block (NOT necessarily a structure!) | |
527 described by SDESC and pointed to by OBJ. If SDESC records an | |
528 explicit size (i.e. non-zero), it is simply returned; otherwise, | |
529 the size is calculated by the maximum offset and the size of the | |
530 object at that offset, rounded up to the maximum alignment. In | |
531 this case, we may need the object, for example when retrieving an | |
532 "indirect count" of an inlined array (the count is not constant, | |
533 but is specified by one of the elements of the memory block). (It | |
534 is generally not a problem if we return an overly large size -- we | |
535 will simply end up reserving more space than necessary; but if the | |
536 size is too small we could be in serious trouble, in particular | |
537 with nested inlined structures, where there may be alignment | |
538 padding in the middle of a block. #### In fact there is an (at | |
539 least theoretical) problem with an overly large size -- we may | |
540 trigger a protection fault when reading from invalid memory. We | |
541 need to handle this -- perhaps in a stupid but dependable way, | |
542 i.e. by trapping SIGSEGV and SIGBUS.) */ | |
543 | |
544 Bytecount | |
545 lispdesc_block_size_1 (const void *obj, Bytecount size, | |
546 const struct memory_description *desc) | |
547 { | |
548 EMACS_INT max_offset = -1; | |
549 int max_offset_pos = -1; | |
550 int pos; | |
551 | |
552 if (size) | |
553 return size; | |
554 | |
555 for (pos = 0; desc[pos].type != XD_END; pos++) | |
556 { | |
557 EMACS_INT offset = lispdesc_indirect_count (desc[pos].offset, desc, obj); | |
558 if (offset == max_offset) | |
559 { | |
5168 | 560 #if 0 |
561 /* This can legitimately happen with gap arrays -- if there are |
562 no elements in the array, and the gap size is 0, then both |
563 parts of the array will be of size 0 and in the same place. */ |
3092 | 564 stderr_out ("Two relocatable elements at same offset?\n"); |
565 ABORT (); | |
5168 | 566 #endif |
3092 | 567 } |
568 else if (offset > max_offset) | |
569 { | |
570 max_offset = offset; | |
571 max_offset_pos = pos; | |
572 } | |
573 } | |
574 | |
575 if (max_offset_pos < 0) | |
576 return 0; | |
577 | |
578 { | |
579 Bytecount size_at_max; | |
580 size_at_max = | |
581 lispdesc_one_description_line_size ((char *) obj + max_offset, | |
582 &desc[max_offset_pos], obj, desc); | |
583 | |
584 /* We have no way of knowing the required alignment for this structure, | |
585 so just make it maximally aligned. */ | |
586 return MAX_ALIGN_SIZE (max_offset + size_at_max); | |
587 } | |
588 } | |
589 #endif /* defined (USE_KKCC) || defined (PDUMP) */ | |
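A worked instance of the size computation above, using a made-up layout:

/* Worked instance of the computation in lispdesc_block_size_1 () above,
   with a made-up layout.  Suppose the largest offset in the description
   covers an int at offset 16 and the maximal alignment is 8 bytes:

     max_offset   = 16
     size_at_max  = sizeof (int) = 4
     16 + 4       = 20
     MAX_ALIGN_SIZE (20) = 24   <- returned block size

   Rounding up to 24 only wastes a little space; as the comment above
   notes, returning too small a size would be the dangerous direction. */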
590 | |
3263 | 591 #ifdef NEW_GC |
3092 | 592 #define GC_CHECK_NOT_FREE(lheader) \ |
593 gc_checking_assert (! LRECORD_FREE_P (lheader)); | |
3263 | 594 #else /* not NEW_GC */ |
3092 | 595 #define GC_CHECK_NOT_FREE(lheader) \ |
596 gc_checking_assert (! LRECORD_FREE_P (lheader)); \ | |
5142 | 597 gc_checking_assert (LHEADER_IMPLEMENTATION (lheader)->frob_block_p || \ |
598 ! (lheader)->free) |
3263 | 599 #endif /* not NEW_GC */ |
3092 | 600 |
601 #ifdef USE_KKCC | |
602 /* The following functions implement the new mark algorithm. | |
603 They mark objects according to their descriptions. They | |
604 are modeled on the corresponding pdumper procedures. */ | |
605 | |
606 #if 0 | |
607 # define KKCC_STACK_AS_QUEUE 1 | |
608 #endif | |
609 | |
610 #ifdef DEBUG_XEMACS | |
611 /* The backtrace for the KKCC mark functions. */ | |
612 #define KKCC_INIT_BT_STACK_SIZE 4096 | |
613 | |
614 typedef struct | |
615 { | |
616 void *obj; | |
617 const struct memory_description *desc; | |
618 int pos; | |
619 } kkcc_bt_stack_entry; | |
620 | |
621 static kkcc_bt_stack_entry *kkcc_bt; | |
622 static int kkcc_bt_stack_size; | |
623 static int kkcc_bt_depth = 0; | |
624 | |
625 static void | |
626 kkcc_bt_init (void) | |
627 { | |
628 kkcc_bt_depth = 0; | |
629 kkcc_bt_stack_size = KKCC_INIT_BT_STACK_SIZE; | |
630 kkcc_bt = (kkcc_bt_stack_entry *) | |
631 xmalloc_and_zero (kkcc_bt_stack_size * sizeof (kkcc_bt_stack_entry)); | |
632 if (!kkcc_bt) | |
633 { | |
634 stderr_out ("KKCC backtrace stack init failed for size %d\n", | |
635 kkcc_bt_stack_size); | |
636 ABORT (); | |
637 } | |
638 } | |
639 | |
640 void | |
641 kkcc_backtrace (void) | |
642 { | |
643 int i; | |
644 stderr_out ("KKCC mark stack backtrace :\n"); | |
645 for (i = kkcc_bt_depth - 1; i >= 0; i--) | |
646 { | |
647 Lisp_Object obj = wrap_pointer_1 (kkcc_bt[i].obj); | |
648 stderr_out (" [%d]", i); | |
649 if ((XRECORD_LHEADER (obj)->type >= lrecord_type_last_built_in_type) | |
650 || (!LRECORDP (obj)) | |
651 || (!XRECORD_LHEADER_IMPLEMENTATION (obj))) | |
652 { | |
653 stderr_out (" non Lisp Object"); | |
654 } | |
655 else | |
656 { | |
657 stderr_out (" %s", | |
658 XRECORD_LHEADER_IMPLEMENTATION (obj)->name); | |
659 } | |
3519 | 660 stderr_out (" (addr: %p, desc: %p, ", |
661 (void *) kkcc_bt[i].obj, | |
662 (void *) kkcc_bt[i].desc); | |
3092 | 663 if (kkcc_bt[i].pos >= 0) |
664 stderr_out ("pos: %d)\n", kkcc_bt[i].pos); | |
665 else | |
666 if (kkcc_bt[i].pos == -1) | |
667 stderr_out ("root set)\n"); | |
668 else if (kkcc_bt[i].pos == -2) | |
669 stderr_out ("dirty object)\n"); | |
670 } | |
671 } | |
672 | |
673 static void | |
674 kkcc_bt_stack_realloc (void) | |
675 { | |
676 kkcc_bt_stack_size *= 2; | |
677 kkcc_bt = (kkcc_bt_stack_entry *) | |
678 xrealloc (kkcc_bt, kkcc_bt_stack_size * sizeof (kkcc_bt_stack_entry)); | |
679 if (!kkcc_bt) | |
680 { | |
681 stderr_out ("KKCC backtrace stack realloc failed for size %d\n", | |
682 kkcc_bt_stack_size); | |
683 ABORT (); | |
684 } | |
685 } | |
686 | |
687 static void | |
688 kkcc_bt_free (void) | |
689 { | |
690 xfree_1 (kkcc_bt); | |
691 kkcc_bt = 0; | |
692 kkcc_bt_stack_size = 0; | |
693 } | |
694 | |
695 static void | |
696 kkcc_bt_push (void *obj, const struct memory_description *desc, | |
697 int level, int pos) | |
698 { | |
699 kkcc_bt_depth = level; | |
700 kkcc_bt[kkcc_bt_depth].obj = obj; | |
701 kkcc_bt[kkcc_bt_depth].desc = desc; | |
702 kkcc_bt[kkcc_bt_depth].pos = pos; | |
703 kkcc_bt_depth++; | |
704 if (kkcc_bt_depth >= kkcc_bt_stack_size) | |
705 kkcc_bt_stack_realloc (); | |
706 } | |
707 | |
708 #else /* not DEBUG_XEMACS */ | |
709 #define kkcc_bt_init() | |
710 #define kkcc_bt_push(obj, desc, level, pos) | |
711 #endif /* not DEBUG_XEMACS */ | |
712 | |
713 /* Object memory descriptions are in the lrecord_implementation structure. | |
714 But copying them to a parallel array is much more cache-friendly. */ | |
715 const struct memory_description *lrecord_memory_descriptions[countof (lrecord_implementations_table)]; | |
716 | |
717 /* the initial stack size in kkcc_gc_stack_entries */ | |
718 #define KKCC_INIT_GC_STACK_SIZE 16384 | |
719 | |
720 typedef struct | |
721 { | |
722 void *data; | |
723 const struct memory_description *desc; | |
724 #ifdef DEBUG_XEMACS | |
725 int level; | |
726 int pos; | |
727 #endif | |
728 } kkcc_gc_stack_entry; | |
729 | |
730 | |
731 static kkcc_gc_stack_entry *kkcc_gc_stack_ptr; | |
732 static int kkcc_gc_stack_front; | |
733 static int kkcc_gc_stack_rear; | |
734 static int kkcc_gc_stack_size; | |
735 | |
736 #define KKCC_INC(i) ((i + 1) % kkcc_gc_stack_size) | |
737 #define KKCC_INC2(i) ((i + 2) % kkcc_gc_stack_size) | |
738 | |
739 #define KKCC_GC_STACK_FULL (KKCC_INC2 (kkcc_gc_stack_rear) == kkcc_gc_stack_front) | |
740 #define KKCC_GC_STACK_EMPTY (KKCC_INC (kkcc_gc_stack_rear) == kkcc_gc_stack_front) | |
741 | |
742 static void | |
743 kkcc_gc_stack_init (void) | |
744 { | |
745 kkcc_gc_stack_size = KKCC_INIT_GC_STACK_SIZE; | |
746 kkcc_gc_stack_ptr = (kkcc_gc_stack_entry *) | |
747 xmalloc_and_zero (kkcc_gc_stack_size * sizeof (kkcc_gc_stack_entry)); | |
748 if (!kkcc_gc_stack_ptr) | |
749 { | |
750 stderr_out ("stack init failed for size %d\n", kkcc_gc_stack_size); | |
751 ABORT (); | |
752 } | |
753 kkcc_gc_stack_front = 0; | |
754 kkcc_gc_stack_rear = kkcc_gc_stack_size - 1; | |
755 } | |
756 | |
757 static void | |
758 kkcc_gc_stack_free (void) | |
759 { | |
760 xfree_1 (kkcc_gc_stack_ptr); | |
761 kkcc_gc_stack_ptr = 0; | |
762 kkcc_gc_stack_front = 0; | |
763 kkcc_gc_stack_rear = 0; | |
764 kkcc_gc_stack_size = 0; | |
765 } | |
766 | |
767 static void | |
768 kkcc_gc_stack_realloc (void) | |
769 { | |
770 kkcc_gc_stack_entry *old_ptr = kkcc_gc_stack_ptr; | |
771 int old_size = kkcc_gc_stack_size; | |
772 kkcc_gc_stack_size *= 2; | |
773 kkcc_gc_stack_ptr = (kkcc_gc_stack_entry *) | |
774 xmalloc_and_zero (kkcc_gc_stack_size * sizeof (kkcc_gc_stack_entry)); | |
775 if (!kkcc_gc_stack_ptr) | |
776 { | |
777 stderr_out ("stack realloc failed for size %d\n", kkcc_gc_stack_size); | |
778 ABORT (); | |
779 } | |
780 if (kkcc_gc_stack_rear >= kkcc_gc_stack_front) | |
781 { | |
782 int number_elements = kkcc_gc_stack_rear - kkcc_gc_stack_front + 1; | |
783 memcpy (kkcc_gc_stack_ptr, &old_ptr[kkcc_gc_stack_front], | |
784 number_elements * sizeof (kkcc_gc_stack_entry)); | |
785 kkcc_gc_stack_front = 0; | |
786 kkcc_gc_stack_rear = number_elements - 1; | |
787 } | |
788 else | |
789 { | |
790 int number_elements = old_size - kkcc_gc_stack_front; | |
791 memcpy (kkcc_gc_stack_ptr, &old_ptr[kkcc_gc_stack_front], | |
792 number_elements * sizeof (kkcc_gc_stack_entry)); | |
793 memcpy (&kkcc_gc_stack_ptr[number_elements], &old_ptr[0], | |
794 (kkcc_gc_stack_rear + 1) * sizeof (kkcc_gc_stack_entry)); | |
795 kkcc_gc_stack_front = 0; | |
796 kkcc_gc_stack_rear = kkcc_gc_stack_rear + number_elements; | |
797 } | |
798 xfree_1 (old_ptr); | |
799 } | |
800 | |
801 static void | |
802 #ifdef DEBUG_XEMACS | |
803 kkcc_gc_stack_push_1 (void *data, const struct memory_description *desc, | |
804 int level, int pos) | |
805 #else | |
806 kkcc_gc_stack_push_1 (void *data, const struct memory_description *desc) | |
807 #endif | |
808 { | |
809 #ifdef NEW_GC | |
810 GC_STAT_ENQUEUED; | |
811 #endif /* NEW_GC */ | |
812 if (KKCC_GC_STACK_FULL) | |
813 kkcc_gc_stack_realloc(); | |
814 kkcc_gc_stack_rear = KKCC_INC (kkcc_gc_stack_rear); | |
815 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].data = data; | |
816 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].desc = desc; | |
817 #ifdef DEBUG_XEMACS | |
818 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].level = level; | |
819 kkcc_gc_stack_ptr[kkcc_gc_stack_rear].pos = pos; | |
820 #endif | |
821 } | |
822 | |
823 #ifdef DEBUG_XEMACS | |
824 #define kkcc_gc_stack_push(data, desc, level, pos) \ | |
825 kkcc_gc_stack_push_1 (data, desc, level, pos) | |
826 #else | |
827 #define kkcc_gc_stack_push(data, desc, level, pos) \ | |
828 kkcc_gc_stack_push_1 (data, desc) | |
829 #endif | |
830 | |
831 static kkcc_gc_stack_entry * | |
832 kkcc_gc_stack_pop (void) | |
833 { | |
834 if (KKCC_GC_STACK_EMPTY) | |
835 return 0; | |
836 #ifdef NEW_GC | |
837 GC_STAT_DEQUEUED; | |
838 #endif /* NEW_GC */ | |
839 #ifndef KKCC_STACK_AS_QUEUE | |
840 /* stack behaviour */ | |
841 return &kkcc_gc_stack_ptr[kkcc_gc_stack_rear--]; | |
842 #else | |
843 /* queue behaviour */ | |
844 { | |
845 int old_front = kkcc_gc_stack_front; | |
846 kkcc_gc_stack_front = KKCC_INC (kkcc_gc_stack_front); | |
847 return &kkcc_gc_stack_ptr[old_front]; | |
848 } | |
849 #endif | |
850 } | |
851 | |
852 void | |
853 #ifdef DEBUG_XEMACS | |
854 kkcc_gc_stack_push_lisp_object_1 (Lisp_Object obj, int level, int pos) | |
855 #else | |
856 kkcc_gc_stack_push_lisp_object_1 (Lisp_Object obj) | |
857 #endif | |
858 { | |
859 if (XTYPE (obj) == Lisp_Type_Record) | |
860 { | |
861 struct lrecord_header *lheader = XRECORD_LHEADER (obj); | |
862 const struct memory_description *desc; | |
863 GC_CHECK_LHEADER_INVARIANTS (lheader); | |
864 desc = RECORD_DESCRIPTION (lheader); | |
865 if (! MARKED_RECORD_HEADER_P (lheader)) | |
866 { | |
867 #ifdef NEW_GC | |
868 MARK_GREY (lheader); | |
869 #else /* not NEW_GC */ | |
870 MARK_RECORD_HEADER (lheader); | |
871 #endif /* not NEW_GC */ | |
872 kkcc_gc_stack_push ((void *) lheader, desc, level, pos); | |
873 } | |
874 } | |
875 } | |
876 | |
877 #ifdef NEW_GC | |
878 #ifdef DEBUG_XEMACS | |
879 #define kkcc_gc_stack_push_lisp_object(obj, level, pos) \ | |
880 kkcc_gc_stack_push_lisp_object_1 (obj, level, pos) | |
881 #else | |
882 #define kkcc_gc_stack_push_lisp_object(obj, level, pos) \ | |
883 kkcc_gc_stack_push_lisp_object_1 (obj) | |
884 #endif | |
885 | |
886 void | |
887 #ifdef DEBUG_XEMACS | |
888 kkcc_gc_stack_repush_dirty_object_1 (Lisp_Object obj, int level, int pos) | |
889 #else | |
890 kkcc_gc_stack_repush_dirty_object_1 (Lisp_Object obj) | |
891 #endif | |
892 { | |
893 if (XTYPE (obj) == Lisp_Type_Record) | |
894 { | |
895 struct lrecord_header *lheader = XRECORD_LHEADER (obj); | |
896 const struct memory_description *desc; | |
897 GC_STAT_REPUSHED; | |
898 GC_CHECK_LHEADER_INVARIANTS (lheader); | |
899 desc = RECORD_DESCRIPTION (lheader); | |
900 MARK_GREY (lheader); | |
901 kkcc_gc_stack_push ((void*) lheader, desc, level, pos); | |
902 } | |
903 } | |
904 #endif /* NEW_GC */ | |
905 | |
906 #ifdef ERROR_CHECK_GC | |
907 #define KKCC_DO_CHECK_FREE(obj, allow_free) \ | |
908 do \ | |
909 { \ | |
910 if (!allow_free && XTYPE (obj) == Lisp_Type_Record) \ | |
911 { \ | |
912 struct lrecord_header *lheader = XRECORD_LHEADER (obj); \ | |
913 GC_CHECK_NOT_FREE (lheader); \ | |
914 } \ | |
915 } while (0) | |
916 #else | |
917 #define KKCC_DO_CHECK_FREE(obj, allow_free) | |
918 #endif | |
919 | |
920 #ifdef ERROR_CHECK_GC | |
921 #ifdef DEBUG_XEMACS | |
922 static void | |
923 mark_object_maybe_checking_free_1 (Lisp_Object obj, int allow_free, | |
924 int level, int pos) | |
925 #else | |
926 static void | |
927 mark_object_maybe_checking_free_1 (Lisp_Object obj, int allow_free) | |
928 #endif | |
929 { | |
930 KKCC_DO_CHECK_FREE (obj, allow_free); | |
931 kkcc_gc_stack_push_lisp_object (obj, level, pos); | |
932 } | |
933 | |
934 #ifdef DEBUG_XEMACS | |
935 #define mark_object_maybe_checking_free(obj, allow_free, level, pos) \ | |
936 mark_object_maybe_checking_free_1 (obj, allow_free, level, pos) | |
937 #else | |
938 #define mark_object_maybe_checking_free(obj, allow_free, level, pos) \ | |
939 mark_object_maybe_checking_free_1 (obj, allow_free) | |
940 #endif | |
941 #else /* not ERROR_CHECK_GC */ | |
942 #define mark_object_maybe_checking_free(obj, allow_free, level, pos) \ | |
943 kkcc_gc_stack_push_lisp_object (obj, level, pos) | |
944 #endif /* not ERROR_CHECK_GC */ | |
945 | |
946 | |
947 /* This function loops all elements of a struct pointer and calls | |
948 mark_with_description with each element. */ | |
949 static void | |
950 #ifdef DEBUG_XEMACS | |
951 mark_struct_contents_1 (const void *data, | |
952 const struct sized_memory_description *sdesc, | |
953 int count, int level, int pos) | |
954 #else | |
955 mark_struct_contents_1 (const void *data, | |
956 const struct sized_memory_description *sdesc, | |
957 int count) | |
958 #endif | |
959 { | |
960 int i; | |
961 Bytecount elsize; | |
962 elsize = lispdesc_block_size (data, sdesc); | |
963 | |
964 for (i = 0; i < count; i++) | |
965 { | |
966 kkcc_gc_stack_push (((char *) data) + elsize * i, sdesc->description, | |
967 level, pos); | |
968 } | |
969 } | |
970 | |
971 #ifdef DEBUG_XEMACS | |
972 #define mark_struct_contents(data, sdesc, count, level, pos) \ | |
973 mark_struct_contents_1 (data, sdesc, count, level, pos) | |
974 #else | |
975 #define mark_struct_contents(data, sdesc, count, level, pos) \ | |
976 mark_struct_contents_1 (data, sdesc, count) | |
977 #endif | |
978 | |
979 | |
980 #ifdef NEW_GC | |
981 /* This function loops all elements of a struct pointer and calls | |
982 mark_with_description with each element. */ | |
983 static void | |
984 #ifdef DEBUG_XEMACS | |
985 mark_lisp_object_block_contents_1 (const void *data, | |
986 const struct sized_memory_description *sdesc, | |
987 int count, int level, int pos) | |
988 #else | |
989 mark_lisp_object_block_contents_1 (const void *data, | |
990 const struct sized_memory_description *sdesc, | |
991 int count) | |
992 #endif | |
993 { | |
994 int i; | |
995 Bytecount elsize; | |
996 elsize = lispdesc_block_size (data, sdesc); | |
997 | |
998 for (i = 0; i < count; i++) | |
999 { | |
1000 const Lisp_Object obj = wrap_pointer_1 (((char *) data) + elsize * i); | |
1001 if (XTYPE (obj) == Lisp_Type_Record) | |
1002 { | |
1003 struct lrecord_header *lheader = XRECORD_LHEADER (obj); | |
1004 const struct memory_description *desc; | |
1005 GC_CHECK_LHEADER_INVARIANTS (lheader); | |
1006 desc = sdesc->description; | |
1007 if (! MARKED_RECORD_HEADER_P (lheader)) | |
1008 { | |
1009 MARK_GREY (lheader); | |
1010 kkcc_gc_stack_push ((void *) lheader, desc, level, pos); | |
1011 } | |
1012 } | |
1013 } | |
1014 } | |
1015 | |
1016 #ifdef DEBUG_XEMACS | |
1017 #define mark_lisp_object_block_contents(data, sdesc, count, level, pos) \ | |
1018 mark_lisp_object_block_contents_1 (data, sdesc, count, level, pos) | |
1019 #else | |
1020 #define mark_lisp_object_block_contents(data, sdesc, count, level, pos) \ | |
1021 mark_lisp_object_block_contents_1 (data, sdesc, count) | |
1022 #endif | |
1023 #endif /* NEW_GC */ | |
1024 | |
1025 /* This function implements the KKCC mark algorithm. | |
1026 Instead of calling mark_object, all the alive Lisp_Objects are pushed | |
1027 on the kkcc_gc_stack. This function processes all elements on the stack | |
1028 according to their descriptions. */ | |
1029 static void | |
5054 | 1030 kkcc_marking (int USED_IF_NEW_GC (cnt)) |
3092 | 1031 { |
1032 kkcc_gc_stack_entry *stack_entry = 0; | |
1033 void *data = 0; | |
1034 const struct memory_description *desc = 0; | |
1035 int pos; | |
1036 #ifdef NEW_GC | |
5046 | 1037 int obj_count = cnt; |
3092 | 1038 #endif /* NEW_GC */ |
1039 #ifdef DEBUG_XEMACS | |
1040 int level = 0; | |
1041 #endif | |
1042 | |
1043 while ((stack_entry = kkcc_gc_stack_pop ()) != 0) | |
1044 { | |
1045 data = stack_entry->data; | |
1046 desc = stack_entry->desc; | |
1047 #ifdef DEBUG_XEMACS | |
1048 level = stack_entry->level + 1; | |
1049 #endif | |
1050 kkcc_bt_push (data, desc, stack_entry->level, stack_entry->pos); | |
1051 | |
1052 #ifdef NEW_GC | |
1053 /* Mark black if object is currently grey. This first checks, | |
1054 if the object is really allocated on the mc-heap. If it is, | |
1055 it can be marked black; if it is not, it cannot be marked. */ | |
1056 maybe_mark_black (data); | |
1057 #endif /* NEW_GC */ | |
1058 | |
1059 if (!data) continue; | |
1060 | |
1061 gc_checking_assert (data); | |
1062 gc_checking_assert (desc); | |
1063 | |
1064 for (pos = 0; desc[pos].type != XD_END; pos++) | |
1065 { | |
1066 const struct memory_description *desc1 = &desc[pos]; | |
1067 const void *rdata = | |
1068 (const char *) data + lispdesc_indirect_count (desc1->offset, | |
1069 desc, data); | |
1070 union_switcheroo: | |
1071 | |
1072 /* If the flag says don't mark, then don't mark. */ | |
1073 if ((desc1->flags) & XD_FLAG_NO_KKCC) | |
1074 continue; | |
1075 | |
1076 switch (desc1->type) | |
1077 { | |
1078 case XD_BYTECOUNT: | |
1079 case XD_ELEMCOUNT: | |
1080 case XD_HASHCODE: | |
1081 case XD_INT: | |
1082 case XD_LONG: | |
1083 case XD_INT_RESET: | |
1084 case XD_LO_LINK: | |
1085 case XD_OPAQUE_PTR: | |
1086 case XD_OPAQUE_DATA_PTR: | |
1087 case XD_ASCII_STRING: | |
1088 case XD_DOC_STRING: | |
1089 break; | |
1090 case XD_LISP_OBJECT: | |
1091 { | |
1092 const Lisp_Object *stored_obj = (const Lisp_Object *) rdata; | |
1093 | |
1094 /* Because of the way that tagged objects work (pointers and | |
1095 Lisp_Objects have the same representation), XD_LISP_OBJECT | |
1096 can be used for untagged pointers. They might be NULL, | |
1097 though. */ | |
1098 if (EQ (*stored_obj, Qnull_pointer)) | |
1099 break; | |
3263 | 1100 #ifdef NEW_GC |
3092 | 1101 mark_object_maybe_checking_free (*stored_obj, 0, level, pos); |
3263 | 1102 #else /* not NEW_GC */ |
3092 | 1103 mark_object_maybe_checking_free |
1104 (*stored_obj, (desc1->flags) & XD_FLAG_FREE_LISP_OBJECT, | |
1105 level, pos); | |
3263 | 1106 #endif /* not NEW_GC */ |
3092 | 1107 break; |
1108 } | |
1109 case XD_LISP_OBJECT_ARRAY: | |
1110 { | |
1111 int i; | |
1112 EMACS_INT count = | |
1113 lispdesc_indirect_count (desc1->data1, desc, data); | |
1114 | |
1115 for (i = 0; i < count; i++) | |
1116 { | |
1117 const Lisp_Object *stored_obj = | |
1118 (const Lisp_Object *) rdata + i; | |
1119 | |
1120 if (EQ (*stored_obj, Qnull_pointer)) | |
1121 break; | |
3263 | 1122 #ifdef NEW_GC |
3092 | 1123 mark_object_maybe_checking_free |
1124 (*stored_obj, 0, level, pos); | |
3263 | 1125 #else /* not NEW_GC */ |
3092 | 1126 mark_object_maybe_checking_free |
1127 (*stored_obj, (desc1->flags) & XD_FLAG_FREE_LISP_OBJECT, | |
1128 level, pos); | |
3263 | 1129 #endif /* not NEW_GC */ |
3092 | 1130 } |
1131 break; | |
1132 } | |
1133 #ifdef NEW_GC | |
1134 case XD_LISP_OBJECT_BLOCK_PTR: | |
1135 { | |
1136 EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc, | |
1137 data); | |
1138 const struct sized_memory_description *sdesc = | |
1139 lispdesc_indirect_description (data, desc1->data2.descr); | |
1140 const char *dobj = * (const char **) rdata; | |
1141 if (dobj) | |
1142 mark_lisp_object_block_contents | |
1143 (dobj, sdesc, count, level, pos); | |
1144 break; | |
1145 } | |
1146 #endif /* NEW_GC */ | |
1147 case XD_BLOCK_PTR: | |
1148 { | |
1149 EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc, | |
1150 data); | |
1151 const struct sized_memory_description *sdesc = | |
1152 lispdesc_indirect_description (data, desc1->data2.descr); | |
1153 const char *dobj = * (const char **) rdata; | |
1154 if (dobj) | |
1155 mark_struct_contents (dobj, sdesc, count, level, pos); | |
1156 break; | |
1157 } | |
1158 case XD_BLOCK_ARRAY: | |
1159 { | |
1160 EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc, | |
1161 data); | |
1162 const struct sized_memory_description *sdesc = | |
1163 lispdesc_indirect_description (data, desc1->data2.descr); | |
1164 | |
1165 mark_struct_contents (rdata, sdesc, count, level, pos); | |
1166 break; | |
1167 } | |
1168 case XD_UNION: | |
1169 case XD_UNION_DYNAMIC_SIZE: | |
1170 desc1 = lispdesc_process_xd_union (desc1, desc, data); | |
1171 if (desc1) | |
1172 goto union_switcheroo; | |
1173 break; | |
1174 | |
1175 default: | |
1176 stderr_out ("Unsupported description type : %d\n", desc1->type); | |
1177 kkcc_backtrace (); | |
1178 ABORT (); | |
1179 } | |
1180 } | |
1181 | |
1182 #ifdef NEW_GC | |
1183 if (cnt) | |
5046 | 1184 if (!--obj_count) |
3092 | 1185 break; |
1186 #endif /* NEW_GC */ | |
1187 } | |
1188 } | |
1189 #endif /* USE_KKCC */ | |
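For readers new to this style of collector, the loop above is a standard explicit-worklist marker. A stripped-down sketch with a hypothetical node type (no lrecords, no memory descriptions) has the same shape:

/* Stripped-down sketch of the marking loop above, using a hypothetical
   node type instead of lrecords + memory descriptions. */
typedef struct sketch_node
{
  int marked;                        /* cf. MARKED_RECORD_HEADER_P */
  int nchildren;
  struct sketch_node **children;
} sketch_node;

#define SKETCH_STACK_SIZE 16384      /* cf. KKCC_INIT_GC_STACK_SIZE */
static sketch_node *sketch_stack[SKETCH_STACK_SIZE];
static int sketch_sp;

static void
sketch_push (sketch_node *n)         /* cf. kkcc_gc_stack_push_lisp_object */
{
  if (n && !n->marked)
    {
      n->marked = 1;                 /* grey: reached, children pending */
      sketch_stack[sketch_sp++] = n; /* a real version grows the stack,
                                        cf. kkcc_gc_stack_realloc () */
    }
}

static void
sketch_mark (sketch_node **roots, int nroots)
{
  int i;
  sketch_sp = 0;
  for (i = 0; i < nroots; i++)       /* cf. gc_mark_root_set () below */
    sketch_push (roots[i]);
  while (sketch_sp > 0)              /* cf. the while loop in kkcc_marking */
    {
      sketch_node *n = sketch_stack[--sketch_sp];
      for (i = 0; i < n->nchildren; i++)   /* cf. walking desc[pos] */
        sketch_push (n->children[i]);
    }
}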
1190 | |
1191 /* I hate duplicating all this crap! */ | |
1192 int | |
1193 marked_p (Lisp_Object obj) | |
1194 { | |
1195 /* Checks we used to perform. */ | |
1196 /* if (EQ (obj, Qnull_pointer)) return 1; */ | |
1197 /* if (!POINTER_TYPE_P (XGCTYPE (obj))) return 1; */ | |
1198 /* if (PURIFIED (XPNTR (obj))) return 1; */ | |
1199 | |
1200 if (XTYPE (obj) == Lisp_Type_Record) | |
1201 { | |
1202 struct lrecord_header *lheader = XRECORD_LHEADER (obj); | |
1203 | |
1204 GC_CHECK_LHEADER_INVARIANTS (lheader); | |
1205 | |
1206 return MARKED_RECORD_HEADER_P (lheader); | |
1207 } | |
1208 return 1; | |
1209 } | |
1210 | |
1211 | |
1212 /* Mark reference to a Lisp_Object. If the object referred to has not been | |
1213 seen yet, recursively mark all the references contained in it. */ | |
1214 void | |
1215 mark_object ( | |
1216 #ifdef USE_KKCC | |
1217 Lisp_Object UNUSED (obj) | |
1218 #else | |
1219 Lisp_Object obj | |
1220 #endif | |
1221 ) | |
1222 { | |
1223 #ifdef USE_KKCC | |
1224 /* this code should never be reached when configured for KKCC */ | |
1225 stderr_out ("KKCC: Invalid mark_object call.\n"); | |
1226 stderr_out ("Replace mark_object with kkcc_gc_stack_push_lisp_object.\n"); | |
1227 ABORT (); | |
1228 #else /* not USE_KKCC */ | |
1229 | |
1230 tail_recurse: | |
1231 | |
1232 /* Checks we used to perform */ | |
1233 /* if (EQ (obj, Qnull_pointer)) return; */ | |
1234 /* if (!POINTER_TYPE_P (XGCTYPE (obj))) return; */ | |
1235 /* if (PURIFIED (XPNTR (obj))) return; */ | |
1236 | |
1237 if (XTYPE (obj) == Lisp_Type_Record) | |
1238 { | |
1239 struct lrecord_header *lheader = XRECORD_LHEADER (obj); | |
1240 | |
1241 GC_CHECK_LHEADER_INVARIANTS (lheader); | |
1242 | |
1243 /* We handle this separately, above, so we can mark free objects */ | |
1244 GC_CHECK_NOT_FREE (lheader); | |
1245 | |
1246 /* All c_readonly objects have their mark bit set, | |
1247 so that we only need to check the mark bit here. */ | |
1248 if (! MARKED_RECORD_HEADER_P (lheader)) | |
1249 { | |
1250 MARK_RECORD_HEADER (lheader); | |
1251 | |
1252 if (RECORD_MARKER (lheader)) | |
1253 { | |
1254 obj = RECORD_MARKER (lheader) (obj); | |
1255 if (!NILP (obj)) goto tail_recurse; | |
1256 } | |
1257 } | |
1258 } | |
1259 #endif /* not KKCC */ | |
1260 } | |
1261 | |
1262 | |
1263 /************************************************************************/ | |
1264 /* Hooks */ | |
1265 /************************************************************************/ | |
1266 | |
1267 /* Nonzero when calling certain hooks or doing other things where a GC | |
1268 would be bad. It prevents infinite recursive calls to gc. */ | |
1269 int gc_currently_forbidden; | |
1270 | |
1271 int | |
1272 begin_gc_forbidden (void) | |
1273 { | |
1274 return internal_bind_int (&gc_currently_forbidden, 1); | |
1275 } | |
1276 | |
1277 void | |
1278 end_gc_forbidden (int count) | |
1279 { | |
1280 unbind_to (count); | |
1281 } | |
1282 | |
1283 /* Hooks. */ | |
1284 Lisp_Object Vpre_gc_hook, Qpre_gc_hook; | |
1285 Lisp_Object Vpost_gc_hook, Qpost_gc_hook; | |
1286 | |
1287 /* Maybe we want to use this when doing a "panic" gc after memory_full()? */ | |
1288 static int gc_hooks_inhibited; | |
1289 | |
1290 struct post_gc_action | |
1291 { | |
1292 void (*fun) (void *); | |
1293 void *arg; | |
1294 }; | |
1295 | |
1296 typedef struct post_gc_action post_gc_action; | |
1297 | |
1298 typedef struct | |
1299 { | |
1300 Dynarr_declare (post_gc_action); | |
1301 } post_gc_action_dynarr; | |
1302 | |
1303 static post_gc_action_dynarr *post_gc_actions; | |
1304 | |
1305 /* Register an action to be called at the end of GC. | |
1306 gc_in_progress is 0 when this is called. | |
1307 This is used when it is discovered that an action needs to be taken, | |
1308 but it's during GC, so it's not safe. (e.g. in a finalize method.) | |
1309 | |
1310 As a general rule, do not use Lisp objects here. | |
1311 And NEVER signal an error. | |
1312 */ | |
1313 | |
1314 void | |
1315 register_post_gc_action (void (*fun) (void *), void *arg) | |
1316 { | |
1317 post_gc_action action; | |
1318 | |
1319 if (!post_gc_actions) | |
1320 post_gc_actions = Dynarr_new (post_gc_action); | |
1321 | |
1322 action.fun = fun; | |
1323 action.arg = arg; | |
1324 | |
1325 Dynarr_add (post_gc_actions, action); | |
1326 } | |
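A hedged usage sketch of the API above; the callback and its payload are invented for illustration, only the register_post_gc_action () signature comes from the code.

/* Hypothetical use of register_post_gc_action () above: a finalize
   method discovers that an external buffer should be released, but
   freeing it during GC would be unsafe, so the work is deferred. */
#include <stdlib.h>

static void
sketch_release_payload (void *arg)
{
  /* Runs from run_post_gc_actions () once gc_in_progress is 0 again.
     Per the comment above: no Lisp objects, and never signal an error. */
  free (arg);
}

/* In the finalizer, while GC is still running:
     register_post_gc_action (sketch_release_payload, payload);   */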
1327 | |
1328 static void | |
1329 run_post_gc_actions (void) | |
1330 { | |
1331 int i; | |
1332 | |
1333 if (post_gc_actions) | |
1334 { | |
1335 for (i = 0; i < Dynarr_length (post_gc_actions); i++) | |
1336 { | |
1337 post_gc_action action = Dynarr_at (post_gc_actions, i); | |
1338 (action.fun) (action.arg); | |
1339 } | |
1340 | |
1341 Dynarr_reset (post_gc_actions); | |
1342 } | |
1343 } | |
1344 | |
3263 | 1345 #ifdef NEW_GC |
1346 /* Asynchronous finalization. */ | |
1347 typedef struct finalize_elem | |
1348 { | |
1349 Lisp_Object obj; | |
1350 struct finalize_elem *next; | |
1351 } finalize_elem; | |
1352 | |
1353 finalize_elem *Vall_finalizable_objs; | |
1354 Lisp_Object Vfinalizers_to_run; | |
1355 | |
1356 void | |
1357 add_finalizable_obj (Lisp_Object obj) | |
1358 { | |
1359 finalize_elem *next = Vall_finalizable_objs; | |
1360 Vall_finalizable_objs = | |
1361 (finalize_elem *) xmalloc_and_zero (sizeof (finalize_elem)); | |
1362 Vall_finalizable_objs->obj = obj; | |
1363 Vall_finalizable_objs->next = next; | |
1364 } | |
1365 | |
1366 void | |
1367 register_for_finalization (void) | |
1368 { | |
1369 finalize_elem *rest = Vall_finalizable_objs; | |
1370 | |
1371 if (!rest) | |
1372 return; | |
1373 | |
1374 while (!marked_p (rest->obj)) | |
1375 { | |
1376 finalize_elem *temp = rest; | |
1377 Vfinalizers_to_run = Fcons (rest->obj, Vfinalizers_to_run); | |
1378 Vall_finalizable_objs = rest->next; | |
4976 | 1379 xfree (temp); |
3263 | 1380 rest = Vall_finalizable_objs; |
1381 } | |
1382 | |
1383 while (rest->next) | |
1384 { | |
1385 if (LRECORDP (rest->next->obj) | |
1386 && !marked_p (rest->next->obj)) | |
1387 { | |
1388 finalize_elem *temp = rest->next; | |
1389 Vfinalizers_to_run = Fcons (rest->next->obj, Vfinalizers_to_run); | |
1390 rest->next = rest->next->next; | |
4976 | 1391 xfree (temp); |
3263 | 1392 } |
1393 else | |
1394 { | |
1395 rest = rest->next; | |
1396 } | |
1397 } | |
1398 /* Keep objects alive that need to be finalized by marking | |
1399 Vfinalizers_to_run transitively. */ | |
1400 kkcc_gc_stack_push_lisp_object (Vfinalizers_to_run, 0, -1); | |
1401 kkcc_marking (0); | |
1402 } | |
1403 | |
1404 void | |
1405 run_finalizers (void) | |
1406 { | |
1407 Lisp_Object rest; | |
1408 for (rest = Vfinalizers_to_run; !NILP (rest); rest = XCDR (rest)) | |
1409 { | |
1410 MC_ALLOC_CALL_FINALIZER (XPNTR (XCAR (rest))); | |
1411 } | |
1412 Vfinalizers_to_run = Qnil; | |
1413 } | |
1414 #endif /* NEW_GC */ |
3092 | 1415 |
1416 | |
1417 /************************************************************************/ | |
1418 /* Garbage Collection */ | |
1419 /************************************************************************/ | |
1420 | |
1421 /* Enable/disable incremental garbage collection during runtime. */ | |
1422 int allow_incremental_gc; | |
1423 | |
1424 /* For profiling. */ | |
1425 static Lisp_Object QSin_garbage_collection; | |
1426 | |
1427 /* Nonzero means display messages at beginning and end of GC. */ | |
1428 int garbage_collection_messages; | |
1429 | |
1430 /* "Garbage collecting" */ | |
1431 Lisp_Object Vgc_message; | |
1432 Lisp_Object Vgc_pointer_glyph; | |
1433 static const Ascbyte gc_default_message[] = "Garbage collecting"; | |
1434 Lisp_Object Qgarbage_collecting; | |
1435 | |
1436 /* "Locals" during GC. */ | |
1437 struct frame *f; | |
1438 int speccount; | |
1439 int cursor_changed; | |
1440 Lisp_Object pre_gc_cursor; | |
1441 | |
1442 /* PROFILE_DECLARE */ | |
1443 int do_backtrace; | |
1444 struct backtrace backtrace; | |
1445 | |
1446 /* Maximum amount of C stack to save when a GC happens. */ | |
1447 #ifndef MAX_SAVE_STACK | |
1448 #define MAX_SAVE_STACK 0 /* 16000 */ | |
1449 #endif | |
1450 | |
5016 | 1451 static void |
3267 | 1452 show_gc_cursor_and_message (void) |
3092 | 1453 { |
3267 | 1454 /* Now show the GC cursor/message. */ |
1455 pre_gc_cursor = Qnil; | |
1456 cursor_changed = 0; | |
3092 | 1457 |
1458 /* We used to call selected_frame() here. | |
1459 | |
1460 The following functions cannot be called inside GC | |
1461 so we move to after the above tests. */ | |
1462 { | |
1463 Lisp_Object frame; | |
1464 Lisp_Object device = Fselected_device (Qnil); | |
1465 if (NILP (device)) /* Could happen during startup, eg. if always_gc */ | |
1466 return; | |
1467 frame = Fselected_frame (device); | |
1468 if (NILP (frame)) | |
1469 invalid_state ("No frames exist on device", device); | |
1470 f = XFRAME (frame); | |
1471 } | |
1472 | |
1473 if (!noninteractive) | |
1474 { | |
1475 if (FRAME_WIN_P (f)) | |
1476 { | |
1477 Lisp_Object frame = wrap_frame (f); | |
1478 Lisp_Object cursor = glyph_image_instance (Vgc_pointer_glyph, | |
1479 FRAME_SELECTED_WINDOW (f), | |
1480 ERROR_ME_NOT, 1); | |
1481 pre_gc_cursor = f->pointer; | |
1482 if (POINTER_IMAGE_INSTANCEP (cursor) | |
1483 /* don't change if we don't know how to change back. */ | |
1484 && POINTER_IMAGE_INSTANCEP (pre_gc_cursor)) | |
1485 { | |
1486 cursor_changed = 1; | |
1487 Fset_frame_pointer (frame, cursor); | |
1488 } | |
1489 } | |
1490 | |
1491 /* Don't print messages to the stream device. */ | |
1492 if (!cursor_changed && !FRAME_STREAM_P (f)) | |
1493 { | |
1494 if (garbage_collection_messages) | |
1495 { | |
1496 Lisp_Object args[2], whole_msg; | |
1497 args[0] = (STRINGP (Vgc_message) ? Vgc_message : | |
1498 build_msg_string (gc_default_message)); | |
4952 | 1499 args[1] = build_ascstring ("..."); |
3092 | 1500 whole_msg = Fconcat (2, args); |
1501 echo_area_message (f, (Ibyte *) 0, whole_msg, 0, -1, | |
1502 Qgarbage_collecting); | |
1503 } | |
1504 } | |
1505 } | |
3267 | 1506 } |
1507 | |
5016 | 1508 static void |
3267 | 1509 remove_gc_cursor_and_message (void) |
1510 { | |
1511 /* Now remove the GC cursor/message */ | |
1512 if (!noninteractive) | |
1513 { | |
1514 if (cursor_changed) | |
1515 Fset_frame_pointer (wrap_frame (f), pre_gc_cursor); | |
1516 else if (!FRAME_STREAM_P (f)) | |
1517 { | |
1518 /* Show "...done" only if the echo area would otherwise be empty. */ | |
1519 if (NILP (clear_echo_area (selected_frame (), | |
1520 Qgarbage_collecting, 0))) | |
1521 { | |
1522 if (garbage_collection_messages) | |
1523 { | |
1524 Lisp_Object args[2], whole_msg; | |
1525 args[0] = (STRINGP (Vgc_message) ? Vgc_message : | |
1526 build_msg_string (gc_default_message)); | |
1527 args[1] = build_msg_string ("... done"); | |
1528 whole_msg = Fconcat (2, args); | |
1529 echo_area_message (selected_frame (), (Ibyte *) 0, | |
1530 whole_msg, 0, -1, | |
1531 Qgarbage_collecting); | |
1532 } | |
1533 } | |
1534 } | |
1535 } | |
1536 } | |
1537 | |
5016 | 1538 static void |
3267 | 1539 gc_prepare (void) |
1540 { | |
1541 #if MAX_SAVE_STACK > 0 | |
1542 char stack_top_variable; | |
1543 extern char *stack_bottom; | |
1544 #endif | |
1545 | |
1546 #ifdef NEW_GC | |
1547 GC_STAT_START_NEW_GC; | |
1548 GC_SET_PHASE (INIT_GC); | |
1549 #endif /* NEW_GC */ | |
1550 | |
1551 do_backtrace = profiling_active || backtrace_with_internal_sections; | |
1552 | |
1553 assert (!gc_in_progress); | |
1554 assert (!in_display || gc_currently_forbidden); | |
1555 | |
1556 PROFILE_RECORD_ENTERING_SECTION (QSin_garbage_collection); | |
1557 | |
1558 need_to_signal_post_gc = 0; | |
1559 recompute_funcall_allocation_flag (); | |
1560 | |
1561 if (!gc_hooks_inhibited) | |
1562 run_hook_trapping_problems | |
1563 (Qgarbage_collecting, Qpre_gc_hook, | |
1564 INHIBIT_EXISTING_PERMANENT_DISPLAY_OBJECT_DELETION); | |
3092 | 1565 |
1566 /***** Now we actually start the garbage collection. */ | |
1567 | |
1568 gc_in_progress = 1; | |
1569 #ifndef NEW_GC | |
5014 | 1570 inhibit_non_essential_conversion_operations++; |
3263 | 1571 #endif /* not NEW_GC */ |
3092 | 1572 |
1573 #if MAX_SAVE_STACK > 0 | |
1574 | |
1575 /* Save a copy of the contents of the stack, for debugging. */ | |
1576 if (!purify_flag) | |
1577 { | |
1578 /* Static buffer in which we save a copy of the C stack at each GC. */ | |
1579 static char *stack_copy; | |
1580 static Bytecount stack_copy_size; | |
1581 | |
1582 ptrdiff_t stack_diff = &stack_top_variable - stack_bottom; | |
1583 Bytecount stack_size = (stack_diff > 0 ? stack_diff : -stack_diff); | |
1584 if (stack_size < MAX_SAVE_STACK) | |
1585 { | |
1586 if (stack_copy_size < stack_size) | |
1587 { | |
1588 stack_copy = (char *) xrealloc (stack_copy, stack_size); | |
1589 stack_copy_size = stack_size; | |
1590 } | |
1591 | |
1592 memcpy (stack_copy, | |
1593 stack_diff > 0 ? stack_bottom : &stack_top_variable, | |
1594 stack_size); | |
1595 } | |
1596 } | |
1597 #endif /* MAX_SAVE_STACK > 0 */ | |
1598 | |
1599 /* Do some totally ad-hoc resource clearing. */ | |
1600 /* #### generalize this? */ | |
1601 clear_event_resource (); | |
1602 cleanup_specifiers (); | |
1603 cleanup_buffer_undo_lists (); | |
1604 } | |
1605 | |
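/* The function below pushes/marks everything the collector treats as a
   root: all staticpro'd and staticpro_nodump'd variables, the mcpro'd
   objects (NEW_GC only), every slot protected by GCPRO, the symbols and
   old values on the specbind stack, active catch tags, the functions and
   arguments on the backtrace list, and the profiling info.  Under
   USE_KKCC, mark_object is temporarily redefined to push onto the KKCC
   mark stack instead of marking recursively. */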
5016 | 1606 static void |
3092 | 1607 gc_mark_root_set ( |
1608 #ifdef NEW_GC | |
1609 enum gc_phase phase | |
1610 #else /* not NEW_GC */ | |
1611 void | |
1612 #endif /* not NEW_GC */ | |
1613 ) | |
1614 { | |
1615 #ifdef NEW_GC | |
1616 GC_SET_PHASE (phase); | |
1617 #endif /* NEW_GC */ | |
1618 | |
1619 /* Mark all the special slots that serve as the roots of accessibility. */ | |
1620 | |
1621 #ifdef USE_KKCC | |
1622 # define mark_object(obj) kkcc_gc_stack_push_lisp_object (obj, 0, -1) | |
1623 #endif /* USE_KKCC */ | |
1624 | |
1625 { /* staticpro() */ | |
1626 Lisp_Object **p = Dynarr_begin (staticpros); | |
4921 | 1627 Elemcount len = Dynarr_length (staticpros); |
3092 | 1628 Elemcount count; |
4921 | 1629 for (count = 0; count < len; count++, p++) |
3092 | 1630 /* Need to check that the pointer in the staticpro array is not |
1631 NULL. A GC can occur after a variable is added to the staticpro |
1632 array and _before_ it is correctly initialized. In that case |
1633 its value is NULL, which we have to catch here. */ |
1634 if (*p) | |
3486 | 1635 mark_object (**p); |
3092 | 1636 } |
1637 | |
1638 { /* staticpro_nodump() */ | |
1639 Lisp_Object **p = Dynarr_begin (staticpros_nodump); | |
4921 | 1640 Elemcount len = Dynarr_length (staticpros_nodump); |
3092 | 1641 Elemcount count; |
4921 | 1642 for (count = 0; count < len; count++, p++) |
3092 | 1643 /* Need to check that the pointer in the staticpro array is not |
1644 NULL. A GC can occur after a variable is added to the staticpro |
1645 array and _before_ it is correctly initialized. In that case |
1646 its value is NULL, which we have to catch here. */ |
1647 if (*p) | |
3486 | 1648 mark_object (**p); |
3092 | 1649 } |
1650 | |
3263 | 1651 #ifdef NEW_GC |
3092 | 1652 { /* mcpro () */ |
1653 Lisp_Object *p = Dynarr_begin (mcpros); | |
4921 | 1654 Elemcount len = Dynarr_length (mcpros); |
3092 | 1655 Elemcount count; |
4921 | 1656 for (count = 0; count < len; count++, p++) |
1657 mark_object (*p); |
3092 | 1658 } |
3263 | 1659 #endif /* NEW_GC */ |
3092 | 1660 |
1661 { /* GCPRO() */ | |
1662 struct gcpro *tail; | |
1663 int i; | |
1664 for (tail = gcprolist; tail; tail = tail->next) | |
1665 for (i = 0; i < tail->nvars; i++) | |
1666 mark_object (tail->var[i]); | |
1667 } | |
1668 | |
1669 { /* specbind() */ | |
1670 struct specbinding *bind; | |
1671 for (bind = specpdl; bind != specpdl_ptr; bind++) | |
1672 { | |
1673 mark_object (bind->symbol); | |
1674 mark_object (bind->old_value); | |
1675 } | |
1676 } | |
1677 | |
1678 { | |
1679 struct catchtag *c; | |
1680 for (c = catchlist; c; c = c->next) | |
1681 { | |
1682 mark_object (c->tag); | |
1683 mark_object (c->val); | |
1684 mark_object (c->actual_tag); | |
1685 mark_object (c->backtrace); | |
1686 } | |
1687 } | |
1688 | |
1689 { | |
1690 struct backtrace *backlist; | |
1691 for (backlist = backtrace_list; backlist; backlist = backlist->next) | |
1692 { | |
1693 int nargs = backlist->nargs; | |
1694 int i; | |
1695 | |
1696 mark_object (*backlist->function); | |
1697 if (nargs < 0 /* nargs == UNEVALLED || nargs == MANY */ | |
1698 /* might be fake (internal profiling entry) */ | |
1699 && backlist->args) | |
1700 mark_object (backlist->args[0]); | |
1701 else | |
1702 for (i = 0; i < nargs; i++) | |
1703 mark_object (backlist->args[i]); | |
1704 } | |
1705 } | |
1706 | |
1707 mark_profiling_info (); | |
1708 #ifdef USE_KKCC | |
1709 # undef mark_object | |
1710 #endif | |
1711 } | |
1712 | |
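/* The function below finishes the mark phase.  Weak hash tables, weak
   lists and ephemerons can make further objects reachable each time one
   of them is processed, so each finish_marking_* pass is repeated (and,
   under USE_KKCC, the mark stack is drained again) until no pass reports
   new work.  A second round then handles objects kept alive only so they
   can be finalized, after which the weak structures are pruned. */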
5016 | 1713 static void |
3092 | 1714 gc_finish_mark (void) |
1715 { | |
1716 #ifdef NEW_GC | |
1717 GC_SET_PHASE (FINISH_MARK); | |
1718 #endif /* NEW_GC */ | |
1719 init_marking_ephemerons (); | |
1720 | |
1721 while (finish_marking_weak_hash_tables () > 0 || | |
1722 finish_marking_weak_lists () > 0 || | |
1723 continue_marking_ephemerons () > 0) | |
1724 #ifdef USE_KKCC | |
1725 { | |
1726 kkcc_marking (0); | |
1727 } | |
1728 #else /* not USE_KKCC */ | |
1729 ; | |
1730 #endif /* not USE_KKCC */ | |
1731 | |
1732 /* At this point, we know which objects need to be finalized: we | |
1733 still need to resurrect them */ | |
1734 | |
1735 while (finish_marking_ephemerons () > 0 || | |
1736 finish_marking_weak_lists () > 0 || | |
1737 finish_marking_weak_hash_tables () > 0) | |
1738 #ifdef USE_KKCC | |
1739 { | |
1740 kkcc_marking (0); | |
1741 } | |
1742 #else /* not USE_KKCC */ | |
1743 ; | |
1744 #endif /* not USE_KKCC */ | |
1745 | |
1746 /* And prune (this needs to be called after everything else has been | |
1747 marked and before we do any sweeping). */ | |
1748 /* #### this is somewhat ad-hoc and should probably be an object | |
1749 method */ | |
1750 prune_weak_hash_tables (); | |
1751 prune_weak_lists (); | |
1752 prune_specifiers (); | |
1753 prune_syntax_tables (); | |
1754 | |
1755 prune_ephemerons (); | |
1756 prune_weak_boxes (); | |
1757 } | |
1758 | |
1759 #ifdef NEW_GC | |
5016 | 1760 static void |
3092 | 1761 gc_finalize (void) |
1762 { | |
1763 GC_SET_PHASE (FINALIZE); | |
3263 | 1764 register_for_finalization (); |
3092 | 1765 } |
1766 | |
5016 | 1767 static void |
3092 | 1768 gc_sweep (void) |
1769 { | |
1770 GC_SET_PHASE (SWEEP); | |
1771 mc_sweep (); | |
1772 } | |
1773 #endif /* NEW_GC */ | |
1774 | |
1775 | |
5016 | 1776 static void |
3092 | 1777 gc_finish (void) |
1778 { | |
1779 #ifdef NEW_GC | |
1780 GC_SET_PHASE (FINISH_GC); | |
1781 #endif /* NEW_GC */ | |
5158 | 1782 finish_object_memory_usage_stats (); |
3092 | 1783 consing_since_gc = 0; |
1784 #ifndef DEBUG_XEMACS | |
1785 /* Allow you to set it really fucking low if you really want ... */ | |
1786 if (gc_cons_threshold < 10000) | |
1787 gc_cons_threshold = 10000; | |
1788 #endif | |
1789 recompute_need_to_garbage_collect (); | |
1790 | |
1791 #ifndef NEW_GC | |
5014 | 1792 inhibit_non_essential_conversion_operations--; |
3092 | 1793 #endif /* not NEW_GC */ |
1794 gc_in_progress = 0; | |
1795 | |
1796 run_post_gc_actions (); | |
1797 | |
1798 /******* End of garbage collection ********/ | |
1799 | |
3263 | 1800 #ifndef NEW_GC |
3092 | 1801 if (!breathing_space) |
1802 { | |
1803 breathing_space = malloc (4096 - MALLOC_OVERHEAD); | |
1804 } | |
3263 | 1805 #endif /* not NEW_GC */ |
3092 | 1806 |
1807 need_to_signal_post_gc = 1; | |
1808 funcall_allocation_flag = 1; | |
1809 | |
1810 PROFILE_RECORD_EXITING_SECTION (QSin_garbage_collection); | |
1811 | |
1812 #ifdef NEW_GC | |
1813 GC_SET_PHASE (NONE); | |
1814 #endif /* NEW_GC */ | |
1815 } | |
1816 | |
1817 #ifdef NEW_GC | |
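/* Incremental marking support.  When the mark phase is suspended, the
   write barrier is switched on and dirty-bit recording starts, so that
   objects the mutator writes to while the collector is paused get
   noticed.  On resumption the recording is stopped and
   vdb_read_dirty_bits () re-pushes the affected objects, returning how
   many were re-pushed (see `repushed_objects' below), which
   gc_resume_mark () uses to size the next chunk of mark work. */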
5016 | 1818 static void |
3092 | 1819 gc_suspend_mark_phase (void) |
1820 { | |
1821 PROFILE_RECORD_EXITING_SECTION (QSin_garbage_collection); | |
1822 write_barrier_enabled = 1; | |
1823 consing_since_gc = 0; | |
1824 vdb_start_dirty_bits_recording (); | |
1825 } | |
1826 | |
5016 | 1827 static int |
3092 | 1828 gc_resume_mark_phase (void) |
1829 { | |
1830 PROFILE_RECORD_ENTERING_SECTION (QSin_garbage_collection); | |
1831 assert (write_barrier_enabled); | |
1832 vdb_stop_dirty_bits_recording (); | |
1833 write_barrier_enabled = 0; | |
1834 return vdb_read_dirty_bits (); | |
1835 } | |
1836 | |
5016 | 1837 static int |
3092 | 1838 gc_mark (int incremental) |
1839 { | |
1840 GC_SET_PHASE (MARK); | |
1841 if (!incremental) | |
1842 { | |
1843 kkcc_marking (0); | |
1844 } | |
1845 else | |
1846 { | |
1847 kkcc_marking (gc_incremental_traversal_threshold); | |
1848 if (!KKCC_GC_STACK_EMPTY) | |
1849 { | |
1850 gc_suspend_mark_phase (); | |
1851 return 0; | |
1852 } | |
1853 } | |
1854 return 1; | |
1855 } | |
1856 | |
5016 | 1857 static int |
3092 | 1858 gc_resume_mark (int incremental) |
1859 { | |
1860 if (!incremental) | |
1861 { | |
1862 if (!KKCC_GC_STACK_EMPTY) | |
1863 { | |
1864 GC_STAT_RESUME_GC; | |
1865 /* An incremental garbage collection is already running --- |
1866 resume it and finish it off without further interruption. */ |
1867 gc_resume_mark_phase (); | |
1868 gc_mark_root_set (REPUSH_ROOT_SET); | |
1869 kkcc_marking (0); | |
1870 } | |
1871 } | |
1872 else | |
1873 { | |
1874 int repushed_objects; | |
1875 int mark_work; | |
1876 GC_STAT_RESUME_GC; | |
1877 repushed_objects = gc_resume_mark_phase (); | |
1878 mark_work = (gc_incremental_traversal_threshold > repushed_objects) ? | |
1879 gc_incremental_traversal_threshold : repushed_objects; | |
1880 kkcc_marking (mark_work); | |
1881 if (KKCC_GC_STACK_EMPTY) | |
1882 { | |
1883 /* Mark root set again and finish up marking. */ | |
1884 gc_mark_root_set (REPUSH_ROOT_SET); | |
1885 kkcc_marking (0); | |
1886 } | |
1887 else | |
1888 { | |
1889 gc_suspend_mark_phase (); | |
1890 return 0; | |
1891 } | |
1892 } | |
1893 return 1; | |
1894 } | |
1895 | |
1896 | |
5046 | 1897 static void |
3092 | 1898 gc_1 (int incremental) |
1899 { | |
1900 switch (GC_PHASE) | |
1901 { | |
1902 case NONE: | |
1903 gc_prepare (); | |
1904 kkcc_gc_stack_init(); | |
1905 #ifdef DEBUG_XEMACS | |
1906 kkcc_bt_init (); | |
1907 #endif | |
1908 case INIT_GC: | |
1909 gc_mark_root_set (PUSH_ROOT_SET); | |
1910 case PUSH_ROOT_SET: | |
1911 if (!gc_mark (incremental)) | |
1912 return; /* suspend gc */ | |
1913 case MARK: | |
1914 if (!KKCC_GC_STACK_EMPTY) | |
1915 if (!gc_resume_mark (incremental)) | |
1916 return; /* suspend gc */ | |
1917 gc_finish_mark (); | |
3263 | 1918 case FINISH_MARK: |
1919 gc_finalize (); | |
3092 | 1920 kkcc_gc_stack_free (); |
1921 #ifdef DEBUG_XEMACS | |
1922 kkcc_bt_free (); | |
1923 #endif | |
1924 case FINALIZE: | |
1925 gc_sweep (); | |
1926 case SWEEP: | |
1927 gc_finish (); | |
1928 case FINISH_GC: | |
1929 break; | |
1930 } | |
1931 } | |
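/* Illustration, not part of gc.c: the resumable-phase pattern that
   gc_1 () above relies on, reduced to a self-contained skeleton.  A
   persistent phase variable records how far the collection got;
   re-entering the switch at that phase and deliberately falling through
   (no `break') runs exactly the remaining steps.  All `sketch_*' names
   below are hypothetical, and the real code sets its phase inside the
   phase functions rather than in the caller. */
enum sketch_phase { SKETCH_NONE, SKETCH_MARKED, SKETCH_SWEPT };
static enum sketch_phase sketch_state = SKETCH_NONE;

static int  sketch_mark (int incremental) { return !incremental; } /* 0 = suspend */
static void sketch_sweep (void) { }
static void sketch_finish (void) { }

static void
sketch_collect (int incremental)
{
  switch (sketch_state)
    {
    case SKETCH_NONE:
      if (!sketch_mark (incremental))
        return;                 /* marking suspended; re-enter here later */
      sketch_state = SKETCH_MARKED;
      /* fall through */
    case SKETCH_MARKED:
      sketch_sweep ();
      sketch_state = SKETCH_SWEPT;
      /* fall through */
    case SKETCH_SWEPT:
      sketch_finish ();
      sketch_state = SKETCH_NONE;
      break;
    }
}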
1932 | |
5046 | 1933 static void |
1934 gc (int incremental) | |
3092 | 1935 { |
1936 if (gc_currently_forbidden | |
1937 || in_display | |
1938 || preparing_for_armageddon) | |
1939 return; | |
1940 | |
1941 /* Very important to prevent GC during any of the following | |
1942 stuff that might run Lisp code; otherwise, we'll likely | |
1943 have infinite GC recursion. */ | |
1944 speccount = begin_gc_forbidden (); | |
1945 | |
3267 | 1946 show_gc_cursor_and_message (); |
1947 | |
3092 | 1948 gc_1 (incremental); |
1949 | |
3267 | 1950 remove_gc_cursor_and_message (); |
1951 | |
3092 | 1952 /* now stop inhibiting GC */ |
1953 unbind_to (speccount); | |
1954 } | |
1955 | |
1956 void | |
1957 gc_full (void) | |
1958 { | |
1959 gc (0); | |
1960 } | |
1961 | |
1962 DEFUN ("gc-full", Fgc_full, 0, 0, "", /* | |
1963 This function performs a full garbage collection. If an incremental | |
1964 garbage collection is already running, it completes without any | |
1965 further interruption. This function guarantees that unused objects | |
1966 are freed when it returns. Garbage collection happens automatically if | |
1967 the client allocates more than `gc-cons-threshold' bytes of Lisp data | |
1968 since the previous garbage collection. | |
1969 */ | |
1970 ()) | |
1971 { | |
1972 gc_full (); | |
1973 return Qt; | |
1974 } | |
1975 | |
1976 void | |
1977 gc_incremental (void) | |
1978 { | |
1979 gc (allow_incremental_gc); | |
1980 } | |
1981 | |
1982 DEFUN ("gc-incremental", Fgc_incremental, 0, 0, "", /* | |
1983 This function starts an incremental garbage collection. If an | |
1984 incremental garbage collection is already running, the next cycle | |
1985 starts. Note that this function has not necessarily freed any memory | |
1986 when it returns. This function only guarantees, that the traversal of | |
1987 the heap makes progress. The next cycle of incremental garbage | |
1988 collection happens automatically if the client allocates more than | |
1989 `gc-incremental-cons-threshold' bytes of Lisp data since previous | |
1990 garbage collection. | |
1991 */ | |
1992 ()) | |
1993 { | |
1994 gc_incremental (); | |
1995 return Qt; | |
1996 } | |
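/* From Lisp (NEW_GC builds only): `(gc-full)' forces a complete
   collection and returns t; `(gc-incremental)' runs one bounded marking
   cycle (or a full collection when `allow-incremental-gc' is nil) and
   also returns t.  Both commands are also available interactively. */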
1997 #else /* not NEW_GC */ | |
1998 void garbage_collect_1 (void) | |
1999 { | |
2000 if (gc_in_progress | |
2001 || gc_currently_forbidden | |
2002 || in_display | |
2003 || preparing_for_armageddon) | |
2004 return; | |
2005 | |
2006 /* Very important to prevent GC during any of the following | |
2007 stuff that might run Lisp code; otherwise, we'll likely | |
2008 have infinite GC recursion. */ | |
2009 speccount = begin_gc_forbidden (); | |
2010 | |
3267 | 2011 show_gc_cursor_and_message (); |
2012 | |
3092 | 2013 gc_prepare (); |
2014 #ifdef USE_KKCC | |
2015 kkcc_gc_stack_init(); | |
2016 #ifdef DEBUG_XEMACS | |
2017 kkcc_bt_init (); | |
2018 #endif | |
2019 #endif /* USE_KKCC */ | |
2020 gc_mark_root_set (); | |
2021 #ifdef USE_KKCC | |
2022 kkcc_marking (0); | |
2023 #endif /* USE_KKCC */ | |
2024 gc_finish_mark (); | |
2025 #ifdef USE_KKCC | |
2026 kkcc_gc_stack_free (); | |
2027 #ifdef DEBUG_XEMACS | |
2028 kkcc_bt_free (); | |
2029 #endif | |
2030 #endif /* USE_KKCC */ | |
2031 gc_sweep_1 (); | |
2032 gc_finish (); | |
2033 | |
3267 | 2034 remove_gc_cursor_and_message (); |
2035 | |
3092 | 2036 /* now stop inhibiting GC */ |
2037 unbind_to (speccount); | |
2038 } | |
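/* Unlike the NEW_GC entry points above, this pre-NEW_GC collector always
   runs every phase -- prepare, root-set marking, (KKCC) mark-stack
   draining, weak-object fixup, sweep, finish -- in a single
   uninterrupted call; there is no suspend/resume machinery here. */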
2039 #endif /* not NEW_GC */ | |
2040 | |
2041 | |
2042 /************************************************************************/ | |
2043 /* Initializations */ | |
2044 /************************************************************************/ | |
2045 | |
2046 /* Initialization */ | |
2047 static void | |
2048 common_init_gc_early (void) | |
2049 { | |
2050 Vgc_message = Qzero; | |
2051 | |
2052 gc_currently_forbidden = 0; | |
2053 gc_hooks_inhibited = 0; | |
2054 | |
2055 need_to_garbage_collect = always_gc; | |
2056 | |
2057 gc_cons_threshold = GC_CONS_THRESHOLD; | |
2058 gc_cons_percentage = 40; /* #### what is optimal? */ | |
2059 total_gc_usage_set = 0; | |
2060 #ifdef NEW_GC | |
2061 gc_cons_incremental_threshold = GC_CONS_INCREMENTAL_THRESHOLD; | |
2062 gc_incremental_traversal_threshold = GC_INCREMENTAL_TRAVERSAL_THRESHOLD; | |
3263 | 2063 #endif /* NEW_GC */ |
3092 | 2064 } |
2065 | |
2066 void | |
2067 init_gc_early (void) | |
2068 { | |
3263 | 2069 #ifdef NEW_GC |
2070 /* Reset the finalizers_to_run list after pdump_load. */ | |
2071 Vfinalizers_to_run = Qnil; | |
2072 #endif /* NEW_GC */ | |
3092 | 2073 } |
2074 | |
2075 void | |
2076 reinit_gc_early (void) | |
2077 { | |
2078 common_init_gc_early (); | |
2079 } | |
2080 | |
2081 void | |
2082 init_gc_once_early (void) | |
2083 { | |
2084 common_init_gc_early (); | |
2085 } | |
2086 | |
2087 void | |
2088 syms_of_gc (void) | |
2089 { | |
2090 DEFSYMBOL (Qpre_gc_hook); | |
2091 DEFSYMBOL (Qpost_gc_hook); | |
2092 #ifdef NEW_GC | |
2093 DEFSUBR (Fgc_full); | |
2094 DEFSUBR (Fgc_incremental); | |
2095 #ifdef ERROR_CHECK_GC | |
2096 DEFSUBR (Fgc_stats); | |
2097 #endif /* not ERROR_CHECK_GC */ | |
2098 #endif /* NEW_GC */ | |
2099 } | |
2100 | |
2101 void | |
2102 vars_of_gc (void) | |
2103 { | |
2104 staticpro_nodump (&pre_gc_cursor); | |
2105 | |
4952 | 2106 QSin_garbage_collection = build_defer_string ("(in garbage collection)"); |
3092 | 2107 staticpro (&QSin_garbage_collection); |
2108 | |
2109 DEFVAR_INT ("gc-cons-threshold", &gc_cons_threshold /* | |
2110 *Number of bytes of consing between full garbage collections. | |
2111 \"Consing\" is a misnomer in that this actually counts allocation | |
2112 of all different kinds of objects, not just conses. | |
2113 Garbage collection can happen automatically once this many bytes have been | |
2114 allocated since the last garbage collection. All data types count. | |
2115 | |
2116 Garbage collection happens automatically when `eval' or `funcall' are | |
2117 called. (Note that `funcall' is called implicitly as part of evaluation.) | |
2118 By binding this temporarily to a large number, you can effectively | |
2119 prevent garbage collection during a part of the program. | |
2120 | |
2121 Normally, you cannot set this value less than 10,000 (if you do, it is | |
2122 automatically reset during the next garbage collection). However, if | |
2123 XEmacs was compiled with DEBUG_XEMACS, this does not happen, allowing | |
2124 you to set this value very low to track down problems with insufficient | |
2125 GCPRO'ing. If you set this to a negative number, garbage collection will | |
2126 happen at *EVERY* call to `eval' or `funcall'. This is an extremely | |
2127 effective way to check GCPRO problems, but be warned that your XEmacs | |
2128 will be unusable! You almost certainly won't have the patience to wait | |
2129 long enough to be able to set it back. | |
2130 | |
2131 See also `consing-since-gc' and `gc-cons-percentage'. | |
2132 */ ); | |
2133 | |
2134 DEFVAR_INT ("gc-cons-percentage", &gc_cons_percentage /* | |
2135 *Percentage of memory allocated between garbage collections. | |
2136 | |
2137 Garbage collection will happen if this percentage of the total amount of | |
2138 memory used for data (see `lisp-object-memory-usage') has been allocated | |
2139 since the last garbage collection. However, it will not happen if less | |
2140 than `gc-cons-threshold' bytes have been allocated -- this sets an absolute | |
2141 minimum in case very little data has been allocated or the percentage is | |
2142 set very low. Set this to 0 to have garbage collection always happen after | |
2143 `gc-cons-threshold' bytes have been allocated, regardless of current memory | |
2144 usage. | |
2145 | |
2146 See also `consing-since-gc' and `gc-cons-threshold'. | |
2147 */ ); | |
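/* Worked example of how the two knobs above interact: with
   `gc-cons-percentage' at its default of 40 and roughly 10 MB of Lisp
   data in use, an automatic collection becomes due once about 4 MB have
   been consed since the last one -- but never before `gc-cons-threshold'
   bytes have been consed, and with the percentage set to 0 the absolute
   threshold alone decides.  (Figures are illustrative only.) */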
2148 | |
2149 #ifdef NEW_GC | |
2150 DEFVAR_INT ("gc-cons-incremental-threshold", | |
2151 &gc_cons_incremental_threshold /* | |
2152 *Number of bytes of consing between cycles of incremental garbage | |
2153 collections. \"Consing\" is a misnomer in that this actually counts | |
2154 allocation of all different kinds of objects, not just conses. The | |
2155 next garbage collection cycle can happen automatically once this many | |
2156 bytes have been allocated since the last garbage collection cycle. | |
2157 All data types count. | |
2158 | |
2159 See also `gc-cons-threshold'. | |
2160 */ ); | |
2161 | |
2162 DEFVAR_INT ("gc-incremental-traversal-threshold", | |
2163 &gc_incremental_traversal_threshold /* | |
2164 *Number of elements processed in one cycle of incremental traversal. |
2165 */ ); | |
2166 #endif /* NEW_GC */ | |
2167 | |
2168 DEFVAR_BOOL ("purify-flag", &purify_flag /* | |
2169 Non-nil means loading Lisp code in order to dump an executable. | |
2170 This means that certain objects should be allocated in readonly space. | |
2171 */ ); | |
2172 | |
2173 DEFVAR_BOOL ("garbage-collection-messages", &garbage_collection_messages /* | |
4502 | 2174 *Non-nil means display messages at start and end of garbage collection. |
3092 | 2175 */ ); |
2176 garbage_collection_messages = 0; | |
2177 | |
2178 DEFVAR_LISP ("pre-gc-hook", &Vpre_gc_hook /* | |
2179 Function or functions to be run just before each garbage collection. | |
2180 Interrupts, garbage collection, and errors are inhibited while this hook | |
2181 runs, so be extremely careful in what you add here. In particular, avoid | |
2182 consing, and do not interact with the user. | |
2183 */ ); | |
2184 Vpre_gc_hook = Qnil; | |
2185 | |
2186 DEFVAR_LISP ("post-gc-hook", &Vpost_gc_hook /* | |
2187 Function or functions to be run just after each garbage collection. | |
2188 Interrupts, garbage collection, and errors are inhibited while this hook | |
2189 runs. Each hook is called with one argument which is an alist with | |
2190 finalization data. | |
2191 */ ); | |
2192 Vpost_gc_hook = Qnil; | |
2193 | |
2194 DEFVAR_LISP ("gc-message", &Vgc_message /* | |
2195 String to print to indicate that a garbage collection is in progress. | |
2196 This is printed in the echo area. If the selected frame is on a | |
2197 window system and `gc-pointer-glyph' specifies a value (i.e. a pointer | |
2198 image instance) in the domain of the selected frame, the mouse pointer | |
2199 will change instead of this message being printed. | |
2200 */ ); | |
4952 | 2201 Vgc_message = build_defer_string (gc_default_message); |
3092 | 2202 |
2203 DEFVAR_LISP ("gc-pointer-glyph", &Vgc_pointer_glyph /* | |
2204 Pointer glyph used to indicate that a garbage collection is in progress. | |
2205 If the selected window is on a window system and this glyph specifies a | |
2206 value (i.e. a pointer image instance) in the domain of the selected | |
2207 window, the pointer will be changed as specified during garbage collection. | |
2208 Otherwise, a message will be printed in the echo area, as controlled | |
2209 by `gc-message'. | |
2210 */ ); | |
2211 | |
2212 #ifdef NEW_GC | |
2213 DEFVAR_BOOL ("allow-incremental-gc", &allow_incremental_gc /* | |
2214 *Non-nil means to allow incremental garbage collection. If nil, the |
2215 garbage collector only does full collections, even if `gc-incremental' |
2216 is called. |
2217 */ ); | |
3263 | 2218 |
2219 Vfinalizers_to_run = Qnil; | |
2220 staticpro_nodump (&Vfinalizers_to_run); | |
3092 | 2221 #endif /* NEW_GC */ |
2222 } | |
2223 | |
2224 void | |
2225 complex_vars_of_gc (void) | |
2226 { | |
2227 Vgc_pointer_glyph = Fmake_glyph_internal (Qpointer); | |
2228 } |