3092
+ − 1 /* New incremental garbage collector for XEmacs.
+ − 2 Copyright (C) 2005 Marcus Crestani.
+ − 3
+ − 4 This file is part of XEmacs.
+ − 5
+ − 6 XEmacs is free software; you can redistribute it and/or modify it
+ − 7 under the terms of the GNU General Public License as published by the
+ − 8 Free Software Foundation; either version 2, or (at your option) any
+ − 9 later version.
+ − 10
+ − 11 XEmacs is distributed in the hope that it will be useful, but WITHOUT
+ − 12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ − 13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ − 14 for more details.
+ − 15
+ − 16 You should have received a copy of the GNU General Public License
+ − 17 along with XEmacs; see the file COPYING. If not, write to
+ − 18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ − 19 Boston, MA 02111-1307, USA. */
+ − 20
+ − 21 /* Synched up with: Not in FSF. */
+ − 22
+ − 23 #include <config.h>
+ − 24 #include "lisp.h"
+ − 25
+ − 26 #include "backtrace.h"
+ − 27 #include "buffer.h"
+ − 28 #include "bytecode.h"
+ − 29 #include "chartab.h"
+ − 30 #include "console-stream.h"
+ − 31 #include "device.h"
+ − 32 #include "elhash.h"
+ − 33 #include "events.h"
+ − 34 #include "extents-impl.h"
+ − 35 #include "file-coding.h"
+ − 36 #include "frame-impl.h"
+ − 37 #include "gc.h"
+ − 38 #include "glyphs.h"
+ − 39 #include "opaque.h"
+ − 40 #include "lrecord.h"
+ − 41 #include "lstream.h"
+ − 42 #include "process.h"
+ − 43 #include "profile.h"
+ − 44 #include "redisplay.h"
+ − 45 #include "specifier.h"
+ − 46 #include "sysfile.h"
+ − 47 #include "sysdep.h"
+ − 48 #include "window.h"
+ − 49 #include "vdb.h"
+ − 50
+ − 51
/* Consing-threshold constants (in bytes, and for the last one in marked
   elements) used by the incremental collector.  */
#define GC_CONS_THRESHOLD 2000000
#define GC_CONS_INCREMENTAL_THRESHOLD 200000
#define GC_INCREMENTAL_TRAVERSAL_THRESHOLD 100000

/* Number of bytes of consing done since the last GC. */
EMACS_INT consing_since_gc;

/* Number of bytes of consing done since startup. */
EMACS_UINT total_consing;

/* Number of bytes of current allocated heap objects. */
EMACS_INT total_gc_usage;

/* If the above is set. */
int total_gc_usage_set;

/* Number of bytes of consing since gc before another gc should be done. */
EMACS_INT gc_cons_threshold;

/* Nonzero during gc */
int gc_in_progress;

/* Percentage of consing of total data size before another GC. */
EMACS_INT gc_cons_percentage;

#ifdef NEW_GC
/* Number of bytes of consing since gc before another cycle of the gc
   should be done in incremental mode. */
EMACS_INT gc_cons_incremental_threshold;

/* Number of elements marked in one cycle of incremental GC. */
EMACS_INT gc_incremental_traversal_threshold;

/* Nonzero during write barrier */
int write_barrier_enabled;
#endif /* NEW_GC */
+ − 88
+ − 89
+ − 90
+ − 91 #ifdef NEW_GC
+ − 92 /************************************************************************/
+ − 93 /* Incremental State and Statistics */
+ − 94 /************************************************************************/
+ − 95
/* The phases an (incremental) garbage collection passes through, in
   order.  NONE means no collection is currently underway.  */
enum gc_phase
{
  NONE,
  INIT_GC,
  PUSH_ROOT_SET,
  MARK,
  REPUSH_ROOT_SET,
  FINISH_MARK,
  FINALIZE,
  SWEEP,
  FINISH_GC
};

#ifndef ERROR_CHECK_GC
/* Without error checking, the collector state is just the current phase.  */
typedef struct gc_state_type
{
  enum gc_phase phase;
} gc_state_type;
#else /* ERROR_CHECK_GC */
/* Indices into the statistics arrays below: each statistic is tracked
   as a running total plus per-GC and per-incremental-cycle counters
   (current and previous).  */
enum gc_stat_id
{
  GC_STAT_TOTAL,
  GC_STAT_IN_LAST_GC,
  GC_STAT_IN_THIS_GC,
  GC_STAT_IN_LAST_CYCLE,
  GC_STAT_IN_THIS_CYCLE,
  GC_STAT_COUNT /* has to be last */
};

/* Collector state plus statistics.  The counters are doubles so they do
   not overflow over a long session.  */
typedef struct gc_state_type
{
  enum gc_phase phase;
  double n_gc[GC_STAT_COUNT];
  double n_cycles[GC_STAT_COUNT];
  double enqueued[GC_STAT_COUNT];
  double dequeued[GC_STAT_COUNT];
  double repushed[GC_STAT_COUNT];
  double enqueued2[GC_STAT_COUNT];
  double dequeued2[GC_STAT_COUNT];
  double finalized[GC_STAT_COUNT];
  double freed[GC_STAT_COUNT];
} gc_state_type;
#endif /* ERROR_CHECK_GC */

/* The single, global collector state.  */
gc_state_type gc_state;

/* Accessors for the current GC phase.  */
#define GC_PHASE gc_state.phase
#define GC_SET_PHASE(p) GC_PHASE = p
+ − 144
+ − 145 #ifdef ERROR_CHECK_GC
# define GC_STAT_START_NEW_GC gc_stat_start_new_gc ()
# define GC_STAT_RESUME_GC gc_stat_resume_gc ()

/* Bump STAT's running total plus its counters for the current GC and
   the current incremental cycle.  */
#define GC_STAT_TICK(STAT) \
  gc_state.STAT[GC_STAT_TOTAL]++; \
  gc_state.STAT[GC_STAT_IN_THIS_GC]++; \
  gc_state.STAT[GC_STAT_IN_THIS_CYCLE]++

/* Objects enqueued/dequeued while repushing the root set are counted
   separately (enqueued2/dequeued2) from those of the regular mark
   phase.  */
# define GC_STAT_ENQUEUED \
  if (GC_PHASE == REPUSH_ROOT_SET) \
    { \
      GC_STAT_TICK (enqueued2); \
    } \
  else \
    { \
      GC_STAT_TICK (enqueued); \
    }

# define GC_STAT_DEQUEUED \
  if (gc_state.phase == REPUSH_ROOT_SET) \
    { \
      GC_STAT_TICK (dequeued2); \
    } \
  else \
    { \
      GC_STAT_TICK (dequeued); \
    }
# define GC_STAT_REPUSHED GC_STAT_TICK (repushed)

/* Roll the current cycle's count of STAT into "last cycle" and restart
   the per-cycle counter.  */
#define GC_STAT_RESUME(stat) \
  gc_state.stat[GC_STAT_IN_LAST_CYCLE] = \
    gc_state.stat[GC_STAT_IN_THIS_CYCLE]; \
  gc_state.stat[GC_STAT_IN_THIS_CYCLE] = 0

/* Roll the current GC's count of STAT into "last GC" and restart both
   the per-GC and the per-cycle counters.  */
#define GC_STAT_RESTART(stat) \
  gc_state.stat[GC_STAT_IN_LAST_GC] = \
    gc_state.stat[GC_STAT_IN_THIS_GC]; \
  gc_state.stat[GC_STAT_IN_THIS_GC] = 0; \
  GC_STAT_RESUME (stat)
+ − 185
/* Statistics bookkeeping for the start of a brand-new garbage
   collection: count the new GC and its first cycle, and roll all
   per-GC counters over into their "last GC" slots.  */
void
gc_stat_start_new_gc (void)
{
  gc_state.n_gc[GC_STAT_TOTAL]++;
  gc_state.n_cycles[GC_STAT_TOTAL]++;
  gc_state.n_cycles[GC_STAT_IN_LAST_GC] = gc_state.n_cycles[GC_STAT_IN_THIS_GC];
  gc_state.n_cycles[GC_STAT_IN_THIS_GC] = 1;

  GC_STAT_RESTART (enqueued);
  GC_STAT_RESTART (dequeued);
  GC_STAT_RESTART (repushed);
  GC_STAT_RESTART (finalized);
  GC_STAT_RESTART (enqueued2);
  GC_STAT_RESTART (dequeued2);
  GC_STAT_RESTART (freed);
}
+ − 202
/* Statistics bookkeeping when an already-running (incremental) GC
   starts another cycle: count the cycle and roll all per-cycle
   counters over into their "last cycle" slots.  */
void
gc_stat_resume_gc (void)
{
  gc_state.n_cycles[GC_STAT_TOTAL]++;
  gc_state.n_cycles[GC_STAT_IN_THIS_GC]++;
  GC_STAT_RESUME (enqueued);
  GC_STAT_RESUME (dequeued);
  GC_STAT_RESUME (repushed);
  GC_STAT_RESUME (finalized);
  GC_STAT_RESUME (enqueued2);
  GC_STAT_RESUME (dequeued2);
  GC_STAT_RESUME (freed);
}
+ − 216
/* Count one object finalized during this GC.  */
void
gc_stat_finalized (void)
{
  GC_STAT_TICK (finalized);
}
+ − 222
/* Count one object freed during this GC.  */
void
gc_stat_freed (void)
{
  GC_STAT_TICK (freed);
}
+ − 228
DEFUN("gc-stats", Fgc_stats, 0, 0 ,"", /*
Return statistics about garbage collection cycles in a property list.
*/
       ())
{
  Lisp_Object pl = Qnil;
  /* Prepend one (NAME VALUE) pair to the plist being built.  The
     counters are doubles, hence make_float; for "phase" this yields the
     numeric value of the gc_phase enum.  Since each PL prepends, the
     properties appear in the returned list in reverse of the order
     below.  */
#define PL(name,value) \
  pl = cons3 (intern (name), make_float (gc_state.value), pl)

  PL ("freed-in-this-cycle", freed[GC_STAT_IN_THIS_CYCLE]);
  PL ("freed-in-this-gc", freed[GC_STAT_IN_THIS_GC]);
  PL ("freed-in-last-cycle", freed[GC_STAT_IN_LAST_CYCLE]);
  PL ("freed-in-last-gc", freed[GC_STAT_IN_LAST_GC]);
  PL ("freed-total", freed[GC_STAT_TOTAL]);
  PL ("finalized-in-this-cycle", finalized[GC_STAT_IN_THIS_CYCLE]);
  PL ("finalized-in-this-gc", finalized[GC_STAT_IN_THIS_GC]);
  PL ("finalized-in-last-cycle", finalized[GC_STAT_IN_LAST_CYCLE]);
  PL ("finalized-in-last-gc", finalized[GC_STAT_IN_LAST_GC]);
  PL ("finalized-total", finalized[GC_STAT_TOTAL]);
  PL ("repushed-in-this-cycle", repushed[GC_STAT_IN_THIS_CYCLE]);
  PL ("repushed-in-this-gc", repushed[GC_STAT_IN_THIS_GC]);
  PL ("repushed-in-last-cycle", repushed[GC_STAT_IN_LAST_CYCLE]);
  PL ("repushed-in-last-gc", repushed[GC_STAT_IN_LAST_GC]);
  PL ("repushed-total", repushed[GC_STAT_TOTAL]);
  PL ("dequeued2-in-this-cycle", dequeued2[GC_STAT_IN_THIS_CYCLE]);
  PL ("dequeued2-in-this-gc", dequeued2[GC_STAT_IN_THIS_GC]);
  PL ("dequeued2-in-last-cycle", dequeued2[GC_STAT_IN_LAST_CYCLE]);
  PL ("dequeued2-in-last-gc", dequeued2[GC_STAT_IN_LAST_GC]);
  PL ("dequeued2-total", dequeued2[GC_STAT_TOTAL]);
  PL ("enqueued2-in-this-cycle", enqueued2[GC_STAT_IN_THIS_CYCLE]);
  PL ("enqueued2-in-this-gc", enqueued2[GC_STAT_IN_THIS_GC]);
  PL ("enqueued2-in-last-cycle", enqueued2[GC_STAT_IN_LAST_CYCLE]);
  PL ("enqueued2-in-last-gc", enqueued2[GC_STAT_IN_LAST_GC]);
  PL ("enqueued2-total", enqueued2[GC_STAT_TOTAL]);
  PL ("dequeued-in-this-cycle", dequeued[GC_STAT_IN_THIS_CYCLE]);
  PL ("dequeued-in-this-gc", dequeued[GC_STAT_IN_THIS_GC]);
  PL ("dequeued-in-last-cycle", dequeued[GC_STAT_IN_LAST_CYCLE]);
  PL ("dequeued-in-last-gc", dequeued[GC_STAT_IN_LAST_GC]);
  PL ("dequeued-total", dequeued[GC_STAT_TOTAL]);
  PL ("enqueued-in-this-cycle", enqueued[GC_STAT_IN_THIS_CYCLE]);
  PL ("enqueued-in-this-gc", enqueued[GC_STAT_IN_THIS_GC]);
  PL ("enqueued-in-last-cycle", enqueued[GC_STAT_IN_LAST_CYCLE]);
  PL ("enqueued-in-last-gc", enqueued[GC_STAT_IN_LAST_GC]);
  PL ("enqueued-total", enqueued[GC_STAT_TOTAL]);
  PL ("n-cycles-in-this-gc", n_cycles[GC_STAT_IN_THIS_GC]);
  PL ("n-cycles-in-last-gc", n_cycles[GC_STAT_IN_LAST_GC]);
  PL ("n-cycles-total", n_cycles[GC_STAT_TOTAL]);
  PL ("n-gc-total", n_gc[GC_STAT_TOTAL]);
  PL ("phase", phase);
  return pl;
}
+ − 280 #else /* not ERROR_CHECK_GC */
/* Without error checking, the statistics hooks compile away to nothing.  */
# define GC_STAT_START_NEW_GC
# define GC_STAT_RESUME_GC
# define GC_STAT_ENQUEUED
# define GC_STAT_DEQUEUED
# define GC_STAT_REPUSHED
# define GC_STAT_REMOVED
+ − 287 #endif /* not ERROR_CHECK_GC */
+ − 288 #endif /* NEW_GC */
+ − 289
+ − 290
+ − 291 /************************************************************************/
+ − 292 /* Recompute need to garbage collect */
+ − 293 /************************************************************************/
+ − 294
/* Nonzero when enough consing has happened that a collection should be
   run at the next opportunity; maintained by
   recompute_need_to_garbage_collect () below.  */
int need_to_garbage_collect;

#ifdef ERROR_CHECK_GC
int always_gc = 0;		/* Debugging hack; equivalent to
				   (setq gc-cons-threshold -1) */
#else
#define always_gc 0
#endif
+ − 303
+ − 304 /* True if it's time to garbage collect now. */
+ − 305 void
+ − 306 recompute_need_to_garbage_collect (void)
+ − 307 {
+ − 308 if (always_gc)
+ − 309 need_to_garbage_collect = 1;
+ − 310 else
+ − 311 need_to_garbage_collect =
+ − 312 #ifdef NEW_GC
+ − 313 write_barrier_enabled ?
+ − 314 (consing_since_gc > gc_cons_incremental_threshold) :
+ − 315 #endif /* NEW_GC */
+ − 316 (consing_since_gc > gc_cons_threshold
+ − 317 &&
+ − 318 #if 0 /* #### implement this better */
4115
+ − 319 ((double)consing_since_gc) / total_data_usage()) >=
+ − 320 ((double)gc_cons_percentage / 100)
3092
+ − 321 #else
+ − 322 (!total_gc_usage_set ||
4115
+ − 323 ((double)consing_since_gc / total_gc_usage) >=
+ − 324 ((double)gc_cons_percentage / 100))
3092
+ − 325 #endif
+ − 326 );
+ − 327 recompute_funcall_allocation_flag ();
+ − 328 }
+ − 329
+ − 330
+ − 331
+ − 332 /************************************************************************/
+ − 333 /* Mark Phase */
+ − 334 /************************************************************************/
+ − 335
/* Description of a memory block holding a single Lisp_Object, for use
   wherever a lone Lisp_Object must be traversed by description.  */
static const struct memory_description lisp_object_description_1[] = {
  { XD_LISP_OBJECT, 0 },
  { XD_END }
};

const struct sized_memory_description lisp_object_description = {
  sizeof (Lisp_Object),
  lisp_object_description_1
};
+ − 345
+ − 346 #if defined (USE_KKCC) || defined (PDUMP)
+ − 347
/* This function extracts the value of a count variable described somewhere
   else in the description.  It is converted corresponding to the type.
   CODE is an XD_INDIRECT value encoding both the description line that
   locates the count field and a constant delta to add to it.  */
EMACS_INT
lispdesc_indirect_count_1 (EMACS_INT code,
			   const struct memory_description *idesc,
			   const void *idata)
{
  EMACS_INT count;
  const void *irdata;

  int line = XD_INDIRECT_VAL (code);
  int delta = XD_INDIRECT_DELTA (code);

  /* Locate the count field inside the object; its offset is given by
     description line LINE (and may itself be indirect).  */
  irdata = ((char *) idata) +
    lispdesc_indirect_count (idesc[line].offset, idesc, idata);
  /* Read the count using the C type recorded in the description.  */
  switch (idesc[line].type)
    {
    case XD_BYTECOUNT:
      count = * (Bytecount *) irdata;
      break;
    case XD_ELEMCOUNT:
      count = * (Elemcount *) irdata;
      break;
    case XD_HASHCODE:
      count = * (Hashcode *) irdata;
      break;
    case XD_INT:
      count = * (int *) irdata;
      break;
    case XD_LONG:
      count = * (long *) irdata;
      break;
    default:
      /* A malformed description; dump diagnostics before aborting.  */
      stderr_out ("Unsupported count type : %d (line = %d, code = %ld)\n",
		  idesc[line].type, line, (long) code);
#if defined(USE_KKCC) && defined(DEBUG_XEMACS)
      if (gc_in_progress)
	kkcc_backtrace ();
#endif
#ifdef PDUMP
      if (in_pdump)
	pdump_backtrace ();
#endif
      count = 0; /* warning suppression */
      ABORT ();
    }
  /* DELTA is the constant adjustment encoded in CODE.  */
  count += delta;
  return count;
}
+ − 397
+ − 398 /* SDESC is a "description map" (basically, a list of offsets used for
+ − 399 successive indirections) and OBJ is the first object to indirect off of.
+ − 400 Return the description ultimately found. */
+ − 401
+ − 402 const struct sized_memory_description *
+ − 403 lispdesc_indirect_description_1 (const void *obj,
+ − 404 const struct sized_memory_description *sdesc)
+ − 405 {
+ − 406 int pos;
+ − 407
+ − 408 for (pos = 0; sdesc[pos].size >= 0; pos++)
+ − 409 obj = * (const void **) ((const char *) obj + sdesc[pos].size);
+ − 410
+ − 411 return (const struct sized_memory_description *) obj;
+ − 412 }
+ − 413
/* Compute the size of the data at RDATA, described by a single entry
   DESC1 in a description array.  OBJ and DESC are used for
   XD_INDIRECT references.  Returns 0 only after ABORT () in the
   unsupported-type case.  */

static Bytecount
lispdesc_one_description_line_size (void *rdata,
				    const struct memory_description *desc1,
				    const void *obj,
				    const struct memory_description *desc)
{
  /* XD_UNION_DYNAMIC_SIZE re-enters the switch with the description of
     the currently active union member.  */
 union_switcheroo:
  switch (desc1->type)
    {
    case XD_LISP_OBJECT_ARRAY:
      {
	/* data1 holds the (possibly indirect) element count.  */
	EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj);
	return (val * sizeof (Lisp_Object));
      }
    case XD_LISP_OBJECT:
    case XD_LO_LINK:
      return sizeof (Lisp_Object);
    case XD_OPAQUE_PTR:
      return sizeof (void *);
#ifdef NEW_GC
    case XD_LISP_OBJECT_BLOCK_PTR:
#endif /* NEW_GC */
    case XD_BLOCK_PTR:
      {
	/* Array of pointers: count times pointer size.  */
	EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj);
	return val * sizeof (void *);
      }
    case XD_BLOCK_ARRAY:
      {
	/* Inline array of blocks: count times the size of one block.  */
	EMACS_INT val = lispdesc_indirect_count (desc1->data1, desc, obj);

	return (val *
		lispdesc_block_size
		(rdata,
		 lispdesc_indirect_description (obj, desc1->data2.descr)));
      }
    case XD_OPAQUE_DATA_PTR:
      return sizeof (void *);
    case XD_UNION_DYNAMIC_SIZE:
      {
	/* If an explicit size was given in the first-level structure
	   description, use it; else compute size based on current union
	   constant. */
	const struct sized_memory_description *sdesc =
	  lispdesc_indirect_description (obj, desc1->data2.descr);
	if (sdesc->size)
	  return sdesc->size;
	else
	  {
	    desc1 = lispdesc_process_xd_union (desc1, desc, obj);
	    if (desc1)
	      goto union_switcheroo;
	    break;
	  }
      }
    case XD_UNION:
      {
	/* If an explicit size was given in the first-level structure
	   description, use it; else compute size based on maximum of all
	   possible structures. */
	const struct sized_memory_description *sdesc =
	  lispdesc_indirect_description (obj, desc1->data2.descr);
	if (sdesc->size)
	  return sdesc->size;
	else
	  {
	    int count;
	    Bytecount max_size = -1, size;

	    desc1 = sdesc->description;

	    for (count = 0; desc1[count].type != XD_END; count++)
	      {
		size = lispdesc_one_description_line_size (rdata,
							   &desc1[count],
							   obj, desc);
		if (size > max_size)
		  max_size = size;
	      }
	    return max_size;
	  }
      }
    case XD_ASCII_STRING:
      return sizeof (void *);
    case XD_DOC_STRING:
      return sizeof (void *);
    case XD_INT_RESET:
      return sizeof (int);
    case XD_BYTECOUNT:
      return sizeof (Bytecount);
    case XD_ELEMCOUNT:
      return sizeof (Elemcount);
    case XD_HASHCODE:
      return sizeof (Hashcode);
    case XD_INT:
      return sizeof (int);
    case XD_LONG:
      return sizeof (long);
    default:
      stderr_out ("Unsupported dump type : %d\n", desc1->type);
      ABORT ();
    }

  /* Reached only via the `break' paths above after ABORT ().  */
  return 0;
}
+ − 523
+ − 524
/* Return the size of the memory block (NOT necessarily a structure!)
   described by SDESC and pointed to by OBJ.  If SDESC records an
   explicit size (i.e. non-zero), it is simply returned; otherwise,
   the size is calculated by the maximum offset and the size of the
   object at that offset, rounded up to the maximum alignment.  In
   this case, we may need the object, for example when retrieving an
   "indirect count" of an inlined array (the count is not constant,
   but is specified by one of the elements of the memory block).  (It
   is generally not a problem if we return an overly large size -- we
   will simply end up reserving more space than necessary; but if the
   size is too small we could be in serious trouble, in particular
   with nested inlined structures, where there may be alignment
   padding in the middle of a block.  #### In fact there is an (at
   least theoretical) problem with an overly large size -- we may
   trigger a protection fault when reading from invalid memory.  We
   need to handle this -- perhaps in a stupid but dependable way,
   i.e. by trapping SIGSEGV and SIGBUS.) */

Bytecount
lispdesc_block_size_1 (const void *obj, Bytecount size,
		       const struct memory_description *desc)
{
  EMACS_INT max_offset = -1;
  int max_offset_pos = -1;
  int pos;

  /* An explicit nonzero size wins outright.  */
  if (size)
    return size;

  /* Find the description line with the largest offset; the block ends
     right after that element.  */
  for (pos = 0; desc[pos].type != XD_END; pos++)
    {
      EMACS_INT offset = lispdesc_indirect_count (desc[pos].offset, desc, obj);
      if (offset == max_offset)
	{
	  stderr_out ("Two relocatable elements at same offset?\n");
	  ABORT ();
	}
      else if (offset > max_offset)
	{
	  max_offset = offset;
	  max_offset_pos = pos;
	}
    }

  /* An empty description describes an empty block.  */
  if (max_offset_pos < 0)
    return 0;

  {
    Bytecount size_at_max;
    size_at_max =
      lispdesc_one_description_line_size ((char *) obj + max_offset,
					  &desc[max_offset_pos], obj, desc);

    /* We have no way of knowing the required alignment for this structure,
       so just make it maximally aligned. */
    return MAX_ALIGN_SIZE (max_offset + size_at_max);
  }
}
+ − 583 #endif /* defined (USE_KKCC) || defined (PDUMP) */
+ − 584
3263
#ifdef NEW_GC
/* Assert that LHEADER does not point at an already-freed object.  */
#define GC_CHECK_NOT_FREE(lheader)			\
      gc_checking_assert (! LRECORD_FREE_P (lheader));
#else /* not NEW_GC */
/* Assert that LHEADER does not point at an already-freed object; for
   non-basic (lcrecord) objects additionally check the free flag in the
   old lcrecord header.  */
#define GC_CHECK_NOT_FREE(lheader)			\
      gc_checking_assert (! LRECORD_FREE_P (lheader));	\
      gc_checking_assert (LHEADER_IMPLEMENTATION (lheader)->basic_p ||	\
			  ! ((struct old_lcrecord_header *) lheader)->free)
#endif /* not NEW_GC */
3092
+ − 594
+ − 595 #ifdef USE_KKCC
/* The following functions implement the new mark algorithm.
   They mark objects according to their descriptions.  They
   are modeled on the corresponding pdumper procedures. */

#if 0
/* Define to drain the mark stack FIFO (as a queue, i.e. breadth-first)
   instead of LIFO; see kkcc_gc_stack_pop.  */
# define KKCC_STACK_AS_QUEUE 1
#endif
+ − 603
+ − 604 #ifdef DEBUG_XEMACS
/* The backtrace for the KKCC mark functions. */
#define KKCC_INIT_BT_STACK_SIZE 4096

/* One recorded step of the mark traversal: the object being traversed,
   the description used to traverse it, and the position within that
   description (negative values encode special roots; see
   kkcc_backtrace).  */
typedef struct
{
  void *obj;
  const struct memory_description *desc;
  int pos;
} kkcc_bt_stack_entry;

static kkcc_bt_stack_entry *kkcc_bt;
static int kkcc_bt_stack_size;
static int kkcc_bt_depth = 0;
+ − 618
/* Allocate the (zeroed) backtrace stack at its initial size and reset
   the depth.  */
static void
kkcc_bt_init (void)
{
  kkcc_bt_depth = 0;
  kkcc_bt_stack_size = KKCC_INIT_BT_STACK_SIZE;
  kkcc_bt = (kkcc_bt_stack_entry *)
    xmalloc_and_zero (kkcc_bt_stack_size * sizeof (kkcc_bt_stack_entry));
  if (!kkcc_bt)
    {
      stderr_out ("KKCC backtrace stack init failed for size %d\n",
		  kkcc_bt_stack_size);
      ABORT ();
    }
}
+ − 633
/* Print the recorded mark-stack backtrace to stderr, most recently
   pushed frame first.  */
void
kkcc_backtrace (void)
{
  int i;
  stderr_out ("KKCC mark stack backtrace :\n");
  for (i = kkcc_bt_depth - 1; i >= 0; i--)
    {
      Lisp_Object obj = wrap_pointer_1 (kkcc_bt[i].obj);
      stderr_out (" [%d]", i);
      /* Not every traversed address is a Lisp object; print the record
	 type name only when it looks like a valid lrecord.  */
      if ((XRECORD_LHEADER (obj)->type >= lrecord_type_last_built_in_type)
	  || (!LRECORDP (obj))
	  || (!XRECORD_LHEADER_IMPLEMENTATION (obj)))
	{
	  stderr_out (" non Lisp Object");
	}
      else
	{
	  stderr_out (" %s",
		      XRECORD_LHEADER_IMPLEMENTATION (obj)->name);
	}
      stderr_out (" (addr: %p, desc: %p, ",
		  (void *) kkcc_bt[i].obj,
		  (void *) kkcc_bt[i].desc);
      /* Negative positions are sentinels: -1 = pushed from the root
	 set, -2 = repushed dirty object.  */
      if (kkcc_bt[i].pos >= 0)
	stderr_out ("pos: %d)\n", kkcc_bt[i].pos);
      else
	if (kkcc_bt[i].pos == -1)
	  stderr_out ("root set)\n");
	else if (kkcc_bt[i].pos == -2)
	  stderr_out ("dirty object)\n");
    }
}
+ − 666
/* Double the backtrace stack; called from kkcc_bt_push when it fills
   up.  Existing entries are preserved by xrealloc.  */
static void
kkcc_bt_stack_realloc (void)
{
  kkcc_bt_stack_size *= 2;
  kkcc_bt = (kkcc_bt_stack_entry *)
    xrealloc (kkcc_bt, kkcc_bt_stack_size * sizeof (kkcc_bt_stack_entry));
  if (!kkcc_bt)
    {
      stderr_out ("KKCC backtrace stack realloc failed for size %d\n",
		  kkcc_bt_stack_size);
      ABORT ();
    }
}
+ − 680
/* Release the backtrace stack and reset its bookkeeping.  */
static void
kkcc_bt_free (void)
{
  xfree_1 (kkcc_bt);
  kkcc_bt = 0;
  kkcc_bt_stack_size = 0;
}
+ − 688
+ − 689 static void
+ − 690 kkcc_bt_push (void *obj, const struct memory_description *desc,
+ − 691 int level, int pos)
+ − 692 {
+ − 693 kkcc_bt_depth = level;
+ − 694 kkcc_bt[kkcc_bt_depth].obj = obj;
+ − 695 kkcc_bt[kkcc_bt_depth].desc = desc;
+ − 696 kkcc_bt[kkcc_bt_depth].pos = pos;
+ − 697 kkcc_bt_depth++;
+ − 698 if (kkcc_bt_depth >= kkcc_bt_stack_size)
+ − 699 kkcc_bt_stack_realloc ();
+ − 700 }
+ − 701
+ − 702 #else /* not DEBUG_XEMACS */
/* Without DEBUG_XEMACS the backtrace machinery compiles away.  */
#define kkcc_bt_init()
#define kkcc_bt_push(obj, desc, level, pos)
+ − 705 #endif /* not DEBUG_XEMACS */
+ − 706
/* Object memory descriptions are in the lrecord_implementation structure.
   But copying them to a parallel array is much more cache-friendly. */
const struct memory_description *lrecord_memory_descriptions[countof (lrecord_implementations_table)];

/* the initial stack size in kkcc_gc_stack_entries */
#define KKCC_INIT_GC_STACK_SIZE 16384

/* One entry of the mark stack: the data to traverse and the description
   to traverse it with.  LEVEL/POS are backtrace bookkeeping, kept only
   in debug builds.  */
typedef struct
{
  void *data;
  const struct memory_description *desc;
#ifdef DEBUG_XEMACS
  int level;
  int pos;
#endif
} kkcc_gc_stack_entry;


/* The mark stack lives in a circular buffer (front/rear indices with
   modular arithmetic) so it can also be drained as a queue; see
   KKCC_STACK_AS_QUEUE.  */
static kkcc_gc_stack_entry *kkcc_gc_stack_ptr;
static int kkcc_gc_stack_front;
static int kkcc_gc_stack_rear;
static int kkcc_gc_stack_size;

/* Index arithmetic modulo the buffer size.  */
#define KKCC_INC(i) ((i + 1) % kkcc_gc_stack_size)
#define KKCC_INC2(i) ((i + 2) % kkcc_gc_stack_size)

#define KKCC_GC_STACK_FULL (KKCC_INC2 (kkcc_gc_stack_rear) == kkcc_gc_stack_front)
#define KKCC_GC_STACK_EMPTY (KKCC_INC (kkcc_gc_stack_rear) == kkcc_gc_stack_front)
+ − 735
/* Allocate the (zeroed) circular mark stack at its initial size.  */
static void
kkcc_gc_stack_init (void)
{
  kkcc_gc_stack_size = KKCC_INIT_GC_STACK_SIZE;
  kkcc_gc_stack_ptr = (kkcc_gc_stack_entry *)
    xmalloc_and_zero (kkcc_gc_stack_size * sizeof (kkcc_gc_stack_entry));
  if (!kkcc_gc_stack_ptr)
    {
      stderr_out ("stack init failed for size %d\n", kkcc_gc_stack_size);
      ABORT ();
    }
  /* Rear trails front by one (mod size) when the buffer is empty.  */
  kkcc_gc_stack_front = 0;
  kkcc_gc_stack_rear = kkcc_gc_stack_size - 1;
}
+ − 750
/* Release the mark stack and reset all of its bookkeeping.  */
static void
kkcc_gc_stack_free (void)
{
  xfree_1 (kkcc_gc_stack_ptr);
  kkcc_gc_stack_ptr = 0;
  kkcc_gc_stack_front = 0;
  kkcc_gc_stack_rear = 0;
  kkcc_gc_stack_size = 0;
}
+ − 760
/* Double the mark stack.  The live entries are copied to the front of
   the new buffer; the second memcpy below handles the case where the
   circular buffer's contents wrap around the end of the old
   allocation.  */
static void
kkcc_gc_stack_realloc (void)
{
  kkcc_gc_stack_entry *old_ptr = kkcc_gc_stack_ptr;
  int old_size = kkcc_gc_stack_size;
  kkcc_gc_stack_size *= 2;
  kkcc_gc_stack_ptr = (kkcc_gc_stack_entry *)
    xmalloc_and_zero (kkcc_gc_stack_size * sizeof (kkcc_gc_stack_entry));
  if (!kkcc_gc_stack_ptr)
    {
      stderr_out ("stack realloc failed for size %d\n", kkcc_gc_stack_size);
      ABORT ();
    }
  if (kkcc_gc_stack_rear >= kkcc_gc_stack_front)
    {
      /* Contents are contiguous: a single copy suffices.  */
      int number_elements = kkcc_gc_stack_rear - kkcc_gc_stack_front + 1;
      memcpy (kkcc_gc_stack_ptr, &old_ptr[kkcc_gc_stack_front],
	      number_elements * sizeof (kkcc_gc_stack_entry));
      kkcc_gc_stack_front = 0;
      kkcc_gc_stack_rear = number_elements - 1;
    }
  else
    {
      /* Contents wrap: copy the tail segment, then the head segment.  */
      int number_elements = old_size - kkcc_gc_stack_front;
      memcpy (kkcc_gc_stack_ptr, &old_ptr[kkcc_gc_stack_front],
	      number_elements * sizeof (kkcc_gc_stack_entry));
      memcpy (&kkcc_gc_stack_ptr[number_elements], &old_ptr[0],
	      (kkcc_gc_stack_rear + 1) * sizeof (kkcc_gc_stack_entry));
      kkcc_gc_stack_front = 0;
      kkcc_gc_stack_rear = kkcc_gc_stack_rear + number_elements;
    }
  xfree_1 (old_ptr);
}
+ − 794
/* Push a (DATA, DESC) pair onto the mark stack, growing the stack first
   if the next push would fill it.  */
static void
#ifdef DEBUG_XEMACS
kkcc_gc_stack_push_1 (void *data, const struct memory_description *desc,
		      int level, int pos)
#else
kkcc_gc_stack_push_1 (void *data, const struct memory_description *desc)
#endif
{
#ifdef NEW_GC
  GC_STAT_ENQUEUED;
#endif /* NEW_GC */
  if (KKCC_GC_STACK_FULL)
      kkcc_gc_stack_realloc();
  kkcc_gc_stack_rear = KKCC_INC (kkcc_gc_stack_rear);
  kkcc_gc_stack_ptr[kkcc_gc_stack_rear].data = data;
  kkcc_gc_stack_ptr[kkcc_gc_stack_rear].desc = desc;
#ifdef DEBUG_XEMACS
  kkcc_gc_stack_ptr[kkcc_gc_stack_rear].level = level;
  kkcc_gc_stack_ptr[kkcc_gc_stack_rear].pos = pos;
#endif
}

/* LEVEL and POS (backtrace information) are recorded only in debug
   builds; the wrapper drops them otherwise.  */
#ifdef DEBUG_XEMACS
#define kkcc_gc_stack_push(data, desc, level, pos)	\
  kkcc_gc_stack_push_1 (data, desc, level, pos)
#else
#define kkcc_gc_stack_push(data, desc, level, pos)	\
  kkcc_gc_stack_push_1 (data, desc)
#endif
+ − 824
/* Pop the next entry to traverse, or return 0 if the stack is empty.
   With KKCC_STACK_AS_QUEUE defined the buffer is drained FIFO
   (breadth-first), otherwise LIFO (depth-first).  */
static kkcc_gc_stack_entry *
kkcc_gc_stack_pop (void)
{
  if (KKCC_GC_STACK_EMPTY)
    return 0;
#ifdef NEW_GC
  GC_STAT_DEQUEUED;
#endif /* NEW_GC */
#ifndef KKCC_STACK_AS_QUEUE
  /* stack behaviour */
  return &kkcc_gc_stack_ptr[kkcc_gc_stack_rear--];
#else
  /* queue behaviour */
  {
    int old_front = kkcc_gc_stack_front;
    kkcc_gc_stack_front = KKCC_INC (kkcc_gc_stack_front);
    return &kkcc_gc_stack_ptr[old_front];
  }
#endif
}
+ − 845
/* If OBJ is an unmarked record object, mark it and push it onto the
   mark stack so its contents get traversed.  Non-record objects carry
   no mark bit and need no traversal.  */
void
#ifdef DEBUG_XEMACS
kkcc_gc_stack_push_lisp_object_1 (Lisp_Object obj, int level, int pos)
#else
kkcc_gc_stack_push_lisp_object_1 (Lisp_Object obj)
#endif
{
  if (XTYPE (obj) == Lisp_Type_Record)
    {
      struct lrecord_header *lheader = XRECORD_LHEADER (obj);
      const struct memory_description *desc;
      GC_CHECK_LHEADER_INVARIANTS (lheader);
      desc = RECORD_DESCRIPTION (lheader);
      if (! MARKED_RECORD_HEADER_P (lheader))
	{
#ifdef NEW_GC
	  /* Incremental GC: newly reached objects become grey until
	     their contents have been traversed.  */
	  MARK_GREY (lheader);
#else /* not NEW_GC */
	  MARK_RECORD_HEADER (lheader);
#endif /* not NEW_GC */
	  kkcc_gc_stack_push ((void *) lheader, desc, level, pos);
	}
    }
}
+ − 870
#ifdef NEW_GC
#ifdef DEBUG_XEMACS
#define kkcc_gc_stack_push_lisp_object(obj, level, pos) \
  kkcc_gc_stack_push_lisp_object_1 (obj, level, pos)
#else
#define kkcc_gc_stack_push_lisp_object(obj, level, pos) \
  kkcc_gc_stack_push_lisp_object_1 (obj)
#endif

/* Re-mark a dirty record object grey and push it again so its contents
   are re-traversed (presumably objects modified during incremental
   marking -- see the write barrier machinery; callers not visible in
   this chunk).  Unlike the plain push, the mark bit is NOT checked
   first.  */
void
#ifdef DEBUG_XEMACS
kkcc_gc_stack_repush_dirty_object_1 (Lisp_Object obj, int level, int pos)
#else
kkcc_gc_stack_repush_dirty_object_1 (Lisp_Object obj)
#endif
{
  if (XTYPE (obj) == Lisp_Type_Record)
    {
      struct lrecord_header *lheader = XRECORD_LHEADER (obj);
      const struct memory_description *desc;
      GC_STAT_REPUSHED;
      GC_CHECK_LHEADER_INVARIANTS (lheader);
      desc = RECORD_DESCRIPTION (lheader);
      MARK_GREY (lheader);
      kkcc_gc_stack_push ((void*) lheader, desc, level, pos);
    }
}
#endif /* NEW_GC */
+ − 899
#ifdef ERROR_CHECK_GC
/* Assert that record object OBJ is not free, unless ALLOW_FREE is
   nonzero; compiles away without error checking.  */
#define KKCC_DO_CHECK_FREE(obj, allow_free)		\
do							\
  {							\
    if (!allow_free && XTYPE (obj) == Lisp_Type_Record)	\
      {							\
	struct lrecord_header *lheader = XRECORD_LHEADER (obj); \
	GC_CHECK_NOT_FREE (lheader);			\
      }							\
  } while (0)
#else
#define KKCC_DO_CHECK_FREE(obj, allow_free)
#endif
+ − 913
#ifdef ERROR_CHECK_GC
/* Enqueue OBJ for marking, first asserting (unless ALLOW_FREE) that it
   is not a freed object.  */
#ifdef DEBUG_XEMACS
static void
mark_object_maybe_checking_free_1 (Lisp_Object obj, int allow_free,
				   int level, int pos)
#else
static void
mark_object_maybe_checking_free_1 (Lisp_Object obj, int allow_free)
#endif
{
  KKCC_DO_CHECK_FREE (obj, allow_free);
  kkcc_gc_stack_push_lisp_object (obj, level, pos);
}

#ifdef DEBUG_XEMACS
#define mark_object_maybe_checking_free(obj, allow_free, level, pos) \
  mark_object_maybe_checking_free_1 (obj, allow_free, level, pos)
#else
#define mark_object_maybe_checking_free(obj, allow_free, level, pos) \
  mark_object_maybe_checking_free_1 (obj, allow_free)
#endif
#else /* not ERROR_CHECK_GC */
/* Without error checking, just enqueue the object.  */
#define mark_object_maybe_checking_free(obj, allow_free, level, pos) \
  kkcc_gc_stack_push_lisp_object (obj, level, pos)
#endif /* not ERROR_CHECK_GC */
+ − 939
+ − 940
/* This function loops over all COUNT elements of the inline array at
   DATA and pushes each element onto the mark stack with SDESC's
   element description, so its contents get traversed.  */
static void
#ifdef DEBUG_XEMACS
mark_struct_contents_1 (const void *data,
			const struct sized_memory_description *sdesc,
			int count, int level, int pos)
#else
mark_struct_contents_1 (const void *data,
			const struct sized_memory_description *sdesc,
			int count)
#endif
{
  int i;
  Bytecount elsize;
  /* Size of one array element, computed from its description.  */
  elsize = lispdesc_block_size (data, sdesc);

  for (i = 0; i < count; i++)
    {
      kkcc_gc_stack_push (((char *) data) + elsize * i, sdesc->description,
			  level, pos);
    }
}

#ifdef DEBUG_XEMACS
#define mark_struct_contents(data, sdesc, count, level, pos)	\
  mark_struct_contents_1 (data, sdesc, count, level, pos)
#else
#define mark_struct_contents(data, sdesc, count, level, pos)	\
  mark_struct_contents_1 (data, sdesc, count)
#endif
+ − 972
+ − 973
#ifdef NEW_GC
/* Like mark_struct_contents_1, but for an array of objects that are
   themselves Lisp objects (NEW_GC Lisp object blocks): each element is
   re-wrapped as a Lisp_Object, and only yet-unmarked record headers
   are greyed and pushed onto the KKCC mark stack. */
static void
#ifdef DEBUG_XEMACS
mark_lisp_object_block_contents_1 (const void *data,
				   const struct sized_memory_description *sdesc,
				   int count, int level, int pos)
#else
mark_lisp_object_block_contents_1 (const void *data,
				   const struct sized_memory_description *sdesc,
				   int count)
#endif
{
  int i;
  Bytecount elsize;
  elsize = lispdesc_block_size (data, sdesc);

  for (i = 0; i < count; i++)
    {
      const Lisp_Object obj = wrap_pointer_1 (((char *) data) + elsize * i);
      if (XTYPE (obj) == Lisp_Type_Record)
	{
	  struct lrecord_header *lheader = XRECORD_LHEADER (obj);
	  const struct memory_description *desc;
	  GC_CHECK_LHEADER_INVARIANTS (lheader);
	  desc = sdesc->description;
	  /* Only push objects not yet reached, to keep the mark stack
	     free of duplicates. */
	  if (! MARKED_RECORD_HEADER_P (lheader))
	    {
	      MARK_GREY (lheader);
	      kkcc_gc_stack_push ((void *) lheader, desc, level, pos);
	    }
	}
    }
}

/* Uniform-arity wrapper; LEVEL/POS are dropped without DEBUG_XEMACS. */
#ifdef DEBUG_XEMACS
#define mark_lisp_object_block_contents(data, sdesc, count, level, pos) \
  mark_lisp_object_block_contents_1 (data, sdesc, count, level, pos)
#else
#define mark_lisp_object_block_contents(data, sdesc, count, level, pos) \
  mark_lisp_object_block_contents_1 (data, sdesc, count)
#endif
#endif /* NEW_GC */
+ − 1018
/* This function implements the KKCC mark algorithm.
   Instead of calling mark_object, all the alive Lisp_Objects are pushed
   on the kkcc_gc_stack.  This function processes all elements on the stack
   according to their descriptions.

   CNT bounds the number of stack entries processed per call (NEW_GC
   only, for incremental marking); CNT == 0 means "drain the stack
   completely".  Without NEW_GC the argument is unused and the stack is
   always drained. */
static void
kkcc_marking (
#ifdef NEW_GC
	      int cnt
#else /* not NEW_GC */
	      int UNUSED (cnt)
#endif /* not NEW_GC */
	      )
{
  kkcc_gc_stack_entry *stack_entry = 0;
  void *data = 0;
  const struct memory_description *desc = 0;
  int pos;
#ifdef NEW_GC
  int count = cnt;		/* remaining work budget for this slice */
#endif /* NEW_GC */
#ifdef DEBUG_XEMACS
  int level = 0;		/* traversal depth, for kkcc backtraces */
#endif

  while ((stack_entry = kkcc_gc_stack_pop ()) != 0)
    {
      data = stack_entry->data;
      desc = stack_entry->desc;
#ifdef DEBUG_XEMACS
      level = stack_entry->level + 1;
#endif
      kkcc_bt_push (data, desc, stack_entry->level, stack_entry->pos);

#ifdef NEW_GC
      /* Mark black if object is currently grey.  This first checks,
	 if the object is really allocated on the mc-heap.  If it is,
	 it can be marked black; if it is not, it cannot be marked. */
      maybe_mark_black (data);
#endif /* NEW_GC */

      if (!data) continue;

      gc_checking_assert (data);
      gc_checking_assert (desc);

      /* Walk the object's memory description and push every reachable
	 slot; XD_END terminates the description array. */
      for (pos = 0; desc[pos].type != XD_END; pos++)
	{
	  const struct memory_description *desc1 = &desc[pos];
	  /* rdata = address of this slot within the object; the offset
	     may itself be indirect. */
	  const void *rdata =
	    (const char *) data + lispdesc_indirect_count (desc1->offset,
							   desc, data);
	union_switcheroo:

	  /* If the flag says don't mark, then don't mark. */
	  if ((desc1->flags) & XD_FLAG_NO_KKCC)
	    continue;

	  switch (desc1->type)
	    {
	      /* Scalar/opaque slots contain no Lisp references. */
	    case XD_BYTECOUNT:
	    case XD_ELEMCOUNT:
	    case XD_HASHCODE:
	    case XD_INT:
	    case XD_LONG:
	    case XD_INT_RESET:
	    case XD_LO_LINK:
	    case XD_OPAQUE_PTR:
	    case XD_OPAQUE_DATA_PTR:
	    case XD_ASCII_STRING:
	    case XD_DOC_STRING:
	      break;
	    case XD_LISP_OBJECT:
	      {
		const Lisp_Object *stored_obj = (const Lisp_Object *) rdata;

		/* Because of the way that tagged objects work (pointers and
		   Lisp_Objects have the same representation), XD_LISP_OBJECT
		   can be used for untagged pointers.  They might be NULL,
		   though. */
		if (EQ (*stored_obj, Qnull_pointer))
		  break;
#ifdef NEW_GC
		mark_object_maybe_checking_free (*stored_obj, 0, level, pos);
#else /* not NEW_GC */
		mark_object_maybe_checking_free
		  (*stored_obj, (desc1->flags) & XD_FLAG_FREE_LISP_OBJECT,
		   level, pos);
#endif /* not NEW_GC */
		break;
	      }
	    case XD_LISP_OBJECT_ARRAY:
	      {
		int i;
		EMACS_INT count =
		  lispdesc_indirect_count (desc1->data1, desc, data);

		for (i = 0; i < count; i++)
		  {
		    const Lisp_Object *stored_obj =
		      (const Lisp_Object *) rdata + i;

		    /* NOTE(review): a null pointer stops the whole array
		       scan (break, not continue) — presumably null marks
		       the end of the used portion; confirm before
		       changing. */
		    if (EQ (*stored_obj, Qnull_pointer))
		      break;
#ifdef NEW_GC
		    mark_object_maybe_checking_free
		      (*stored_obj, 0, level, pos);
#else /* not NEW_GC */
		    mark_object_maybe_checking_free
		      (*stored_obj, (desc1->flags) & XD_FLAG_FREE_LISP_OBJECT,
		       level, pos);
#endif /* not NEW_GC */
		  }
		break;
	      }
#ifdef NEW_GC
	    case XD_LISP_OBJECT_BLOCK_PTR:
	      {
		EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc,
							   data);
		const struct sized_memory_description *sdesc =
		  lispdesc_indirect_description (data, desc1->data2.descr);
		const char *dobj = * (const char **) rdata;
		if (dobj)
		  mark_lisp_object_block_contents
		    (dobj, sdesc, count, level, pos);
		break;
	      }
#endif /* NEW_GC */
	    case XD_BLOCK_PTR:
	      {
		EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc,
							   data);
		const struct sized_memory_description *sdesc =
		  lispdesc_indirect_description (data, desc1->data2.descr);
		const char *dobj = * (const char **) rdata;
		if (dobj)
		  mark_struct_contents (dobj, sdesc, count, level, pos);
		break;
	      }
	    case XD_BLOCK_ARRAY:
	      {
		EMACS_INT count = lispdesc_indirect_count (desc1->data1, desc,
							   data);
		const struct sized_memory_description *sdesc =
		  lispdesc_indirect_description (data, desc1->data2.descr);

		mark_struct_contents (rdata, sdesc, count, level, pos);
		break;
	      }
	    case XD_UNION:
	    case XD_UNION_DYNAMIC_SIZE:
	      /* Resolve the active union member, then reprocess this
		 slot with the member's description. */
	      desc1 = lispdesc_process_xd_union (desc1, desc, data);
	      if (desc1)
		goto union_switcheroo;
	      break;

	    default:
	      stderr_out ("Unsupported description type : %d\n", desc1->type);
	      kkcc_backtrace ();
	      ABORT ();
	    }
	}

#ifdef NEW_GC
      /* Incremental mode: stop once the work budget is exhausted. */
      if (cnt)
	if (!--count)
	  break;
#endif /* NEW_GC */
    }
}
+ − 1189 #endif /* USE_KKCC */
+ − 1190
+ − 1191 /* I hate duplicating all this crap! */
+ − 1192 int
+ − 1193 marked_p (Lisp_Object obj)
+ − 1194 {
+ − 1195 /* Checks we used to perform. */
+ − 1196 /* if (EQ (obj, Qnull_pointer)) return 1; */
+ − 1197 /* if (!POINTER_TYPE_P (XGCTYPE (obj))) return 1; */
+ − 1198 /* if (PURIFIED (XPNTR (obj))) return 1; */
+ − 1199
+ − 1200 if (XTYPE (obj) == Lisp_Type_Record)
+ − 1201 {
+ − 1202 struct lrecord_header *lheader = XRECORD_LHEADER (obj);
+ − 1203
+ − 1204 GC_CHECK_LHEADER_INVARIANTS (lheader);
+ − 1205
+ − 1206 return MARKED_RECORD_HEADER_P (lheader);
+ − 1207 }
+ − 1208 return 1;
+ − 1209 }
+ − 1210
+ − 1211
/* Mark reference to a Lisp_Object.  If the object referred to has not been
   seen yet, recursively mark all the references contained in it.

   Non-KKCC marking entry point.  Recursion is flattened one level via
   the tail_recurse label: the type's mark method returns the one
   sub-object it wants marked tail-recursively (or nil), limiting C
   stack growth for chain-shaped structures.  Under USE_KKCC this
   function must never be called; callers use
   kkcc_gc_stack_push_lisp_object instead, and reaching this body
   aborts. */
void
mark_object (
#ifdef USE_KKCC
	     Lisp_Object UNUSED (obj)
#else
	     Lisp_Object obj
#endif
	     )
{
#ifdef USE_KKCC
  /* this code should never be reached when configured for KKCC */
  stderr_out ("KKCC: Invalid mark_object call.\n");
  stderr_out ("Replace mark_object with kkcc_gc_stack_push_lisp_object.\n");
  ABORT ();
#else /* not USE_KKCC */

 tail_recurse:

  if (XTYPE (obj) == Lisp_Type_Record)
    {
      struct lrecord_header *lheader = XRECORD_LHEADER (obj);

      GC_CHECK_LHEADER_INVARIANTS (lheader);

      /* We handle this separately, above, so we can mark free objects */
      GC_CHECK_NOT_FREE (lheader);

      /* All c_readonly objects have their mark bit set,
	 so that we only need to check the mark bit here. */
      if (! MARKED_RECORD_HEADER_P (lheader))
	{
	  MARK_RECORD_HEADER (lheader);

	  /* Ask the type's mark method for the next object to chase. */
	  if (RECORD_MARKER (lheader))
	    {
	      obj = RECORD_MARKER (lheader) (obj);
	      if (!NILP (obj)) goto tail_recurse;
	    }
	}
    }
#endif /* not KKCC */
}
+ − 1261
+ − 1262
+ − 1263 /************************************************************************/
+ − 1264 /* Hooks */
+ − 1265 /************************************************************************/
+ − 1266
/* Nonzero when calling certain hooks or doing other things where a GC
   would be bad.  It prevents infinite recursive calls to gc. */
int gc_currently_forbidden;

/* Forbid GC for the dynamic extent of the current specbinding frame.
   Returns the specpdl count to pass to end_gc_forbidden/unbind_to. */
int
begin_gc_forbidden (void)
{
  return internal_bind_int (&gc_currently_forbidden, 1);
}

/* Undo a begin_gc_forbidden; COUNT is the value it returned. */
void
end_gc_forbidden (int count)
{
  unbind_to (count);
}
+ − 1282
/* Hooks run before and after each garbage collection. */
Lisp_Object Vpre_gc_hook, Qpre_gc_hook;
Lisp_Object Vpost_gc_hook, Qpost_gc_hook;

/* Maybe we want to use this when doing a "panic" gc after memory_full()? */
static int gc_hooks_inhibited;

/* A C-level callback deferred until GC has finished (see
   register_post_gc_action below); FUN is invoked with ARG. */
struct post_gc_action
{
  void (*fun) (void *);
  void *arg;
};

typedef struct post_gc_action post_gc_action;

typedef struct
{
  Dynarr_declare (post_gc_action);
} post_gc_action_dynarr;

/* Queue of pending actions; allocated lazily on first registration. */
static post_gc_action_dynarr *post_gc_actions;
+ − 1304
+ − 1305 /* Register an action to be called at the end of GC.
+ − 1306 gc_in_progress is 0 when this is called.
+ − 1307 This is used when it is discovered that an action needs to be taken,
+ − 1308 but it's during GC, so it's not safe. (e.g. in a finalize method.)
+ − 1309
+ − 1310 As a general rule, do not use Lisp objects here.
+ − 1311 And NEVER signal an error.
+ − 1312 */
+ − 1313
+ − 1314 void
+ − 1315 register_post_gc_action (void (*fun) (void *), void *arg)
+ − 1316 {
+ − 1317 post_gc_action action;
+ − 1318
+ − 1319 if (!post_gc_actions)
+ − 1320 post_gc_actions = Dynarr_new (post_gc_action);
+ − 1321
+ − 1322 action.fun = fun;
+ − 1323 action.arg = arg;
+ − 1324
+ − 1325 Dynarr_add (post_gc_actions, action);
+ − 1326 }
+ − 1327
+ − 1328 static void
+ − 1329 run_post_gc_actions (void)
+ − 1330 {
+ − 1331 int i;
+ − 1332
+ − 1333 if (post_gc_actions)
+ − 1334 {
+ − 1335 for (i = 0; i < Dynarr_length (post_gc_actions); i++)
+ − 1336 {
+ − 1337 post_gc_action action = Dynarr_at (post_gc_actions, i);
+ − 1338 (action.fun) (action.arg);
+ − 1339 }
+ − 1340
+ − 1341 Dynarr_reset (post_gc_actions);
+ − 1342 }
+ − 1343 }
+ − 1344
3263
#ifdef NEW_GC
/* Asynchronous finalization: objects needing a finalizer are kept on a
   singly linked list and moved to Vfinalizers_to_run when GC finds
   them dead (see register_for_finalization). */
typedef struct finalize_elem
{
  Lisp_Object obj;
  struct finalize_elem *next;
} finalize_elem;

/* Head of the list of all live objects with finalizers. */
finalize_elem *Vall_finalizable_objs;
/* Lisp list of dead objects whose finalizers are pending. */
Lisp_Object Vfinalizers_to_run;

/* Prepend OBJ to the finalizable-objects list.  The cell is allocated
   with xmalloc_and_zero and freed in register_for_finalization. */
void
add_finalizable_obj (Lisp_Object obj)
{
  finalize_elem *next = Vall_finalizable_objs;
  Vall_finalizable_objs =
    (finalize_elem *) xmalloc_and_zero (sizeof (finalize_elem));
  Vall_finalizable_objs->obj = obj;
  Vall_finalizable_objs->next = next;
}
+ − 1365
+ − 1366 void
+ − 1367 register_for_finalization (void)
+ − 1368 {
+ − 1369 finalize_elem *rest = Vall_finalizable_objs;
+ − 1370
+ − 1371 if (!rest)
+ − 1372 return;
+ − 1373
+ − 1374 while (!marked_p (rest->obj))
+ − 1375 {
+ − 1376 finalize_elem *temp = rest;
+ − 1377 Vfinalizers_to_run = Fcons (rest->obj, Vfinalizers_to_run);
+ − 1378 Vall_finalizable_objs = rest->next;
+ − 1379 xfree (temp, finalize_elem *);
+ − 1380 rest = Vall_finalizable_objs;
+ − 1381 }
+ − 1382
+ − 1383 while (rest->next)
+ − 1384 {
+ − 1385 if (LRECORDP (rest->next->obj)
+ − 1386 && !marked_p (rest->next->obj))
+ − 1387 {
+ − 1388 finalize_elem *temp = rest->next;
+ − 1389 Vfinalizers_to_run = Fcons (rest->next->obj, Vfinalizers_to_run);
+ − 1390 rest->next = rest->next->next;
+ − 1391 xfree (temp, finalize_elem *);
+ − 1392 }
+ − 1393 else
+ − 1394 {
+ − 1395 rest = rest->next;
+ − 1396 }
+ − 1397 }
+ − 1398 /* Keep objects alive that need to be finalized by marking
+ − 1399 Vfinalizers_to_run transitively. */
+ − 1400 kkcc_gc_stack_push_lisp_object (Vfinalizers_to_run, 0, -1);
+ − 1401 kkcc_marking (0);
+ − 1402 }
+ − 1403
/* Call the finalizer of every object queued on Vfinalizers_to_run,
   then clear the queue.  Runs outside the GC proper. */
void
run_finalizers (void)
{
  Lisp_Object rest;
  for (rest = Vfinalizers_to_run; !NILP (rest); rest = XCDR (rest))
    {
      MC_ALLOC_CALL_FINALIZER (XPNTR (XCAR (rest)));
    }
  Vfinalizers_to_run = Qnil;
}
#endif /* NEW_GC */
3092
+ − 1415
+ − 1416
+ − 1417 /************************************************************************/
+ − 1418 /* Garbage Collection */
+ − 1419 /************************************************************************/
+ − 1420
/* Enable/disable incremental garbage collection during runtime. */
int allow_incremental_gc;

/* For profiling. */
static Lisp_Object QSin_garbage_collection;

/* Nonzero means display messages at beginning and end of GC. */
int garbage_collection_messages;

/* "Garbage collecting" */
Lisp_Object Vgc_message;
Lisp_Object Vgc_pointer_glyph;
static const Ascbyte gc_default_message[] = "Garbage collecting";
Lisp_Object Qgarbage_collecting;

/* "Locals" during GC.  Shared between show_gc_cursor_and_message,
   remove_gc_cursor_and_message and gc()/garbage_collect_1 across one
   collection.  NOTE(review): these are file-scope but not static —
   presumably historical; making them static would be safer if nothing
   outside this file references them. */
struct frame *f;
int speccount;
int cursor_changed;
Lisp_Object pre_gc_cursor;

/* PROFILE_DECLARE */
int do_backtrace;
struct backtrace backtrace;

/* Maximum amount of C stack to save when a GC happens. */
#ifndef MAX_SAVE_STACK
#define MAX_SAVE_STACK 0 /* 16000 */
#endif
+ − 1450
/* Give the user feedback that a GC is starting: switch the selected
   frame's mouse pointer to Vgc_pointer_glyph when possible, otherwise
   print Vgc_message (or the default) in the echo area.  Records what
   it changed in the file-scope f/pre_gc_cursor/cursor_changed so
   remove_gc_cursor_and_message can undo it. */
void
show_gc_cursor_and_message (void)
{
  /* Now show the GC cursor/message. */
  pre_gc_cursor = Qnil;
  cursor_changed = 0;

  /* We used to call selected_frame() here.

     The following functions cannot be called inside GC
     so we move to after the above tests. */
  {
    Lisp_Object frame;
    Lisp_Object device = Fselected_device (Qnil);
    if (NILP (device)) /* Could happen during startup, eg. if always_gc */
      return;
    frame = Fselected_frame (device);
    if (NILP (frame))
      invalid_state ("No frames exist on device", device);
    f = XFRAME (frame);
  }

  if (!noninteractive)
    {
      if (FRAME_WIN_P (f))
	{
	  Lisp_Object frame = wrap_frame (f);
	  Lisp_Object cursor = glyph_image_instance (Vgc_pointer_glyph,
						     FRAME_SELECTED_WINDOW (f),
						     ERROR_ME_NOT, 1);
	  pre_gc_cursor = f->pointer;
	  if (POINTER_IMAGE_INSTANCEP (cursor)
	      /* don't change if we don't know how to change back. */
	      && POINTER_IMAGE_INSTANCEP (pre_gc_cursor))
	    {
	      cursor_changed = 1;
	      Fset_frame_pointer (frame, cursor);
	    }
	}

      /* Don't print messages to the stream device. */
      if (!cursor_changed && !FRAME_STREAM_P (f))
	{
	  if (garbage_collection_messages)
	    {
	      Lisp_Object args[2], whole_msg;
	      args[0] = (STRINGP (Vgc_message) ? Vgc_message :
			 build_msg_string (gc_default_message));
	      args[1] = build_string ("...");
	      whole_msg = Fconcat (2, args);
	      echo_area_message (f, (Ibyte *) 0, whole_msg, 0, -1,
				 Qgarbage_collecting);
	    }
	}
    }
}
+ − 1507
/* Undo show_gc_cursor_and_message after GC: restore the saved mouse
   pointer, or append "... done" to the echo-area message (only when
   the echo area is otherwise empty, so we don't clobber output some
   hook produced meanwhile). */
void
remove_gc_cursor_and_message (void)
{
  /* Now remove the GC cursor/message */
  if (!noninteractive)
    {
      if (cursor_changed)
	Fset_frame_pointer (wrap_frame (f), pre_gc_cursor);
      else if (!FRAME_STREAM_P (f))
	{
	  /* Show "...done" only if the echo area would otherwise be empty. */
	  if (NILP (clear_echo_area (selected_frame (),
				     Qgarbage_collecting, 0)))
	    {
	      if (garbage_collection_messages)
		{
		  Lisp_Object args[2], whole_msg;
		  args[0] = (STRINGP (Vgc_message) ? Vgc_message :
			     build_msg_string (gc_default_message));
		  args[1] = build_msg_string ("... done");
		  whole_msg = Fconcat (2, args);
		  echo_area_message (selected_frame (), (Ibyte *) 0,
				     whole_msg, 0, -1,
				     Qgarbage_collecting);
		}
	    }
	}
    }
}
+ − 1537
/* First phase of a collection: sanity-check that GC may run here, run
   the pre-gc hook, set gc_in_progress, optionally snapshot the C stack
   for debugging, and clear ad-hoc resources whose teardown must happen
   before marking. */
void
gc_prepare (void)
{
#if MAX_SAVE_STACK > 0
  char stack_top_variable;
  extern char *stack_bottom;
#endif

#ifdef NEW_GC
  GC_STAT_START_NEW_GC;
  GC_SET_PHASE (INIT_GC);
#endif /* NEW_GC */

  do_backtrace = profiling_active || backtrace_with_internal_sections;

  assert (!gc_in_progress);
  assert (!in_display || gc_currently_forbidden);

  PROFILE_RECORD_ENTERING_SECTION (QSin_garbage_collection);

  need_to_signal_post_gc = 0;
  recompute_funcall_allocation_flag ();

  /* Run the pre-gc hook before gc_in_progress is set, since it may
     execute Lisp code. */
  if (!gc_hooks_inhibited)
    run_hook_trapping_problems
      (Qgarbage_collecting, Qpre_gc_hook,
       INHIBIT_EXISTING_PERMANENT_DISPLAY_OBJECT_DELETION);

  /***** Now we actually start the garbage collection. */

  gc_in_progress = 1;
#ifndef NEW_GC
  inhibit_non_essential_conversion_operations = 1;
#endif /* not NEW_GC */

#if MAX_SAVE_STACK > 0

  /* Save a copy of the contents of the stack, for debugging. */
  if (!purify_flag)
    {
      /* Static buffer in which we save a copy of the C stack at each GC. */
      static char *stack_copy;
      static Bytecount stack_copy_size;

      ptrdiff_t stack_diff = &stack_top_variable - stack_bottom;
      Bytecount stack_size = (stack_diff > 0 ? stack_diff : -stack_diff);
      if (stack_size < MAX_SAVE_STACK)
	{
	  if (stack_copy_size < stack_size)
	    {
	      stack_copy = (char *) xrealloc (stack_copy, stack_size);
	      stack_copy_size = stack_size;
	    }

	  /* The stack may grow up or down; copy from whichever end is
	     lower in memory. */
	  memcpy (stack_copy,
		  stack_diff > 0 ? stack_bottom : &stack_top_variable,
		  stack_size);
	}
    }
#endif /* MAX_SAVE_STACK > 0 */

  /* Do some totally ad-hoc resource clearing. */
  /* #### generalize this? */
  clear_event_resource ();
  cleanup_specifiers ();
  cleanup_buffer_undo_lists ();
}
+ − 1605
/* Mark every GC root: staticpro'd variables, GCPRO'd locals, the
   specpdl, active catch frames, the backtrace list, and profiling
   data.  Under USE_KKCC, mark_object is locally redefined to push onto
   the KKCC stack instead of recursing.  Under NEW_GC, PHASE records
   whether this is the initial root push or a repush when resuming an
   incremental mark. */
void
gc_mark_root_set (
#ifdef NEW_GC
		  enum gc_phase phase
#else /* not NEW_GC */
		  void
#endif /* not NEW_GC */
		  )
{
#ifdef NEW_GC
  GC_SET_PHASE (phase);
#endif /* NEW_GC */

  /* Mark all the special slots that serve as the roots of accessibility. */

#ifdef USE_KKCC
# define mark_object(obj) kkcc_gc_stack_push_lisp_object (obj, 0, -1)
#endif /* USE_KKCC */

  { /* staticpro() */
    Lisp_Object **p = Dynarr_begin (staticpros);
    Elemcount count;
    for (count = Dynarr_length (staticpros); count; count--, p++)
      /* Need to check if the pointer in the staticpro array is not
	 NULL.  A gc can occur after variable is added to the staticpro
	 array and _before_ it is correctly initialized.  In this case
	 its value is NULL, which we have to catch here. */
      if (*p)
	mark_object (**p);
  }

  { /* staticpro_nodump() */
    Lisp_Object **p = Dynarr_begin (staticpros_nodump);
    Elemcount count;
    for (count = Dynarr_length (staticpros_nodump); count; count--, p++)
      /* Need to check if the pointer in the staticpro array is not
	 NULL.  A gc can occur after variable is added to the staticpro
	 array and _before_ it is correctly initialized.  In this case
	 its value is NULL, which we have to catch here. */
      if (*p)
	mark_object (**p);
  }

#ifdef NEW_GC
  { /* mcpro () */
    Lisp_Object *p = Dynarr_begin (mcpros);
    Elemcount count;
    for (count = Dynarr_length (mcpros); count; count--)
      mark_object (*p++);
  }
#endif /* NEW_GC */

  { /* GCPRO() */
    struct gcpro *tail;
    int i;
    for (tail = gcprolist; tail; tail = tail->next)
      for (i = 0; i < tail->nvars; i++)
	mark_object (tail->var[i]);
  }

  { /* specbind() */
    struct specbinding *bind;
    for (bind = specpdl; bind != specpdl_ptr; bind++)
      {
	mark_object (bind->symbol);
	mark_object (bind->old_value);
      }
  }

  { /* active catch frames */
    struct catchtag *c;
    for (c = catchlist; c; c = c->next)
      {
	mark_object (c->tag);
	mark_object (c->val);
	mark_object (c->actual_tag);
	mark_object (c->backtrace);
      }
  }

  { /* the Lisp call stack */
    struct backtrace *backlist;
    for (backlist = backtrace_list; backlist; backlist = backlist->next)
      {
	int nargs = backlist->nargs;
	int i;

	mark_object (*backlist->function);
	if (nargs < 0 /* nargs == UNEVALLED || nargs == MANY */
	    /* might be fake (internal profiling entry) */
	    && backlist->args)
	  mark_object (backlist->args[0]);
	else
	  for (i = 0; i < nargs; i++)
	    mark_object (backlist->args[i]);
      }
  }

  mark_profiling_info ();
#ifdef USE_KKCC
# undef mark_object
#endif
}
+ − 1709
/* Complete the mark phase: iterate marking of weak structures and
   ephemerons to a fixpoint (each finish_* pass may make new objects
   reachable, so loop until none reports progress), then prune all weak
   structures.  Pruning must happen after all marking and before any
   sweeping. */
void
gc_finish_mark (void)
{
#ifdef NEW_GC
  GC_SET_PHASE (FINISH_MARK);
#endif /* NEW_GC */
  init_marking_ephemerons ();

  /* First fixpoint: marking may enqueue work on the KKCC stack, which
     must be drained before re-testing progress. */
  while (finish_marking_weak_hash_tables () > 0 ||
	 finish_marking_weak_lists () > 0 ||
	 continue_marking_ephemerons () > 0)
#ifdef USE_KKCC
    {
      kkcc_marking (0);
    }
#else /* not USE_KKCC */
    ;
#endif /* not USE_KKCC */

  /* At this point, we know which objects need to be finalized: we
     still need to resurrect them */

  while (finish_marking_ephemerons () > 0 ||
	 finish_marking_weak_lists () > 0 ||
	 finish_marking_weak_hash_tables () > 0)
#ifdef USE_KKCC
    {
      kkcc_marking (0);
    }
#else /* not USE_KKCC */
    ;
#endif /* not USE_KKCC */

  /* And prune (this needs to be called after everything else has been
     marked and before we do any sweeping). */
  /* #### this is somewhat ad-hoc and should probably be an object
     method */
  prune_weak_hash_tables ();
  prune_weak_lists ();
  prune_specifiers ();
  prune_syntax_tables ();

  prune_ephemerons ();
  prune_weak_boxes ();
}
+ − 1755
#ifdef NEW_GC
/* Finalization phase: queue dead finalizable objects and resurrect
   them (see register_for_finalization). */
void
gc_finalize (void)
{
  GC_SET_PHASE (FINALIZE);
  register_for_finalization ();
}

/* Sweep phase: reclaim all unmarked storage on the mc-heap. */
void
gc_sweep (void)
{
  GC_SET_PHASE (SWEEP);
  mc_sweep ();
}
#endif /* NEW_GC */
+ − 1771
+ − 1772
/* Final phase: reset allocation counters, clear gc_in_progress, run
   deferred post-GC actions, replenish the emergency allocation buffer
   (non-NEW_GC), and arrange for the post-gc hook to be signalled. */
void
gc_finish (void)
{
#ifdef NEW_GC
  GC_SET_PHASE (FINISH_GC);
#endif /* NEW_GC */
  consing_since_gc = 0;
#ifndef DEBUG_XEMACS
  /* Enforce a sane minimum in release builds; debug builds may set it
     arbitrarily low. */
  if (gc_cons_threshold < 10000)
    gc_cons_threshold = 10000;
#endif
  recompute_need_to_garbage_collect ();

#ifndef NEW_GC
  inhibit_non_essential_conversion_operations = 0;
#endif /* not NEW_GC */
  gc_in_progress = 0;

  run_post_gc_actions ();

  /******* End of garbage collection ********/

#ifndef NEW_GC
  /* Keep a spare block around so an out-of-memory condition can be
     reported instead of crashing outright. */
  if (!breathing_space)
    {
      breathing_space = malloc (4096 - MALLOC_OVERHEAD);
    }
#endif /* not NEW_GC */

  need_to_signal_post_gc = 1;
  funcall_allocation_flag = 1;

  PROFILE_RECORD_EXITING_SECTION (QSin_garbage_collection);

#ifdef NEW_GC
  GC_SET_PHASE (NONE);
#endif /* NEW_GC */
}
+ − 1812
#ifdef NEW_GC
/* Pause an incremental mark: enable the write barrier and start
   recording dirty bits so mutator writes between slices are seen when
   marking resumes. */
void
gc_suspend_mark_phase (void)
{
  PROFILE_RECORD_EXITING_SECTION (QSin_garbage_collection);
  write_barrier_enabled = 1;
  consing_since_gc = 0;
  vdb_start_dirty_bits_recording ();
}

/* Resume a paused incremental mark: stop dirty-bit recording, disable
   the write barrier, and return the number of objects that were
   modified while marking was suspended (they have been repushed). */
int
gc_resume_mark_phase (void)
{
  PROFILE_RECORD_ENTERING_SECTION (QSin_garbage_collection);
  assert (write_barrier_enabled);
  vdb_stop_dirty_bits_recording ();
  write_barrier_enabled = 0;
  return vdb_read_dirty_bits ();
}
+ − 1832
+ − 1833 int
+ − 1834 gc_mark (int incremental)
+ − 1835 {
+ − 1836 GC_SET_PHASE (MARK);
+ − 1837 if (!incremental)
+ − 1838 {
+ − 1839 kkcc_marking (0);
+ − 1840 }
+ − 1841 else
+ − 1842 {
+ − 1843 kkcc_marking (gc_incremental_traversal_threshold);
+ − 1844 if (!KKCC_GC_STACK_EMPTY)
+ − 1845 {
+ − 1846 gc_suspend_mark_phase ();
+ − 1847 return 0;
+ − 1848 }
+ − 1849 }
+ − 1850 return 1;
+ − 1851 }
+ − 1852
/* Continue a previously suspended mark phase.  Non-incremental: finish
   the whole mark atomically (repush roots, drain the stack).  Incremental:
   do one more slice sized by max(threshold, objects repushed by the
   write barrier); if the stack empties, repush roots and finish,
   otherwise suspend again.  Returns 1 when marking finished, 0 when it
   was suspended again. */
int
gc_resume_mark (int incremental)
{
  if (!incremental)
    {
      if (!KKCC_GC_STACK_EMPTY)
	{
	  GC_STAT_RESUME_GC;
	  /* An incremental garbage collection is already running ---
	     now wrap it up and resume it atomically. */
	  gc_resume_mark_phase ();
	  gc_mark_root_set (REPUSH_ROOT_SET);
	  kkcc_marking (0);
	}
    }
  else
    {
      int repushed_objects;
      int mark_work;
      GC_STAT_RESUME_GC;
      repushed_objects = gc_resume_mark_phase ();
      /* Make sure the slice is large enough to cover at least the
	 objects dirtied while we were suspended. */
      mark_work = (gc_incremental_traversal_threshold > repushed_objects) ?
	gc_incremental_traversal_threshold : repushed_objects;
      kkcc_marking (mark_work);
      if (KKCC_GC_STACK_EMPTY)
	{
	  /* Mark root set again and finish up marking. */
	  gc_mark_root_set (REPUSH_ROOT_SET);
	  kkcc_marking (0);
	}
      else
	{
	  gc_suspend_mark_phase ();
	  return 0;
	}
    }
  return 1;
}
+ − 1891
+ − 1892
/* Drive one GC cycle (or one incremental slice) as a phase machine.
   GC_PHASE records where a suspended incremental collection stopped;
   the deliberately fall-through switch resumes at that phase and runs
   all later ones.  Returning early (marking suspended) leaves GC_PHASE
   set so the next call continues from there. */
void
gc_1 (int incremental)
{
  switch (GC_PHASE)
    {
    case NONE:
      gc_prepare ();
      kkcc_gc_stack_init();
#ifdef DEBUG_XEMACS
      kkcc_bt_init ();
#endif
      /* fall through */
    case INIT_GC:
      gc_mark_root_set (PUSH_ROOT_SET);
      /* fall through */
    case PUSH_ROOT_SET:
      if (!gc_mark (incremental))
	return; /* suspend gc */
      /* fall through */
    case MARK:
      if (!KKCC_GC_STACK_EMPTY)
	if (!gc_resume_mark (incremental))
	  return; /* suspend gc */
      gc_finish_mark ();
      /* fall through */
    case FINISH_MARK:
      gc_finalize ();
      kkcc_gc_stack_free ();
#ifdef DEBUG_XEMACS
      kkcc_bt_free ();
#endif
      /* fall through */
    case FINALIZE:
      gc_sweep ();
      /* fall through */
    case SWEEP:
      gc_finish ();
      /* fall through */
    case FINISH_GC:
      break;
    }
}
+ − 1928
/* Top-level NEW_GC entry point.  INCREMENTAL nonzero runs (or resumes)
   one incremental slice; zero runs a full collection.  Refuses to run
   when GC is forbidden, during redisplay, or while shutting down. */
void gc (int incremental)
{
  if (gc_currently_forbidden
      || in_display
      || preparing_for_armageddon)
    return;

  /* Very important to prevent GC during any of the following
     stuff that might run Lisp code; otherwise, we'll likely
     have infinite GC recursion. */
  speccount = begin_gc_forbidden ();

  show_gc_cursor_and_message ();

  gc_1 (incremental);

  remove_gc_cursor_and_message ();

  /* now stop inhibiting GC */
  unbind_to (speccount);
}
+ − 1950
/* Run a complete, non-incremental collection (finishing any suspended
   incremental one). */
void
gc_full (void)
{
  gc (0);
}

/* Lisp-visible wrapper around gc_full. */
DEFUN ("gc-full", Fgc_full, 0, 0, "", /*
This function performs a full garbage collection.  If an incremental
garbage collection is already running, it completes without any
further interruption.  This function guarantees that unused objects
are freed when it returns.  Garbage collection happens automatically if
the client allocates more than `gc-cons-threshold' bytes of Lisp data
since the previous garbage collection.
*/
       ())
{
  gc_full ();
  return Qt;
}
+ − 1970
/* Start or advance an incremental collection (degrades to a full GC
   when allow_incremental_gc is zero). */
void
gc_incremental (void)
{
  gc (allow_incremental_gc);
}

/* Lisp-visible wrapper around gc_incremental. */
DEFUN ("gc-incremental", Fgc_incremental, 0, 0, "", /*
This function starts an incremental garbage collection.  If an
incremental garbage collection is already running, the next cycle
starts.  Note that this function has not necessarily freed any memory
when it returns.  This function only guarantees, that the traversal of
the heap makes progress.  The next cycle of incremental garbage
collection happens automatically if the client allocates more than
`gc-incremental-cons-threshold' bytes of Lisp data since previous
garbage collection.
*/
       ())
{
  gc_incremental ();
  return Qt;
}
+ − 1992 #else /* not NEW_GC */
/* Old (non-NEW_GC) mark-and-sweep collector entry point.  Runs one
   complete collection: prepare, mark the root set, sweep, finish.
   Phase order is significant; the USE_KKCC variant replaces recursive
   marking with an explicit mark stack. */
void garbage_collect_1 (void)
{
  /* Refuse to collect reentrantly, while forbidden, during redisplay,
     or while shutting down. */
  if (gc_in_progress
      || gc_currently_forbidden
      || in_display
      || preparing_for_armageddon)
    return;

  /* Very important to prevent GC during any of the following
     stuff that might run Lisp code; otherwise, we'll likely
     have infinite GC recursion. */
  speccount = begin_gc_forbidden ();

  show_gc_cursor_and_message ();

  /* Phase 1: set up for collection (pre-GC hooks etc.). */
  gc_prepare ();
#ifdef USE_KKCC
  /* KKCC: allocate the explicit mark stack (and, when debugging, the
     backtrace buffer) used instead of recursive marking. */
  kkcc_gc_stack_init();
#ifdef DEBUG_XEMACS
  kkcc_bt_init ();
#endif
#endif /* USE_KKCC */
  /* Phase 2: mark everything reachable from the roots. */
  gc_mark_root_set ();
#ifdef USE_KKCC
  /* Drain the mark stack; argument 0 means traverse without an
     incremental step limit. */
  kkcc_marking (0);
#endif /* USE_KKCC */
  gc_finish_mark ();
#ifdef USE_KKCC
  /* Release the KKCC mark-phase resources acquired above. */
  kkcc_gc_stack_free ();
#ifdef DEBUG_XEMACS
  kkcc_bt_free ();
#endif
#endif /* USE_KKCC */
  /* Phase 3: reclaim unmarked objects, then run post-GC bookkeeping. */
  gc_sweep_1 ();
  gc_finish ();

  remove_gc_cursor_and_message ();

  /* now stop inhibiting GC */
  unbind_to (speccount);
}
+ − 2034 #endif /* not NEW_GC */
+ − 2035
+ − 2036
+ − 2037 /************************************************************************/
+ − 2038 /* Initializations */
+ − 2039 /************************************************************************/
+ − 2040
+ − 2041 /* Initialization */
+ − 2042 static void
+ − 2043 common_init_gc_early (void)
+ − 2044 {
+ − 2045 Vgc_message = Qzero;
+ − 2046
+ − 2047 gc_currently_forbidden = 0;
+ − 2048 gc_hooks_inhibited = 0;
+ − 2049
+ − 2050 need_to_garbage_collect = always_gc;
+ − 2051
+ − 2052 gc_cons_threshold = GC_CONS_THRESHOLD;
+ − 2053 gc_cons_percentage = 40; /* #### what is optimal? */
+ − 2054 total_gc_usage_set = 0;
+ − 2055 #ifdef NEW_GC
+ − 2056 gc_cons_incremental_threshold = GC_CONS_INCREMENTAL_THRESHOLD;
+ − 2057 gc_incremental_traversal_threshold = GC_INCREMENTAL_TRAVERSAL_THRESHOLD;
3263
+ − 2058 #endif /* NEW_GC */
3092
+ − 2059 }
+ − 2060
/* Early per-run GC initialization.  Currently only clears state that a
   portable-dump load may have restored with stale contents. */
void
init_gc_early (void)
{
#ifdef NEW_GC
  /* Reset the finalizers_to_run list after pdump_load. */
  Vfinalizers_to_run = Qnil;
#endif /* NEW_GC */
}
+ − 2069
/* Re-run the shared early GC initialization (used on restart paths,
   e.g. after loading a dumped image). */
void
reinit_gc_early (void)
{
  common_init_gc_early ();
}
+ − 2075
/* One-time early GC initialization; delegates to the shared helper. */
void
init_gc_once_early (void)
{
  common_init_gc_early ();
}
+ − 2081
/* Register this module's Lisp symbols and subrs. */
void
syms_of_gc (void)
{
  DEFSYMBOL (Qpre_gc_hook);
  DEFSYMBOL (Qpost_gc_hook);
#ifdef NEW_GC
  DEFSUBR (Fgc_full);
  DEFSUBR (Fgc_incremental);
#ifdef ERROR_CHECK_GC
  DEFSUBR (Fgc_stats);
#endif /* ERROR_CHECK_GC */
#endif /* NEW_GC */
}
+ − 2095
/* Register this module's Lisp-visible variables and staticpro'd
   C-level Lisp objects.  The comment block after each DEFVAR_* is the
   variable's user-level doc string, extracted by make-docfile. */
void
vars_of_gc (void)
{
  staticpro_nodump (&pre_gc_cursor);

  QSin_garbage_collection = build_msg_string ("(in garbage collection)");
  staticpro (&QSin_garbage_collection);

  DEFVAR_INT ("gc-cons-threshold", &gc_cons_threshold /*
*Number of bytes of consing between full garbage collections.
\"Consing\" is a misnomer in that this actually counts allocation
of all different kinds of objects, not just conses.
Garbage collection can happen automatically once this many bytes have been
allocated since the last garbage collection. All data types count.

Garbage collection happens automatically when `eval' or `funcall' are
called. (Note that `funcall' is called implicitly as part of evaluation.)
By binding this temporarily to a large number, you can effectively
prevent garbage collection during a part of the program.

Normally, you cannot set this value less than 10,000 (if you do, it is
automatically reset during the next garbage collection). However, if
XEmacs was compiled with DEBUG_XEMACS, this does not happen, allowing
you to set this value very low to track down problems with insufficient
GCPRO'ing. If you set this to a negative number, garbage collection will
happen at *EVERY* call to `eval' or `funcall'. This is an extremely
effective way to check GCPRO problems, but be warned that your XEmacs
will be unusable! You almost certainly won't have the patience to wait
long enough to be able to set it back.

See also `consing-since-gc' and `gc-cons-percentage'.
*/ );

  DEFVAR_INT ("gc-cons-percentage", &gc_cons_percentage /*
*Percentage of memory allocated between garbage collections.

Garbage collection will happen if this percentage of the total amount of
memory used for data (see `lisp-object-memory-usage') has been allocated
since the last garbage collection. However, it will not happen if less
than `gc-cons-threshold' bytes have been allocated -- this sets an absolute
minimum in case very little data has been allocated or the percentage is
set very low. Set this to 0 to have garbage collection always happen after
`gc-cons-threshold' bytes have been allocated, regardless of current memory
usage.

See also `consing-since-gc' and `gc-cons-threshold'.
*/ );

#ifdef NEW_GC
  DEFVAR_INT ("gc-cons-incremental-threshold",
	      &gc_cons_incremental_threshold /*
*Number of bytes of consing between cycles of incremental garbage
collections. \"Consing\" is a misnomer in that this actually counts
allocation of all different kinds of objects, not just conses. The
next garbage collection cycle can happen automatically once this many
bytes have been allocated since the last garbage collection cycle.
All data types count.

See also `gc-cons-threshold'.
*/ );

  DEFVAR_INT ("gc-incremental-traversal-threshold",
	      &gc_incremental_traversal_threshold /*
*Number of elements processed in one cycle of incremental traversal.
*/ );
#endif /* NEW_GC */

  DEFVAR_BOOL ("purify-flag", &purify_flag /*
Non-nil means loading Lisp code in order to dump an executable.
This means that certain objects should be allocated in readonly space.
*/ );

  DEFVAR_BOOL ("garbage-collection-messages", &garbage_collection_messages /*
Non-nil means display messages at start and end of garbage collection.
*/ );
  garbage_collection_messages = 0;

  DEFVAR_LISP ("pre-gc-hook", &Vpre_gc_hook /*
Function or functions to be run just before each garbage collection.
Interrupts, garbage collection, and errors are inhibited while this hook
runs, so be extremely careful in what you add here. In particular, avoid
consing, and do not interact with the user.
*/ );
  Vpre_gc_hook = Qnil;

  DEFVAR_LISP ("post-gc-hook", &Vpost_gc_hook /*
Function or functions to be run just after each garbage collection.
Interrupts, garbage collection, and errors are inhibited while this hook
runs. Each hook is called with one argument which is an alist with
finalization data.
*/ );
  Vpost_gc_hook = Qnil;

  DEFVAR_LISP ("gc-message", &Vgc_message /*
String to print to indicate that a garbage collection is in progress.
This is printed in the echo area. If the selected frame is on a
window system and `gc-pointer-glyph' specifies a value (i.e. a pointer
image instance) in the domain of the selected frame, the mouse pointer
will change instead of this message being printed.
*/ );
  Vgc_message = build_string (gc_default_message);

  DEFVAR_LISP ("gc-pointer-glyph", &Vgc_pointer_glyph /*
Pointer glyph used to indicate that a garbage collection is in progress.
If the selected window is on a window system and this glyph specifies a
value (i.e. a pointer image instance) in the domain of the selected
window, the pointer will be changed as specified during garbage collection.
Otherwise, a message will be printed in the echo area, as controlled
by `gc-message'.
*/ );

#ifdef NEW_GC
  DEFVAR_BOOL ("allow-incremental-gc", &allow_incremental_gc /*
*Non-nil means to allow incremental garbage collection. Nil prevents
*incremental garbage collection, the garbage collector then only does
*full collects (even if (gc-incremental) is called).
*/ );

  Vfinalizers_to_run = Qnil;
  staticpro_nodump (&Vfinalizers_to_run);
#endif /* NEW_GC */
}
+ − 2218
+ − 2219 void
+ − 2220 complex_vars_of_gc (void)
+ − 2221 {
+ − 2222 Vgc_pointer_glyph = Fmake_glyph_internal (Qpointer);
+ − 2223 }