Mercurial > hg > xemacs-beta
annotate src/mc-alloc.c @ 5088:207dad9e74f7
Signal an error upon reading 1/0.
Thanks to Ben Wing for finding the problem. See xemacs-patches message with ID
<870180fe1003021239k77d81203xb7b6f94ce83208b3@mail.gmail.com> and also
<d7ba68911002240223u36bb87c2nf4ccfde06b5519cc@mail.gmail.com> in xemacs-beta.
| author | Jerry James <james@xemacs.org> |
|---|---|
| date | Tue, 02 Mar 2010 13:42:37 -0700 |
| parents | 24372c7e0e8f |
| children | 88bd4f3ef8e4 |
| rev | line source |
|---|---|
| 2720 | 1 /* New size-based allocator for XEmacs. |
| 2 Copyright (C) 2005 Marcus Crestani. | |
|
5042
f395ee7ad844
Fix some compile warnings, make vdb test code conditional on DEBUG_XEMACS
Ben Wing <ben@xemacs.org>
parents:
4125
diff
changeset
|
3 Copyright (C) 2010 Ben Wing. |
| 2720 | 4 |
| 5 This file is part of XEmacs. | |
| 6 | |
| 7 XEmacs is free software; you can redistribute it and/or modify it | |
| 8 under the terms of the GNU General Public License as published by the | |
| 9 Free Software Foundation; either version 2, or (at your option) any | |
| 10 later version. | |
| 11 | |
| 12 XEmacs is distributed in the hope that it will be useful, but WITHOUT | |
| 13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
| 14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
| 15 for more details. | |
| 16 | |
| 17 You should have received a copy of the GNU General Public License | |
| 18 along with XEmacs; see the file COPYING. If not, write to | |
| 19 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
| 20 Boston, MA 02111-1307, USA. */ | |
| 21 | |
| 22 /* Synched up with: Not in FSF. */ | |
| 23 | |
| 24 #include <config.h> | |
| 3092 | 25 |
| 2720 | 26 #include "lisp.h" |
| 27 #include "mc-alloc.h" | |
| 3092 | 28 #include "getpagesize.h" |
| 29 | |
| 30 | |
| 31 #if 0 | |
| 32 # define USE_MARK_BITS_FREE_LIST 1 | |
| 33 #endif | |
| 34 #if 1 | |
| 35 # define BLOCKTYPE_ALLOC_PAGE_HEADER 1 | |
| 36 #endif | |
| 37 | |
| 38 /* Memory protection needs the real system-dependent pagesize. */ | |
| 39 #ifndef WIN32_NATIVE | |
| 40 #include <unistd.h> /* for getpagesize () */ | |
| 41 #endif | |
| 42 #if defined (HAVE_GETPAGESIZE) | |
| 43 # define SYS_PAGE_SIZE getpagesize () | |
| 44 #elif defined (_SC_PAGESIZE) | |
| 45 # define SYS_PAGE_SIZE sysconf (_SC_PAGESIZE) | |
| 46 #elif defined (_SC_PAGE_SIZE) | |
| 47 # define SYS_PAGE_SIZE sysconf (_SC_PAGE_SIZE) | |
| 48 #elif defined(get_page_size) | |
| 49 # define SYS_PAGE_SIZE get_page_size () | |
| 50 #elif defined(PAGESIZE) | |
| 51 # define SYS_PAGE_SIZE PAGESIZE | |
| 52 #elif defined(PAGE_SIZE) | |
| 53 # define SYS_PAGE_SIZE PAGE_SIZE | |
| 54 #else | |
| 55 /* Valid page sizes are powers of 2. */ | |
| 56 # define SYS_PAGE_SIZE 4096 | |
| 57 #endif | |
| 2720 | 58 |
| 59 | |
| 60 /*--- configurable values ----------------------------------------------*/ | |
| 61 | |
| 62 /* Definition of size classes */ | |
| 63 | |
| 64 /* Heap used list constants: In the used heap, it is important to | |
| 65 quickly find a free spot for a new object. Therefore the size | |
| 66 classes of the used heap are defined by the size of the cells on | |
| 67 the pages. The size classes should match common object sizes, to | |
| 68 avoid wasting memory. */ | |
| 69 | |
| 70 /* Minimum object size in bytes. */ | |
| 3092 | 71 #if BITS_PER_EMACS_INT > 32 |
| 72 # define USED_LIST_MIN_OBJECT_SIZE 16 | |
| 73 #else | |
| 74 # define USED_LIST_MIN_OBJECT_SIZE 8 | |
| 75 #endif | |
| 2720 | 76 |
| 77 /* The step size by which the size classes increase (up to upper | |
| 78 threshold). This many bytes are mapped to a single used list: */ | |
| 3092 | 79 #if BITS_PER_EMACS_INT > 32 |
| 80 # define USED_LIST_LIN_STEP 8 | |
| 81 #else | |
| 82 # define USED_LIST_LIN_STEP 4 | |
| 83 #endif | |
| 2720 | 84 |
| 85 /* The upper threshold should always be set to PAGE_SIZE/2, because if | |
| 86 a object is larger than PAGE_SIZE/2 there is no room for any other | |
| 87 object on this page. Objects this big are kept in the page list of | |
| 88 the multiple pages, since a quick search for free spots is not | |
| 89 needed for this kind of pages (because there are no free spots). | |
| 90 PAGE_SIZES_DIV_2 defines maximum size of a used space list. */ | |
| 3092 | 91 #define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2 |
| 2720 | 92 |
| 93 | |
| 94 /* Heap free list constants: In the unused heap, the size of | |
| 95 consecutive memory tips the scales. A page is smallest entity which | |
| 96 is asked for. Therefore, the size classes of the unused heap are | |
| 97 defined by the number of consecutive pages. */ | |
| 98 /* Sizes up to this many pages each have their own free list. */ | |
| 99 #define FREE_LIST_LOWER_THRESHOLD 32 | |
| 100 /* The step size by which the size classes increase (up to upper | |
| 101 threshold). FREE_LIST_LIN_STEP number of sizes are mapped to a | |
| 102 single free list for sizes between FREE_LIST_LOWER_THRESHOLD and | |
| 103 FREE_LIST_UPPER_THRESHOLD. */ | |
| 104 #define FREE_LIST_LIN_STEP 8 | |
| 105 /* Sizes of at least this many pages are mapped to a single free | |
| 106 list. Blocks of memory larger than this number are all kept in a | |
| 107 single list, which makes searching this list slow. But objects that | |
| 108 big are really seldom. */ | |
| 109 #define FREE_LIST_UPPER_THRESHOLD 256 | |
| 110 | |
| 111 | |
| 3092 | 112 /* used heap list count */ |
| 113 #define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \ | |
| 114 - USED_LIST_MIN_OBJECT_SIZE) \ | |
| 115 / USED_LIST_LIN_STEP) + 1 ) + 1 | |
| 116 | |
| 117 /* free heap list count */ | |
| 118 #define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \ | |
| 119 - FREE_LIST_LOWER_THRESHOLD) \ | |
| 120 / FREE_LIST_LIN_STEP) \ | |
| 121 + FREE_LIST_LOWER_THRESHOLD) | |
| 122 | |
| 123 | |
| 2720 | 124 /* Maximum number of separately added heap sections. */ |
| 125 #if BITS_PER_EMACS_INT > 32 | |
| 126 # define MAX_HEAP_SECTS 2048 | |
| 127 #else | |
| 128 # define MAX_HEAP_SECTS 768 | |
| 129 #endif | |
| 130 | |
| 131 | |
| 132 /* Heap growth constants. Heap increases by any number between the | |
| 133 boundaries (unit is PAGE_SIZE). */ | |
| 3092 | 134 #define MIN_HEAP_INCREASE 256 |
| 2720 | 135 #define MAX_HEAP_INCREASE 256 /* not used */ |
| 136 | |
| 137 /* Every heap growth is calculated like this: | |
| 138 needed_pages + ( HEAP_SIZE / ( PAGE_SIZE * HEAP_GROWTH_DIVISOR )). | |
| 139 So the growth of the heap is influenced by the current size of the | |
| 140 heap, but kept between MIN_HEAP_INCREASE and MAX_HEAP_INCREASE | |
| 141 boundaries. | |
| 142 This reduces the number of heap sectors, the larger the heap grows | |
| 143 the larger are the newly allocated chunks. */ | |
| 144 #define HEAP_GROWTH_DIVISOR 3 | |
| 145 | |
| 146 | |
| 147 /* Zero memory before putting on free lists. */ | |
| 148 #define ZERO_MEM 1 | |
| 149 | |
| 150 | |
| 151 #ifndef CHAR_BIT /* should be included by limits.h */ | |
| 152 # define CHAR_BIT BITS_PER_CHAR | |
| 153 #endif | |
| 154 | |
| 155 | |
| 3092 | 156 |
| 157 /*--- values depending on PAGE_SIZE ------------------------------------*/ | |
| 2720 | 158 |
| 3092 | 159 /* initialized in init_mc_allocator () */ |
| 160 static EMACS_INT log_page_size; | |
| 161 static EMACS_INT page_size_div_2; | |
| 2720 | 162 |
| 3092 | 163 #undef PAGE_SIZE |
| 164 #define PAGE_SIZE SYS_PAGE_SIZE | |
| 165 #define LOG_PAGE_SIZE log_page_size | |
| 166 #define PAGE_SIZE_DIV_2 page_size_div_2 | |
| 2720 | 167 |
| 168 | |
| 169 /* Constants for heap address to page header mapping. */ | |
| 170 #define LOG_LEVEL2_SIZE 10 | |
| 171 #define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE) | |
| 172 #if BITS_PER_EMACS_INT > 32 | |
| 173 # define USE_HASH_TABLE 1 | |
| 174 # define LOG_LEVEL1_SIZE 11 | |
| 175 #else | |
| 176 # define LOG_LEVEL1_SIZE \ | |
| 177 (BITS_PER_EMACS_INT - LOG_LEVEL2_SIZE - LOG_PAGE_SIZE) | |
| 178 #endif | |
| 179 #define LEVEL1_SIZE (1 << LOG_LEVEL1_SIZE) | |
| 180 | |
| 181 #ifdef USE_HASH_TABLE | |
| 182 # define HASH(hi) ((hi) & (LEVEL1_SIZE - 1)) | |
| 4125 | 183 # define L1_INDEX(p) HASH ((EMACS_UINT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE)) |
| 2720 | 184 #else |
| 4125 | 185 # define L1_INDEX(p) ((EMACS_UINT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE)) |
| 2720 | 186 #endif |
| 4125 | 187 #define L2_INDEX(p) (((EMACS_UINT) p >> LOG_PAGE_SIZE) & (LEVEL2_SIZE - 1)) |
| 2720 | 188 |
| 189 | |
| 190 | |
| 191 | |
| 192 /*--- structs and typedefs ---------------------------------------------*/ | |
| 193 | |
| 3092 | 194 /* Links the free lists (mark_bit_free_list and cell free list). */ |
/* Links the free lists (mark_bit_free_list and cell free list).
   A freed cell is overlaid with this struct; the lrecord header keeps
   it the same shape as a live object. */
typedef struct free_link
{
  struct lrecord_header lheader;
  struct free_link *next_free;	/* next element on the free list */
} free_link;
| 200 | |
| 201 | |
| 3092 | 202 /* Header for pages. They are held in a doubly linked list. */ |
/* Header for pages.  They are held in a doubly linked list. */
typedef struct page_header
{
  struct page_header *next;	/* next page_header */
  struct page_header *prev;	/* previous page_header */
  /* Field plh holds pointer to the according header of the page list.*/
  struct page_list_header *plh;	/* page list header */
  free_link *free_list;		/* links free cells on page */
  EMACS_INT n_pages;		/* number of pages */
  EMACS_INT cell_size;		/* size of cells on page */
  EMACS_INT cells_on_page;	/* total number of cells on page */
  EMACS_INT cells_used;		/* number of used cells on page */
  /* If the number of objects on page is bigger than BITS_PER_EMACS_INT,
     the mark bits are put in an extra memory area.  Then the field
     mark_bits holds the pointer to this area.  If the number of
     objects is smaller than BITS_PER_EMACS_INT, the mark bits are held
     in the mark_bits word directly, without an additional indirection. */
  unsigned int black_bit:1;	/* objects on page are black */
  unsigned int dirty_bit:1;	/* page is dirty */
  unsigned int protection_bit:1; /* page is write protected */
  unsigned int array_bit:1;	/* page holds arrays */
  Rawbyte *mark_bits;		/* pointer to mark bits */
  void *heap_space;		/* pointer to heap, where objects
				   are stored */
} page_header;
| 227 | |
| 228 | |
| 229 /* Different list types. */ | |
/* Different list types: a page list holds either pages currently in
   use or pages on the free heap. */
enum list_type_enum {
  USED_LIST,
  FREE_LIST
};
| 234 | |
| 235 | |
| 236 /* Header for page lists. Field list_type holds the type of the list. */ | |
/* Header for page lists.  Field list_type holds the type of the list. */
typedef struct page_list_header
{
  enum list_type_enum list_type;	/* the type of the list */
  /* Size holds the size of one cell (in bytes) in a used heap list, or the
     size of the heap sector (in number of pages). */
  size_t size;			/* size of one cell / heap sector */
  page_header *first;		/* first of page_header list */
  page_header *last;		/* last of page_header list */
  /* If the number of objects on page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, which is linked in this free list, if not used.  If the
     number of objects is smaller than BITS_PER_EMACS_INT, the mark bits
     are held in the mark bit EMACS_INT directly, without an
     additional indirection. */
  free_link *mark_bit_free_list;

#ifdef MEMORY_USAGE_STATS
  EMACS_INT page_count;		/* number of pages in list */
  EMACS_INT used_cells;		/* number of objects in list */
  EMACS_INT used_space;		/* used space */
  EMACS_INT total_cells;	/* number of available cells */
  EMACS_INT total_space;	/* available space */
#endif
} page_list_header;
| 261 | |
| 262 | |
| 263 /* The heap sectors are stored with their real start pointer and their | |
| 264 real size. Not aligned to PAGE_SIZE. Needed for freeing heap sectors. */ | |
/* The heap sectors are stored with their real start pointer and their
   real size.  Not aligned to PAGE_SIZE.  Needed for freeing heap sectors. */
typedef struct heap_sect {
  void *real_start;		/* real start pointer (NOT aligned) */
  size_t real_size;		/* NOT multiple of PAGE_SIZE */
  void *start;			/* aligned start pointer */
  EMACS_INT n_pages;		/* multiple of PAGE_SIZE */
} heap_sect;
| 271 | |
| 272 | |
| 273 /* 2nd tree level for mapping of heap addresses to page headers. */ | |
/* 2nd tree level for mapping of heap addresses to page headers. */
typedef struct level_2_lookup_tree {
  page_header *index[LEVEL2_SIZE];	/* link to page header */
  EMACS_INT key;			/* high order address bits */
#ifdef USE_HASH_TABLE
  struct level_2_lookup_tree *hash_link; /* hash chain link */
#endif
} level_2_lookup_tree;
| 281 | |
| 282 | |
| 283 | |
| 284 /*--- global variable definitions --------------------------------------*/ | |
| 285 | |
| 286 /* All global allocator variables are kept in this struct. */ | |
/* All global allocator variables are kept in this struct. */
typedef struct mc_allocator_globals_type {

  /* heap size */
  EMACS_INT heap_size;

  /* list of all separately allocated chunks of heap */
  heap_sect heap_sections[MAX_HEAP_SECTS];
  EMACS_INT n_heap_sections;

  /* Holds all allocated pages, each object size class in its separate list,
     to guarantee fast allocation on partially filled pages. */
  page_list_header *used_heap_pages;

  /* Holds all free pages in the heap.  N multiples of PAGE_SIZE are
     kept on the Nth free list.  Contiguous pages are coalesced. */
  page_list_header free_heap_pages[N_FREE_PAGE_LISTS];

  /* ptr lookup table */
  level_2_lookup_tree **ptr_lookup_table;

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  /* page header free list */
  free_link *page_header_free_list;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  EMACS_INT malloced_bytes;
#endif
} mc_allocator_globals_type;

mc_allocator_globals_type mc_allocator_globals;
| 318 | |
| 319 | |
| 320 | |
| 321 | |
| 322 /*--- macro accessors --------------------------------------------------*/ | |
| 323 | |
| 324 #define USED_HEAP_PAGES(i) \ | |
| 325 ((page_list_header*) &mc_allocator_globals.used_heap_pages[i]) | |
| 326 | |
| 327 #define FREE_HEAP_PAGES(i) \ | |
| 328 ((page_list_header*) &mc_allocator_globals.free_heap_pages[i]) | |
| 329 | |
| 330 #define PLH(plh) plh | |
| 331 # define PLH_LIST_TYPE(plh) PLH (plh)->list_type | |
| 332 # define PLH_SIZE(plh) PLH (plh)->size | |
| 333 # define PLH_FIRST(plh) PLH (plh)->first | |
| 334 # define PLH_LAST(plh) PLH (plh)->last | |
| 335 # define PLH_MARK_BIT_FREE_LIST(plh) PLH (plh)->mark_bit_free_list | |
| 336 #ifdef MEMORY_USAGE_STATS | |
| 337 # define PLH_PAGE_COUNT(plh) PLH (plh)->page_count | |
| 338 # define PLH_USED_CELLS(plh) PLH (plh)->used_cells | |
| 339 # define PLH_USED_SPACE(plh) PLH (plh)->used_space | |
| 340 # define PLH_TOTAL_CELLS(plh) PLH (plh)->total_cells | |
| 341 # define PLH_TOTAL_SPACE(plh) PLH (plh)->total_space | |
| 342 #endif | |
| 343 | |
| 344 #define PH(ph) ph | |
| 345 # define PH_NEXT(ph) PH (ph)->next | |
| 346 # define PH_PREV(ph) PH (ph)->prev | |
| 347 # define PH_PLH(ph) PH (ph)->plh | |
| 348 # define PH_FREE_LIST(ph) PH (ph)->free_list | |
| 349 # define PH_N_PAGES(ph) PH (ph)->n_pages | |
| 350 # define PH_CELL_SIZE(ph) PH (ph)->cell_size | |
| 351 # define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page | |
| 352 # define PH_CELLS_USED(ph) PH (ph)->cells_used | |
| 3092 | 353 # define PH_BLACK_BIT(ph) PH (ph)->black_bit |
| 354 # define PH_DIRTY_BIT(ph) PH (ph)->dirty_bit | |
| 355 # define PH_PROTECTION_BIT(ph) PH (ph)->protection_bit | |
| 356 # define PH_ARRAY_BIT(ph) PH (ph)->array_bit | |
| 2720 | 357 # define PH_MARK_BITS(ph) PH (ph)->mark_bits |
| 358 # define PH_HEAP_SPACE(ph) PH (ph)->heap_space | |
| 359 #define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph)) | |
| 360 #define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph)) | |
| 361 | |
| 362 #define HEAP_SIZE mc_allocator_globals.heap_size | |
| 363 | |
| 364 #ifdef MEMORY_USAGE_STATS | |
| 365 # define MC_MALLOCED_BYTES mc_allocator_globals.malloced_bytes | |
| 366 #endif | |
| 367 | |
| 368 #define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index] | |
| 369 #define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections | |
| 370 | |
| 3092 | 371 #ifndef BLOCKTYPE_ALLOC_PAGE_HEADER |
| 2720 | 372 #define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list |
| 3092 | 373 #endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */ |
| 2720 | 374 |
| 375 #define NEXT_FREE(free_list) ((free_link*) free_list)->next_free | |
| 376 #define FREE_LIST(free_list) (free_link*) (free_list) | |
| 377 | |
| 378 #define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i] | |
| 379 #define LEVEL2(l2, i) l2->index[i] | |
| 380 # define LEVEL2_KEY(l2) l2->key | |
| 381 #ifdef USE_HASH_TABLE | |
| 382 # define LEVEL2_HASH_LINK(l2) l2->hash_link | |
| 383 #endif | |
| 384 | |
| 385 #if ZERO_MEM | |
| 386 # define ZERO_HEAP_SPACE(ph) \ | |
| 387 memset (PH_HEAP_SPACE (ph), '\0', PH_N_PAGES (ph) * PAGE_SIZE) | |
| 388 # define ZERO_PAGE_HEADER(ph) memset (ph, '\0', sizeof (page_header)) | |
| 389 #endif | |
| 390 | |
| 391 #define div_PAGE_SIZE(x) (x >> LOG_PAGE_SIZE) | |
| 392 #define mult_PAGE_SIZE(x) (x << LOG_PAGE_SIZE) | |
| 393 | |
| 394 #define BYTES_TO_PAGES(bytes) (div_PAGE_SIZE ((bytes + (PAGE_SIZE - 1)))) | |
| 395 | |
| 396 #define PAGE_SIZE_ALIGNMENT(address) \ | |
| 4125 | 397 (void *) ((((EMACS_UINT) (address)) + PAGE_SIZE) & ~(PAGE_SIZE - 1)) |
| 2720 | 398 |
| 399 #define PH_ON_FREE_LIST_P(ph) \ | |
| 400 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST)) | |
| 401 | |
| 402 #define PH_ON_USED_LIST_P(ph) \ | |
| 403 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST)) | |
| 404 | |
| 405 | |
| 3092 | 406 /* Number of mark bits: minimum 1, maximum 8. */ |
| 407 #define N_MARK_BITS 2 | |
| 2720 | 408 |
| 409 | |
| 410 | |
| 411 /************************************************************************/ | |
| 412 /* MC Allocator */ | |
| 413 /************************************************************************/ | |
| 414 | |
| 3305 | 415 /* Set to 1 if memory becomes short. */ |
| 416 EMACS_INT memory_shortage; | |
| 2720 | 417 |
| 418 /*--- misc functions ---------------------------------------------------*/ | |
| 419 | |
| 420 /* moved here from alloc.c */ | |
| 421 #ifdef ERROR_CHECK_GC | |
| 422 static void | |
| 423 deadbeef_memory (void *ptr, Bytecount size) | |
| 424 { | |
| 425 UINT_32_BIT *ptr4 = (UINT_32_BIT *) ptr; | |
| 426 Bytecount beefs = size >> 2; | |
| 427 | |
| 428 /* In practice, size will always be a multiple of four. */ | |
| 429 while (beefs--) | |
| 430 (*ptr4++) = 0xDEADBEEF; /* -559038737 base 10 */ | |
| 431 } | |
| 432 #endif /* ERROR_CHECK_GC */ | |
| 433 | |
| 434 /* Visits all pages (page_headers) hooked into the used heap pages | |
| 435 list and executes f with the current page header as | |
| 3303 | 436 argument. Needed for sweep. Returns number of processed pages. */ |
| 437 static EMACS_INT | |
| 438 visit_all_used_page_headers (EMACS_INT (*f) (page_header *ph)) | |
| 2720 | 439 { |
| 3303 | 440 EMACS_INT number_of_pages_processed = 0; |
| 3092 | 441 EMACS_INT i; |
| 2720 | 442 for (i = 0; i < N_USED_PAGE_LISTS; i++) |
| 443 if (PLH_FIRST (USED_HEAP_PAGES (i))) | |
| 444 { | |
| 445 page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i)); | |
| 446 while (PH_NEXT (ph)) | |
| 447 { | |
| 448 page_header *next = PH_NEXT (ph); /* in case f removes the page */ | |
| 3303 | 449 number_of_pages_processed += f (ph); |
| 2720 | 450 ph = next; |
| 451 } | |
| 3303 | 452 number_of_pages_processed += f (ph); |
| 2720 | 453 } |
| 3303 | 454 return number_of_pages_processed; |
| 2720 | 455 } |
| 456 | |
| 457 | |
| 458 | |
| 459 | |
| 460 /*--- mapping of heap addresses to page headers and mark bits ----------*/ | |
| 461 | |
| 462 /* Sets a heap pointer and page header pair into the lookup table. */ | |
| 463 static void | |
| 464 set_lookup_table (void *ptr, page_header *ph) | |
| 465 { | |
| 3092 | 466 EMACS_INT l1_index = L1_INDEX (ptr); |
| 2720 | 467 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); |
| 468 #ifdef USE_HASH_TABLE | |
| 469 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) | |
| 470 l2 = LEVEL2_HASH_LINK (l2); | |
| 471 #endif | |
| 472 if (!l2) | |
| 473 { | |
| 474 l2 = (level_2_lookup_tree*) | |
| 475 xmalloc_and_zero (sizeof (level_2_lookup_tree)); | |
| 476 #ifdef MEMORY_USAGE_STATS | |
| 477 MC_MALLOCED_BYTES += | |
| 478 malloced_storage_size (0, sizeof (level_2_lookup_tree), 0); | |
| 479 #endif | |
| 2932 | 480 memset (l2, '\0', sizeof (level_2_lookup_tree)); |
| 2720 | 481 #ifdef USE_HASH_TABLE |
| 482 LEVEL2_HASH_LINK (l2) = PTR_LOOKUP_TABLE (l1_index); | |
| 483 #endif | |
| 484 PTR_LOOKUP_TABLE (l1_index) = l2; | |
| 485 LEVEL2_KEY (l2) = l1_index; | |
| 486 } | |
| 487 LEVEL2 (l2, L2_INDEX (ptr)) = ph; | |
| 488 } | |
| 489 | |
| 490 | |
| 491 #ifdef UNSET_LOOKUP_TABLE | |
| 492 /* Set the lookup table to 0 for given heap address. */ | |
| 493 static void | |
| 494 unset_lookup_table (void *ptr) | |
| 495 { | |
| 3092 | 496 EMACS_INT l1_index = L1_INDEX (ptr); |
| 2720 | 497 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); |
| 498 #ifdef USE_HASH_TABLE | |
| 499 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) | |
| 500 l2 = LEVEL2_HASH_LINK (l2); | |
| 501 #endif | |
| 502 if (l2) { | |
| 503 LEVEL2 (l2, L2_INDEX (ptr)) = 0; | |
| 504 } | |
| 505 } | |
| 506 #endif | |
| 507 | |
| 508 /* Returns the page header of a given heap address, or 0 if not in table. | |
| 509 For internal use, no error checking. */ | |
| 510 static page_header * | |
| 511 get_page_header_internal (void *ptr) | |
| 512 { | |
| 3092 | 513 EMACS_INT l1_index = L1_INDEX (ptr); |
| 2720 | 514 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); |
| 515 #ifdef USE_HASH_TABLE | |
| 516 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) | |
| 517 l2 = LEVEL2_HASH_LINK (l2); | |
| 518 #endif | |
| 519 if (!l2) | |
| 520 return 0; | |
| 521 return LEVEL2 (l2, L2_INDEX (ptr)); | |
| 522 } | |
| 523 | |
| 524 /* Returns the page header of a given heap address, or 0 if not in table. */ | |
| 525 static page_header * | |
| 526 get_page_header (void *ptr) | |
| 527 { | |
| 3092 | 528 EMACS_INT l1_index = L1_INDEX (ptr); |
| 2720 | 529 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); |
| 2723 | 530 assert (l2); |
| 2720 | 531 #ifdef USE_HASH_TABLE |
| 532 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) | |
| 533 l2 = LEVEL2_HASH_LINK (l2); | |
| 534 #endif | |
| 2723 | 535 assert (LEVEL2 (l2, L2_INDEX (ptr))); |
| 2720 | 536 return LEVEL2 (l2, L2_INDEX (ptr)); |
| 537 } | |
| 538 | |
| 539 /* Returns the mark bit index of a given heap address. */ | |
| 540 static EMACS_INT | |
| 541 get_mark_bit_index (void *ptr, page_header *ph) | |
| 542 { | |
| 543 EMACS_INT cell_size = PH_CELL_SIZE (ph); | |
| 544 if (cell_size) | |
| 3092 | 545 return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size) |
| 546 * N_MARK_BITS; | |
| 2720 | 547 else /* only one object on page */ |
| 548 return 0; | |
| 549 } | |
| 550 | |
| 551 | |
| 552 /* Adds addresses of pages to lookup table. */ | |
| 553 static void | |
| 554 add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages) | |
| 555 { | |
| 3092 | 556 Rawbyte *p = (Rawbyte *) PH_HEAP_SPACE (ph); |
| 2720 | 557 EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages); |
| 3092 | 558 for (p = (Rawbyte *) PH_HEAP_SPACE (ph); |
| 2720 | 559 (EMACS_INT) p < end_of_section; p += PAGE_SIZE) |
| 560 set_lookup_table (p, ph); | |
| 561 } | |
| 562 | |
| 563 | |
| 564 /* Initializes lookup table. */ | |
| 565 static void | |
| 566 init_lookup_table (void) | |
| 567 { | |
| 3092 | 568 EMACS_INT i; |
| 2720 | 569 for (i = 0; i < LEVEL1_SIZE; i++) |
| 570 PTR_LOOKUP_TABLE (i) = 0; | |
| 571 } | |
| 572 | |
| 573 | |
| 574 | |
| 575 | |
| 576 /*--- mark bits --------------------------------------------------------*/ | |
| 577 | |
| 578 /*--- bit operations --- */ | |
| 579 | |
| 580 /* Allocates a bit array of length bits. */ | |
| 3092 | 581 static Rawbyte * |
| 2720 | 582 alloc_bit_array(size_t bits) |
| 583 { | |
| 3092 | 584 Rawbyte *bit_array; |
| 585 #ifdef USE_MARK_BITS_FREE_LIST | |
| 586 size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte); | |
| 587 #else /* not USE_MARK_BITS_FREE_LIST */ | |
| 588 size_t size = | |
| 589 ALIGN_FOR_TYPE (((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte), | |
| 590 Rawbyte *); | |
| 591 #endif /* not USE_MARK_BITS_FREE_LIST */ | |
| 2720 | 592 if (size < sizeof (free_link)) size = sizeof (free_link); |
| 593 #ifdef MEMORY_USAGE_STATS | |
| 594 MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0); | |
| 595 #endif | |
| 3092 | 596 bit_array = (Rawbyte *) xmalloc_and_zero (size); |
| 2720 | 597 return bit_array; |
| 598 } | |
| 599 | |
| 600 | |
| 601 /* Returns the bit value at pos. */ | |
| 602 static EMACS_INT | |
| 3092 | 603 get_bit (Rawbyte *bit_array, EMACS_INT pos) |
| 2720 | 604 { |
| 605 #if N_MARK_BITS > 1 | |
| 606 EMACS_INT result = 0; | |
| 607 EMACS_INT i; | |
| 608 #endif | |
| 609 bit_array += pos / CHAR_BIT; | |
| 610 #if N_MARK_BITS > 1 | |
| 611 for (i = 0; i < N_MARK_BITS; i++) | |
| 3092 | 612 result |= ((*bit_array & (1 << ((pos + i) % CHAR_BIT))) != 0) << i; |
| 613 return result; | |
| 2720 | 614 #else |
| 615 return (*bit_array & (1 << (pos % CHAR_BIT))) != 0; | |
| 616 #endif | |
| 617 } | |
| 618 | |
| 619 | |
| 620 /* Bit_Arrays bit at pos to val. */ | |
| 621 static void | |
| 4125 | 622 set_bit (Rawbyte *bit_array, EMACS_INT pos, EMACS_UINT val) |
| 2720 | 623 { |
| 624 #if N_MARK_BITS > 1 | |
| 625 EMACS_INT i; | |
| 626 #endif | |
| 627 bit_array += pos / CHAR_BIT; | |
| 628 #if N_MARK_BITS > 1 | |
| 629 for (i = 0; i < N_MARK_BITS; i++) | |
| 630 if ((val >> i) & 1) | |
| 631 *bit_array |= 1 << ((pos + i) % CHAR_BIT); | |
| 632 else | |
| 633 *bit_array &= ~(1 << ((pos + i) % CHAR_BIT)); | |
| 634 #else | |
| 635 if (val) | |
| 636 *bit_array |= 1 << (pos % CHAR_BIT); | |
| 637 else | |
| 638 *bit_array &= ~(1 << (pos % CHAR_BIT)); | |
| 639 #endif | |
| 640 } | |
| 641 | |
| 642 | |
| 643 /*--- mark bit functions ---*/ | |
| 3092 | 644 #define USE_PNTR_MARK_BITS(ph) \ |
| 645 ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) > BITS_PER_EMACS_INT) | |
| 646 #define USE_WORD_MARK_BITS(ph) \ | |
| 647 ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) <= BITS_PER_EMACS_INT) | |
| 2720 | 648 |
| 3092 | 649 #define GET_BIT_WORD(b, p) get_bit ((Rawbyte *) &b, p) |
| 2720 | 650 #define GET_BIT_PNTR(b, p) get_bit (b, p) |
| 651 | |
| 3092 | 652 #define SET_BIT_WORD(b, p, v) set_bit ((Rawbyte *) &b, p, v) |
| 2720 | 653 #define SET_BIT_PNTR(b, p, v) set_bit (b, p, v) |
| 654 | |
| 655 #define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0 | |
| 3092 | 656 #define ZERO_MARK_BITS_PNTR(ph) \ |
| 657 do { \ | |
| 658 memset (PH_MARK_BITS (ph), '\0', \ | |
| 659 ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) \ | |
| 660 + CHAR_BIT - 1) / CHAR_BIT * sizeof (Rawbyte)); \ | |
| 2720 | 661 } while (0) |
| 662 | |
| 663 #define GET_BIT(bit, ph, p) \ | |
| 664 do { \ | |
| 665 if (USE_PNTR_MARK_BITS (ph)) \ | |
| 666 bit = GET_BIT_PNTR (PH_MARK_BITS (ph), p); \ | |
| 667 else \ | |
| 668 bit = GET_BIT_WORD (PH_MARK_BITS (ph), p); \ | |
| 669 } while (0) | |
| 670 | |
| 671 #define SET_BIT(ph, p, v) \ | |
| 672 do { \ | |
| 673 if (USE_PNTR_MARK_BITS (ph)) \ | |
| 674 SET_BIT_PNTR (PH_MARK_BITS (ph), p, v); \ | |
| 675 else \ | |
| 676 SET_BIT_WORD (PH_MARK_BITS (ph), p, v); \ | |
| 677 } while (0) | |
| 678 | |
| 679 #define ZERO_MARK_BITS(ph) \ | |
| 680 do { \ | |
| 681 if (USE_PNTR_MARK_BITS (ph)) \ | |
| 682 ZERO_MARK_BITS_PNTR (ph); \ | |
| 683 else \ | |
| 684 ZERO_MARK_BITS_WORD (ph); \ | |
| 685 } while (0) | |
| 686 | |
| 687 | |
| 688 /* Allocates mark-bit space either from a free list or from the OS | |
| 689 for the given page header. */ | |
| 3092 | 690 static Rawbyte * |
| 2720 | 691 alloc_mark_bits (page_header *ph) |
| 692 { | |
| 3092 | 693 Rawbyte *result; |
| 694 #ifdef USE_MARK_BITS_FREE_LIST | |
| 2720 | 695 if (PH_MARK_BIT_FREE_LIST (ph) == 0) |
| 3092 | 696 result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS); |
| 2720 | 697 else |
| 698 { | |
| 3092 | 699 result = (Rawbyte *) PH_MARK_BIT_FREE_LIST (ph); |
| 2720 | 700 PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result); |
| 701 } | |
| 3092 | 702 #else /* not USE_MARK_BITS_FREE_LIST */ |
| 703 result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS); | |
| 704 #endif /* not USE_MARK_BITS_FREE_LIST */ | |
| 2720 | 705 return result; |
| 706 } | |
| 707 | |
| 708 | |
| 709 /* Frees by maintaining a free list. */ | |
| 710 static void | |
| 711 free_mark_bits (page_header *ph) | |
| 712 { | |
| 3092 | 713 #ifdef USE_MARK_BITS_FREE_LIST |
| 714 NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph); | |
| 715 PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph)); | |
| 716 #else /* not USE_MARK_BITS_FREE_LIST */ | |
| 2720 | 717 if (PH_MARK_BITS (ph)) |
| 3092 | 718 free (PH_MARK_BITS (ph)); |
| 719 #endif /* not USE_MARK_BITS_FREE_LIST */ | |
| 2720 | 720 } |
| 721 | |
| 722 | |
| 723 /* Installs mark bits and zeros bits. */ | |
| 724 static void | |
| 725 install_mark_bits (page_header *ph) | |
| 726 { | |
| 727 if (USE_PNTR_MARK_BITS (ph)) | |
| 728 { | |
| 729 PH_MARK_BITS (ph) = alloc_mark_bits (ph); | |
| 730 ZERO_MARK_BITS_PNTR (ph); | |
| 731 } | |
| 732 else | |
| 733 ZERO_MARK_BITS_WORD (ph); | |
| 734 } | |
| 735 | |
| 736 | |
| 737 /* Cleans and frees the mark bits of the given page_header. */ | |
| 738 static void | |
| 739 remove_mark_bits (page_header *ph) | |
| 740 { | |
| 741 if (USE_PNTR_MARK_BITS (ph)) | |
| 742 free_mark_bits (ph); | |
| 743 } | |
| 744 | |
| 745 | |
| 746 /* Zeros all mark bits in given header. */ | |
| 747 static void | |
| 748 zero_mark_bits (page_header *ph) | |
| 749 { | |
| 750 ZERO_MARK_BITS (ph); | |
| 751 } | |
| 752 | |
| 753 | |
| 754 /* Returns mark bit for given heap pointer. */ | |
| 755 EMACS_INT | |
| 756 get_mark_bit (void *ptr) | |
| 757 { | |
| 758 EMACS_INT bit = 0; | |
| 759 page_header *ph = get_page_header (ptr); | |
| 760 gc_checking_assert (ph && PH_ON_USED_LIST_P (ph)); | |
| 761 if (ph) | |
| 762 { | |
| 763 GET_BIT (bit, ph, get_mark_bit_index (ptr, ph)); | |
| 764 } | |
| 765 return bit; | |
| 766 } | |
| 767 | |
| 768 | |
| 769 /* Sets mark bit for given heap pointer. */ | |
| 770 void | |
| 771 set_mark_bit (void *ptr, EMACS_INT value) | |
| 772 { | |
| 773 page_header *ph = get_page_header (ptr); | |
| 774 assert (ph && PH_ON_USED_LIST_P (ph)); | |
| 775 if (ph) | |
| 776 { | |
| 3092 | 777 if (value == BLACK) |
| 778 if (!PH_BLACK_BIT (ph)) | |
| 779 PH_BLACK_BIT (ph) = 1; | |
| 2720 | 780 SET_BIT (ph, get_mark_bit_index (ptr, ph), value); |
| 781 } | |
| 782 } | |
| 783 | |
| 784 | |
| 785 | |
| 786 | |
| 787 /*--- page header functions --------------------------------------------*/ | |
| 788 | |
| 3092 | 789 #ifdef BLOCKTYPE_ALLOC_PAGE_HEADER |
| 790 #include "blocktype.h" | |
| 791 | |
| 792 struct page_header_blocktype | |
| 793 { | |
| 794 Blocktype_declare (page_header); | |
| 795 } *the_page_header_blocktype; | |
| 796 #endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */ | |
| 797 | |
/* Allocates a page header either from a free list or from the OS.
   With BLOCKTYPE_ALLOC_PAGE_HEADER, the blocktype allocator is used
   and the header is zeroed; otherwise a simple free list backed by
   xmalloc_and_zero is maintained. */
static page_header *
alloc_page_header (void)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  page_header *result;
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
  result = Blocktype_alloc (the_page_header_blocktype);
  /* Blocktype_alloc may return recycled storage; clear it. */
  ZERO_PAGE_HEADER (result);
  return result;
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
  page_header *result;
  if (PAGE_HEADER_FREE_LIST == 0)
    {
      /* Free list exhausted: get zeroed storage from the OS. */
      result =
	(page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
    }
  else
    {
      /* Pop the first recycled header off the free list. */
      result = (page_header*) PAGE_HEADER_FREE_LIST;
      PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
    }
  return result;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}
| 828 | |
| 829 | |
/* Frees given page header by maintaining a free list.
   Counterpart of alloc_page_header: returns the header either to the
   blocktype allocator or to the hand-rolled free list. */
static void
free_page_header (page_header *ph)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  Blocktype_free (the_page_header_blocktype, ph);
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
#if ZERO_MEM
  /* Optionally scrub the header before recycling it. */
  ZERO_PAGE_HEADER (ph);
#endif
  NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
  PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}
| 844 | |
| 845 | |
/* Adds given page header to given page list header's list.
   PH is pushed on the front of PLH's doubly-linked list and its
   back-pointer (PH_PLH) is set to PLH. */
static void
add_page_header_to_plh (page_header *ph, page_list_header *plh)
{
  /* insert at the front of the list */
  PH_PREV (ph) = 0;
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PLH (ph) = plh;
  /* if list is not empty, set prev in the first element */
  if (PLH_FIRST (plh))
    PH_PREV (PLH_FIRST (plh)) = ph;
  /* one element in list is first and last at the same time */
  PLH_FIRST (plh) = ph;
  if (!PLH_LAST (plh))
    PLH_LAST (plh) = ph;

#ifdef MEMORY_USAGE_STATS
  /* bump page count */
  PLH_PAGE_COUNT (plh)++;
#endif

}
| 868 | |
| 869 | |
/* Removes given page header from given page list header's list.
   Unlinks PH from PLH's doubly-linked list, fixing up the list's
   first/last pointers and the neighbors' prev/next pointers.
   NOTE(review): PH_PLH (ph) is left pointing at PLH; callers that
   need it cleared do so themselves (see remove_page_from_free_list). */
static void
remove_page_header_from_plh (page_header *ph, page_list_header *plh)
{
  if (PLH_FIRST (plh) == ph)
    PLH_FIRST (plh) = PH_NEXT (ph);
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);

#ifdef MEMORY_USAGE_STATS
  /* decrease page count */
  PLH_PAGE_COUNT (plh)--;
#endif
}
| 888 | |
| 889 | |
/* Moves a page header to the front of its page header list.
   This is used during sweep: pages with some alive objects are moved to
   the front.  This makes allocation faster, all pages with free slots
   can be found at the front of the list. */
static void
move_page_header_to_front (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);
  /* is page already first? */
  if (ph == PLH_FIRST (plh)) return;
  /* remove from list; ph is not first here, so PH_PREV (ph) != 0 and
     the list has at least two elements */
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
  /* insert at the front; the old first element becomes PH_NEXT (ph)
     and is non-null, so the unconditional store below is safe */
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PREV (ph) = 0;
  PH_PREV (PH_NEXT (ph)) = ph;
  PLH_FIRST (plh) = ph;
}
| 913 | |
| 914 | |
| 915 | |
| 916 | |
| 917 /*--- page list functions ----------------------------------------------*/ | |
| 918 | |
| 919 /* Returns the index of the used heap list according to given size. */ | |
| 920 static int | |
| 921 get_used_list_index (size_t size) | |
| 922 { | |
| 923 if (size <= USED_LIST_MIN_OBJECT_SIZE) | |
| 3092 | 924 { |
| 925 // printf ("size %d -> index %d\n", size, 0); | |
| 926 return 0; | |
| 927 } | |
| 928 if (size <= (size_t) USED_LIST_UPPER_THRESHOLD) | |
| 929 { | |
| 930 // printf ("size %d -> index %d\n", size, | |
| 931 // ((size - USED_LIST_MIN_OBJECT_SIZE - 1) | |
| 932 // / USED_LIST_LIN_STEP) + 1); | |
| 933 return ((size - USED_LIST_MIN_OBJECT_SIZE - 1) | |
| 934 / USED_LIST_LIN_STEP) + 1; | |
| 935 } | |
| 936 // printf ("size %d -> index %d\n", size, N_USED_PAGE_LISTS - 1); | |
| 2720 | 937 return N_USED_PAGE_LISTS - 1; |
| 938 } | |
| 939 | |
| 940 /* Returns the size of the used heap list according to given index. */ | |
| 941 static size_t | |
| 942 get_used_list_size_value (int used_index) | |
| 943 { | |
| 944 if (used_index < N_USED_PAGE_LISTS - 1) | |
| 945 return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE; | |
| 946 return 0; | |
| 947 } | |
| 948 | |
| 949 | |
| 950 /* Returns the index of the free heap list according to given size. */ | |
| 3092 | 951 static EMACS_INT |
| 2720 | 952 get_free_list_index (EMACS_INT n_pages) |
| 953 { | |
| 954 if (n_pages == 0) | |
| 955 return 0; | |
| 956 if (n_pages <= FREE_LIST_LOWER_THRESHOLD) | |
| 957 return n_pages - 1; | |
| 958 if (n_pages >= FREE_LIST_UPPER_THRESHOLD - 1) | |
| 959 return N_FREE_PAGE_LISTS - 1; | |
| 960 return ((n_pages - FREE_LIST_LOWER_THRESHOLD - 1) | |
| 961 / FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD; | |
| 962 | |
| 963 } | |
| 964 | |
| 965 | |
/* Returns the size in number of pages of the given free list at index.
   Inverse of get_free_list_index for the lower and linear ranges.
   NOTE(review): the second test uses >= N_FREE_PAGE_LISTS, so the
   last valid index (N_FREE_PAGE_LISTS - 1) falls through to the
   linear formula rather than returning FREE_LIST_UPPER_THRESHOLD —
   verify this boundary is intended. */
static size_t
get_free_list_size_value (EMACS_INT free_index)
{
  if (free_index < FREE_LIST_LOWER_THRESHOLD)
    return free_index + 1;
  if (free_index >= N_FREE_PAGE_LISTS)
    return FREE_LIST_UPPER_THRESHOLD;
  return ((free_index + 1 - FREE_LIST_LOWER_THRESHOLD)
	  * FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}
| 977 | |
| 978 | |
#ifdef MEMORY_USAGE_STATS
/* Returns the number of bytes actually consumed by an allocation of
   CLAIMED_SIZE bytes: the size class it falls in, or a whole number
   of pages for large objects.  If STATS is non-null, the requested
   size and the rounding overhead are accumulated into it. */
Bytecount
mc_alloced_storage_size (Bytecount claimed_size, struct overhead_stats *stats)
{
  size_t used_size =
    get_used_list_size_value (get_used_list_index (claimed_size));
  /* used_size == 0 means the large-object list: round up to pages. */
  if (used_size == 0)
    used_size = mult_PAGE_SIZE (BYTES_TO_PAGES (claimed_size));

  if (stats)
    {
      stats->was_requested += claimed_size;
      stats->malloc_overhead += used_size - claimed_size;
    }

  return used_size;
}
#endif /* MEMORY_USAGE_STATS */
| 997 | |
| 998 | |
| 999 | |
| 1000 /*--- free heap functions ----------------------------------------------*/ | |
| 1001 | |
/* Frees a heap section, if the heap section is completely free.
   PH must cover an entire heap section exactly (same start and page
   count) for it to be released back to the OS; the remaining sections
   are then shifted down one slot to keep the array compact.
   Returns 1 if a section was removed, else 0. */
static EMACS_INT
free_heap_section (page_header *ph)
{
  EMACS_INT i;
  EMACS_INT removed = 0;
  for (i = 0; i < N_HEAP_SECTIONS; i++)
    if (!removed)
      {
	if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
	    && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
	  {
	    /* Exact match: give the underlying allocation back. */
	    xfree_1 (HEAP_SECTION(i).real_start);
#ifdef MEMORY_USAGE_STATS
	    MC_MALLOCED_BYTES
	      -= malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
#endif

	    HEAP_SIZE -= PH_N_PAGES (ph) * PAGE_SIZE;

	    removed = 1;
	  }
      }
    else
      {
	/* Close the gap left by the removed section. */
	HEAP_SECTION(i-1).real_start = HEAP_SECTION(i).real_start;
	HEAP_SECTION(i-1).real_size = HEAP_SECTION(i).real_size;
	HEAP_SECTION(i-1).start = HEAP_SECTION(i).start;
	HEAP_SECTION(i-1).n_pages = HEAP_SECTION(i).n_pages;
      }

  N_HEAP_SECTIONS = N_HEAP_SECTIONS - removed;

  return removed;
}
| 1037 | |
/* Removes page from free list and clears its back-pointer, marking
   the page as belonging to no list. */
static void
remove_page_from_free_list (page_header *ph)
{
  remove_page_header_from_plh (ph, PH_PLH (ph));
  PH_PLH (ph) = 0;
}
| 1045 | |
| 1046 | |
/* Adds page to the free list whose size class matches the page's
   page count. */
static void
add_page_to_free_list (page_header *ph)
{
  PH_PLH (ph) = FREE_HEAP_PAGES (get_free_list_index (PH_N_PAGES (ph)));
  add_page_header_to_plh (ph, PH_PLH (ph));
}
| 1054 | |
| 1055 | |
/* Merges two adjacent pages.  SECOND_PH must immediately follow
   FIRST_PH in the heap; its pages are absorbed into FIRST_PH, its
   header recycled, and the lookup table remapped to FIRST_PH. */
static page_header *
merge_pages (page_header *first_ph, page_header *second_ph)
{
  /* merge */
  PH_N_PAGES (first_ph) += PH_N_PAGES (second_ph);
  /* clean up left over page header */
  free_page_header (second_ph);
  /* update lookup table */
  add_pages_to_lookup_table (first_ph, PH_N_PAGES (first_ph));

  return first_ph;
}
| 1069 | |
| 1070 | |
/* Checks if pages are adjacent, merges them, and adds merged page to
   free list.  The neighbors are looked up via the page immediately
   before and immediately after PH's address range; coalescing only
   happens with neighbors that are themselves on a free list.  If the
   coalesced page covers a whole heap section, the section is released
   instead of being put on a free list. */
static void
merge_into_free_list (page_header *ph)
{
  /* check if you can coalesce adjacent pages */
  page_header *prev_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
				       - PAGE_SIZE));
  page_header *succ_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
				       + (PH_N_PAGES (ph) * PAGE_SIZE)));
  /* NOTE(review): prev_ph/succ_ph may be null if the neighbor address
     is outside the heap — presumably PH_ON_FREE_LIST_P handles a null
     argument; verify its definition. */
  if (PH_ON_FREE_LIST_P (prev_ph))
    {
      remove_page_from_free_list (prev_ph);
      ph = merge_pages (prev_ph, ph);
    }
  if (PH_ON_FREE_LIST_P (succ_ph))
    {
      remove_page_from_free_list (succ_ph);
      ph = merge_pages (ph, succ_ph);
    }
  /* try to free heap_section, if the section is complete */
  if (!free_heap_section (ph))
    /* else add merged page to free list */
    add_page_to_free_list (ph);
}
| 1098 | |
| 1099 | |
/* Cuts given page header after n_pages, returns the first (cut) part, and
   puts the rest on the free list. */
static page_header *
split_page (page_header *ph, EMACS_INT n_pages)
{
  page_header *new_ph;
  EMACS_INT rem_pages = PH_N_PAGES (ph) - n_pages;

  /* remove the page from the free list if already hooked in */
  if (PH_PLH (ph))
    remove_page_from_free_list (ph);
  /* set new number of pages */
  PH_N_PAGES (ph) = n_pages;
  /* add new page to lookup table */
  add_pages_to_lookup_table (ph, n_pages);

  if (rem_pages)
    {
      /* build new page with remainder */
      new_ph = alloc_page_header ();
      PH_N_PAGES (new_ph) = rem_pages;
      PH_HEAP_SPACE (new_ph) =
	(void*) ((EMACS_INT) (PH_HEAP_SPACE (ph)) + (n_pages * PAGE_SIZE));
      /* add new page to lookup table */
      add_pages_to_lookup_table (new_ph, rem_pages);
      /* hook the rest into free list */
      add_page_to_free_list (new_ph);
    }
  return ph;
}
| 1130 | |
| 1131 | |
/* Expands the heap by given number of pages.  Allocates a new heap
   section from the OS (over-allocating by one page so the usable
   space can be page-aligned), registers it in the heap section table,
   and returns a page header for exactly NEEDED_PAGES, putting any
   surplus on a free list. */
static page_header *
expand_heap (EMACS_INT needed_pages)
{
  page_header *ph;
  EMACS_INT n_pages;
  size_t real_size;
  void *real_start;

  /* determine number of pages the heap should grow; under memory
     shortage grow only by what is strictly needed, otherwise grow
     proportionally to the current heap size */
  if (memory_shortage)
    n_pages = needed_pages;
  else
    n_pages = max (MIN_HEAP_INCREASE,
		   needed_pages
		   + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR)));

  /* get the real values; one extra page allows page alignment */
  real_size = (n_pages * PAGE_SIZE) + PAGE_SIZE;
  real_start = xmalloc_and_zero (real_size);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, real_size, 0);
#endif

  /* maintain heap section count */
  if (N_HEAP_SECTIONS >= MAX_HEAP_SECTS)
    {
      stderr_out ("Increase number of MAX_HEAP_SECTS");
      ABORT ();
    }
  HEAP_SECTION(N_HEAP_SECTIONS).real_start = real_start;
  HEAP_SECTION(N_HEAP_SECTIONS).real_size = real_size;
  HEAP_SECTION(N_HEAP_SECTIONS).start = PAGE_SIZE_ALIGNMENT (real_start);
  HEAP_SECTION(N_HEAP_SECTIONS).n_pages = n_pages;
  N_HEAP_SECTIONS ++;

  /* get page header */
  ph = alloc_page_header ();

  /* setup page header */
  PH_N_PAGES (ph) = n_pages;
  PH_HEAP_SPACE (ph) = PAGE_SIZE_ALIGNMENT (real_start);
  assert (((EMACS_INT) (PH_HEAP_SPACE (ph)) % PAGE_SIZE) == 0);
  HEAP_SIZE += n_pages * PAGE_SIZE;

  /* this was also done by allocate_lisp_storage */
  if (need_to_check_c_alloca)
    xemacs_c_alloca (0);

  /* return needed size, put rest on free list */
  return split_page (ph, needed_pages);
}
| 1184 | |
| 1185 | |
| 1186 | |
| 1187 | |
| 1188 /*--- used heap functions ----------------------------------------------*/ | |
/* Installs initial free list.  Threads all cells on the page into a
   singly-linked free list (only for single-object pages, elemcount ==
   1; array pages get no chain between cells), registers every cell in
   the pointer lookup table, and hooks the list into the page header. */
static void
install_cell_free_list (page_header *ph, EMACS_INT elemcount)
{
  Rawbyte *p;
  EMACS_INT i;
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  /* write initial free list if cell_size is < PAGE_SIZE */
  p = (Rawbyte *) PH_HEAP_SPACE (ph);
  for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
    {
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (p));
      MARK_LRECORD_AS_FREE (p);
#endif
      /* chain only for non-array pages */
      if (elemcount == 1)
	NEXT_FREE (p) = FREE_LIST (p + cell_size);
      set_lookup_table (p, ph);
      p += cell_size;
    }
  /* last cell terminates the free list */
#ifdef ERROR_CHECK_GC
  assert (!LRECORD_FREE_P (p));
  MARK_LRECORD_AS_FREE (p);
#endif
  NEXT_FREE (p) = 0;
  set_lookup_table (p, ph);

  /* hook free list into header */
  PH_FREE_LIST (ph) = FREE_LIST (PH_HEAP_SPACE (ph));
}
| 1219 | |
| 1220 | |
/* Cleans the object space of the given page_header: optionally zeroes
   the heap space and drops the cell free list. */
static void
remove_cell_free_list (page_header *ph)
{
#if ZERO_MEM
  ZERO_HEAP_SPACE (ph);
#endif
  PH_FREE_LIST (ph) = 0;
}
| 1230 | |
| 1231 | |
/* Installs a new page and hooks it into given page_list_header.
   Determines the cell size (the list's size class, or SIZE for the
   large-object list), computes the cell count (ELEMCOUNT cells for
   array pages, otherwise as many cells as fit), installs mark bits
   and the initial cell free list, and updates usage statistics. */
static page_header *
install_page_in_used_list (page_header *ph, page_list_header *plh,
			   size_t size, EMACS_INT elemcount)
{
  /* add to list */
  add_page_header_to_plh (ph, plh);

  /* determine cell size; a zero PLH_SIZE marks the large-object list */
  if (PLH_SIZE (plh))
    PH_CELL_SIZE (ph) = PLH_SIZE (plh);
  else
    PH_CELL_SIZE (ph) = size;
  if (elemcount == 1)
    PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph);
  else
    {
      /* array allocation: exactly ELEMCOUNT cells, flagged as array */
      PH_CELLS_ON_PAGE (ph) = elemcount;
      PH_ARRAY_BIT (ph) = 1;
    }

  /* init cell count */
  PH_CELLS_USED (ph) = 0;

  /* install mark bits and initialize cell free list */
  install_mark_bits (ph);

  install_cell_free_list (ph, elemcount);

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph);
#endif

  return ph;
}
| 1268 | |
| 1269 | |
/* Cleans and frees a page, identified by the given page_header.
   Removes the free list and mark bits, resets the header's cell
   bookkeeping, unlinks it from its used list and gives the page back
   to the free-page machinery (coalescing with neighbors). */
static void
remove_page_from_used_list (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);

  /* a write-protected page must not be freed while GC runs */
  assert (!(gc_in_progress && PH_PROTECTION_BIT (ph)));
  /* cleanup: remove memory protection, zero page_header bits. */

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
#endif

  /* clean up mark bits and cell free list */
  remove_cell_free_list (ph);
  if (PH_ON_USED_LIST_P (ph))
    remove_mark_bits (ph);

  /* clean up page header */
  PH_CELL_SIZE (ph) = 0;
  PH_CELLS_ON_PAGE (ph) = 0;
  PH_CELLS_USED (ph) = 0;

  /* remove from used list */
  remove_page_header_from_plh (ph, plh);

  /* move to free list */
  merge_into_free_list (ph);
}
| 1300 | |
| 1301 | |
| 1302 | |
| 1303 | |
| 1304 /*--- allocation -------------------------------------------------------*/ | |
| 1305 | |
| 1306 /* Allocates from cell free list on already allocated pages. */ | |
| 1307 static page_header * | |
| 1308 allocate_cell (page_list_header *plh) | |
| 1309 { | |
| 1310 page_header *ph = PLH_FIRST (plh); | |
| 1311 if (ph) | |
| 1312 { | |
| 1313 if (PH_FREE_LIST (ph)) | |
| 1314 /* elements free on first page */ | |
| 1315 return ph; | |
| 1316 else if ((PH_NEXT (ph)) | |
| 1317 && (PH_FREE_LIST (PH_NEXT (ph)))) | |
| 1318 /* elements free on second page */ | |
| 1319 { | |
| 1320 page_header *temp = PH_NEXT (ph); | |
| 1321 /* move full page (first page) to end of list */ | |
| 1322 PH_NEXT (PLH_LAST (plh)) = ph; | |
| 1323 PH_PREV (ph) = PLH_LAST (plh); | |
| 1324 PLH_LAST (plh) = ph; | |
| 1325 PH_NEXT (ph) = 0; | |
| 1326 /* install second page as first page */ | |
| 1327 ph = temp; | |
| 1328 PH_PREV (ph) = 0; | |
| 1329 PLH_FIRST (plh) = ph; | |
| 1330 return ph; | |
| 1331 } | |
| 1332 } | |
| 1333 return 0; | |
| 1334 } | |
| 1335 | |
| 1336 | |
| 1337 /* Finds a page which has at least the needed number of pages. | |
| 1338 Algorithm: FIRST FIT. */ | |
| 1339 static page_header * | |
| 1340 find_free_page_first_fit (EMACS_INT needed_pages, page_header *ph) | |
| 1341 { | |
| 1342 while (ph) | |
| 1343 { | |
| 1344 if (PH_N_PAGES (ph) >= needed_pages) | |
| 1345 return ph; | |
| 1346 ph = PH_NEXT (ph); | |
| 1347 } | |
| 1348 return 0; | |
| 1349 } | |
| 1350 | |
| 1351 | |
/* Allocates a page from the free list.  Scans the free lists from the
   smallest size class that could fit upward; an oversized hit is
   split and the surplus returned to the free lists.  Returns 0 if no
   free page is big enough. */
static page_header *
allocate_page_from_free_list (EMACS_INT needed_pages)
{
  page_header *ph = 0;
  EMACS_INT i;
  for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
    if ((ph = find_free_page_first_fit (needed_pages,
					PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
      {
	if (PH_N_PAGES (ph) > needed_pages)
	  return split_page (ph, needed_pages);
	else
	  {
	    remove_page_from_free_list (ph);
	    return ph;
	  }
      }
  return 0;
}
| 1372 | |
| 1373 | |
/* Allocates a new page, either from free list or by expanding the heap,
   and installs it on the given used list sized for SIZE-byte objects
   (ELEMCOUNT of them for array pages). */
static page_header *
allocate_new_page (page_list_header *plh, size_t size, EMACS_INT elemcount)
{
  EMACS_INT needed_pages = BYTES_TO_PAGES (size * elemcount);
  /* first check free list */
  page_header *result = allocate_page_from_free_list (needed_pages);
  if (!result)
    /* expand heap */
    result = expand_heap (needed_pages);
  install_page_in_used_list (result, plh, size, elemcount);
  return result;
}
| 1387 | |
| 1388 | |
/* Selects the correct size class, tries to allocate a cell of this size
   from the free list, if this fails, a new page is allocated.
   Returns zeroed storage for SIZE * ELEMCOUNT bytes, marked as a free
   lrecord; returns 0 when SIZE is 0. */
static void *
mc_alloc_1 (size_t size, EMACS_INT elemcount)
{
  page_list_header *plh = 0;
  page_header *ph = 0;
  void *result = 0;

  plh = USED_HEAP_PAGES (get_used_list_index (size));

  if (size == 0)
    return 0;
  /* only single objects smaller than half a page can reuse cells on
     existing pages; arrays and big objects always get fresh pages */
  if ((elemcount == 1) && (size < (size_t) PAGE_SIZE_DIV_2))
    /* first check any free cells */
    ph = allocate_cell (plh);
  if (!ph)
    /* allocate a new page */
    ph = allocate_new_page (plh, size, elemcount);

  /* return first element of free list and remove it from the list */
  result = (void*) PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) =
    NEXT_FREE (PH_FREE_LIST (ph));

  /* zero the cell and re-flag it free until the caller initializes it */
  memset (result, '\0', (size * elemcount));
  MARK_LRECORD_AS_FREE (result);

  /* bump used cells counter */
  PH_CELLS_USED (ph) += elemcount;

#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (plh) += elemcount;
  PLH_USED_SPACE (plh) += size * elemcount;
#endif

  return result;
}
| 1427 | |
/* Array allocation: ELEMCOUNT contiguous cells of SIZE bytes each. */
void *
mc_alloc_array (size_t size, EMACS_INT elemcount)
{
  return mc_alloc_1 (size, elemcount);
}
| 1434 | |
/* Single-object allocation of SIZE bytes. */
void *
mc_alloc (size_t size)
{
  return mc_alloc_1 (size, 1);
}
| 1440 | |
| 1441 | |
| 1442 | |
| 1443 /*--- sweep & free & finalize-------------------------------------------*/ | |
| 1444 | |
/* Frees a heap pointer: updates statistics, poisons the storage in
   error-checking builds, and pushes the cell onto the page's free
   list. */
static void
remove_cell (void *ptr, page_header *ph)
{
#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (PH_PLH (ph))--;
  if (PH_ON_USED_LIST_P (ph))
    PLH_USED_SPACE (PH_PLH (ph)) -=
      detagged_lisp_object_size ((const struct lrecord_header *) ptr);
  else
    PLH_USED_SPACE (PH_PLH (ph)) -= PH_CELL_SIZE (ph);
#endif
  if (PH_ON_USED_LIST_P (ph))
    {
#ifdef ALLOC_TYPE_STATS
      dec_lrecord_stats (PH_CELL_SIZE (ph),
			 (const struct lrecord_header *) ptr);
#endif /* ALLOC_TYPE_STATS */
#ifdef ERROR_CHECK_GC
      /* catch double frees and make use-after-free visible */
      assert (!LRECORD_FREE_P (ptr));
      deadbeef_memory (ptr, PH_CELL_SIZE (ph));
      MARK_LRECORD_AS_FREE (ptr);
#endif
    }

  /* hooks cell into free list */
  NEXT_FREE (ptr) = PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) = FREE_LIST (ptr);
  /* decrease cells used */
  PH_CELLS_USED (ph)--;
}
| 1476 | |
| 1477 | |
| 1478 /* Mark free list marks all free list entries. */ | |
| 1479 static void | |
| 1480 mark_free_list (page_header *ph) | |
| 1481 { | |
| 1482 free_link *fl = PH_FREE_LIST (ph); | |
| 1483 while (fl) | |
| 1484 { | |
| 3092 | 1485 SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK); |
| 2720 | 1486 fl = NEXT_FREE (fl); |
| 1487 } | |
| 1488 } | |
| 1489 | |
| 1490 | |
/* Finalize a page for disksave.  XEmacs calls this routine before it
   dumps the heap image.  You have to tell mc-alloc how to call your
   object's finalizer for disksave.  Therefore, you have to define the
   macro MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr).  This macro should
   do nothing else then test if there is a finalizer and call it on
   the given argument, which is the heap address of the object.
   Returns number of processed pages. */
static EMACS_INT
finalize_page_for_disksave (page_header *ph)
{
  EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);

  /* visit every cell slot on the page, including free ones —
     presumably the macro ignores cells without a finalizer */
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
      MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
    }
  return 1;
}
| 1513 | |
| 1514 | |
/* Finalizes the heap for disksave.  Returns number of processed
   pages. */
EMACS_INT
mc_finalize_for_disksave (void)
{
  return visit_all_used_page_headers (finalize_page_for_disksave);
}
| 1522 | |
| 1523 | |
/* Sweeps a page: all the non-marked cells are freed.  If the page is
   empty in the end, it is removed.  If some cells are free, it is
   moved to the front of its page header list.  Full pages stay where
   they are.  Returns number of processed pages. */
static EMACS_INT
sweep_page (page_header *ph)
{
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;

  /* free cells must not look like garbage to the white-check below */
  mark_free_list (ph);

  /* ARRAY_BIT_HACK: an array lives or dies as a whole — if any cell
     of an array page is marked, the entire page survives untouched */
  if (PH_ARRAY_BIT (ph))
    for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
      {
	GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
	if (bit)
	  {
	    zero_mark_bits (ph);
	    PH_BLACK_BIT (ph) = 0;
	    return 1;
	  }
      }

  /* free every cell still marked WHITE (= not reached by the mark
     phase and not on the free list) */
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == WHITE)
	{
	  GC_STAT_FREED;
	  remove_cell (heap_space + (heap_space_step * mark_bit), ph);
	}
    }
  zero_mark_bits (ph);
  PH_BLACK_BIT (ph) = 0;
  if (PH_CELLS_USED (ph) == 0)
    remove_page_from_used_list (ph);
  else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
    move_page_header_to_front (ph);

  return 1;
}
| 1570 | |
| 1571 | |
/* Sweeps the heap.  Returns number of processed pages. */
EMACS_INT
mc_sweep (void)
{
  return visit_all_used_page_headers (sweep_page);
}
| 1578 | |
| 1579 | |
/* Changes the size of the cell pointed to by ptr.
   Returns the new address of the new cell with new size.
   Semantics follow realloc: a null PTR degenerates to allocation, a
   zero total size returns 0.  The old cell is NOT explicitly freed —
   presumably it is reclaimed by the next GC sweep; verify callers do
   not rely on immediate reuse. */
static void *
mc_realloc_1 (void *ptr, size_t size, int elemcount)
{
  if (ptr)
    {
      if (size * elemcount)
	{
	  void *result = mc_alloc_1 (size, elemcount);
	  /* copy at most what the old cell actually held */
	  size_t from_size = PH_CELL_SIZE (get_page_header (ptr));
	  size_t cpy_size = size * elemcount;
	  if (cpy_size > from_size)
	    cpy_size = from_size;
	  memcpy (result, ptr, cpy_size);
#ifdef ALLOC_TYPE_STATS
	  inc_lrecord_stats (size, (struct lrecord_header *) result);
#endif /* ALLOC_TYPE_STATS */
	  return result;
	}
      else
	{
	  return 0;
	}
    }
  else
    return mc_alloc_1 (size, elemcount);
}
| 1608 | |
/* Resizes a single object; see mc_realloc_1 for semantics. */
void *
mc_realloc (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 1);
}
| 1614 | |
/* Resizes an array allocation; see mc_realloc_1 for semantics. */
void *
mc_realloc_array (void *ptr, size_t size, EMACS_INT elemcount)
{
  return mc_realloc_1 (ptr, size, elemcount);
}
| 1620 | |
| 1621 | |
| 1622 | |
| 1623 /*--- initialization ---------------------------------------------------*/ | |
| 1624 | |
/* Call once at the very beginning.  Derives page-size-dependent
   constants, allocates the used-page-list array and the first level
   of the pointer lookup table, and initializes every used and free
   page list header to empty. */
void
init_mc_allocator (void)
{
  EMACS_INT i;

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES = 0;
#endif

  /* init of pagesize dependent values; only power-of-two page sizes
     from 512 to 64K are supported */
  switch (SYS_PAGE_SIZE)
    {
    case 512: log_page_size = 9; break;
    case 1024: log_page_size = 10; break;
    case 2048: log_page_size = 11; break;
    case 4096: log_page_size = 12; break;
    case 8192: log_page_size = 13; break;
    case 16384: log_page_size = 14; break;
    case 32768: log_page_size = 15; break;
    case 65536: log_page_size = 16; break;
    default:
      fprintf(stderr, "##### SYS_PAGE_SIZE=%d not supported #####\n",
	      SYS_PAGE_SIZE);
      ABORT ();
    }

  page_size_div_2 = (EMACS_UINT) SYS_PAGE_SIZE >> 1;

  mc_allocator_globals.used_heap_pages =
    (page_list_header *) xmalloc_and_zero ((N_USED_PAGE_LISTS + 1)
					   * sizeof (page_list_header));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (N_USED_PAGE_LISTS + 1) * sizeof (page_list_header);
#endif

  mc_allocator_globals.ptr_lookup_table =
    (level_2_lookup_tree **)
    xmalloc_and_zero ((LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *);
#endif

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  the_page_header_blocktype = Blocktype_new (struct page_header_blocktype);
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

  /* empty-initialize every used page list */
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    {
      page_list_header *plh = USED_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = USED_LIST;
      PLH_SIZE (plh) = get_used_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

  /* empty-initialize every free page list */
  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    {
      page_list_header *plh = FREE_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = FREE_LIST;
      PLH_SIZE (plh) = get_free_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  PAGE_HEADER_FREE_LIST = 0;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += sizeof (mc_allocator_globals);
#endif

  init_lookup_table ();
}
| 1716 | |
| 1717 | |
| 1718 | |
| 1719 | |
| 1720 /*--- lisp function for statistics -------------------------------------*/ | |
| 1721 | |
#ifdef MEMORY_USAGE_STATS
DEFUN ("mc-alloc-memory-usage", Fmc_alloc_memory_usage, 0, 0, 0, /*
Returns stats about the mc-alloc memory usage. See diagnose.el.
*/
       ())
{
  Lisp_Object free_plhs = Qnil;
  Lisp_Object used_plhs = Qnil;
  Lisp_Object heap_sects = Qnil;
  EMACS_INT used_size = 0;
  EMACS_INT real_size = 0;

  EMACS_INT i;

  /* Per-size stats for non-empty free page lists: alist entries of
     the form (size . (page-count)).  */
  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)) > 0)
      free_plhs =
	acons (make_int (PLH_SIZE (FREE_HEAP_PAGES(i))),
	       list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)))),
	       free_plhs);

  /* Per-size stats for non-empty used page lists: alist entries of
     the form (size . (page-count used-cells used-space total-cells
     total-space)).  */
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (USED_HEAP_PAGES(i)) > 0)
      used_plhs =
	acons (make_int (PLH_SIZE (USED_HEAP_PAGES(i))),
	       list5 (make_int (PLH_PAGE_COUNT (USED_HEAP_PAGES(i))),
		      make_int (PLH_USED_CELLS (USED_HEAP_PAGES(i))),
		      make_int (PLH_USED_SPACE (USED_HEAP_PAGES(i))),
		      make_int (PLH_TOTAL_CELLS (USED_HEAP_PAGES(i))),
		      make_int (PLH_TOTAL_SPACE (USED_HEAP_PAGES(i)))),
	       used_plhs);

  /* Sum over all heap sections: used_size counts whole pages,
     real_size the malloc'ed storage actually backing them.  */
  for (i = 0; i < N_HEAP_SECTIONS; i++) {
    used_size += HEAP_SECTION(i).n_pages * PAGE_SIZE;
    real_size +=
      malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
  }

  heap_sects =
    list3 (make_int (N_HEAP_SECTIONS),
	   make_int (used_size),
	   make_int (real_size));

  /* The acons loops build the alists back-to-front; Fnreverse
     restores ascending-size order for the consumer (diagnose.el).  */
  return Fcons (make_int (PAGE_SIZE),
		list5 (heap_sects,
		       Fnreverse (used_plhs),
		       Fnreverse (free_plhs),
		       make_int (sizeof (mc_allocator_globals)),
		       make_int (MC_MALLOCED_BYTES)));
}
#endif /* MEMORY_USAGE_STATS */
| 1773 | |
/* Register this module's Lisp primitives with the interpreter;
   called once during startup.  */
void
syms_of_mc_alloc (void)
{
#ifdef MEMORY_USAGE_STATS
  DEFSUBR (Fmc_alloc_memory_usage);
#endif /* MEMORY_USAGE_STATS */
}
| 3092 | 1781 |
| 1782 | |
| 1783 /*--- incremental garbage collector ----------------------------------*/ | |
| 1784 | |
#if 0 /* currently unused */

/* access dirty bit of page header */

/* Set the dirty bit of page header PH to VALUE.  */
static void
set_dirty_bit (page_header *ph, unsigned int value)
{
  PH_DIRTY_BIT (ph) = value;
}
| 1793 | |
/* Set the dirty bit of the page containing PTR to VALUE.  */
static void
set_dirty_bit_for_address (void *ptr, unsigned int value)
{
  set_dirty_bit (get_page_header (ptr), value);
}
| 1799 | |
/* Return the dirty bit of page header PH.  */
static unsigned int
get_dirty_bit (page_header *ph)
{
  return PH_DIRTY_BIT (ph);
}
| 1805 | |
/* Return the dirty bit of the page containing PTR.  */
static unsigned int
get_dirty_bit_for_address (void *ptr)
{
  return get_dirty_bit (get_page_header (ptr));
}
| 1811 | |
| 1812 | |
/* access protection bit of page header */

/* Set the protection bit of page header PH to VALUE.  */
static void
set_protection_bit (page_header *ph, unsigned int value)
{
  PH_PROTECTION_BIT (ph) = value;
}
| 1819 | |
/* Set the protection bit of the page containing PTR to VALUE.  */
static void
set_protection_bit_for_address (void *ptr, unsigned int value)
{
  set_protection_bit (get_page_header (ptr), value);
}
| 1825 | |
/* Return the protection bit of page header PH.  */
static unsigned int
get_protection_bit (page_header *ph)
{
  return PH_PROTECTION_BIT (ph);
}
| 1831 | |
/* Return the protection bit of the page containing PTR.  */
static unsigned int
get_protection_bit_for_address (void *ptr)
{
  return get_protection_bit (get_page_header (ptr));
}
| 1837 | |
| 1838 | |
/* Returns the start of the page of the object pointed to by ptr. */
static void *
get_page_start (void *ptr)
{
  return PH_HEAP_SPACE (get_page_header (ptr));
}

#endif /* 0 */
| 1847 | |
| 3092 | 1848 /* Make PAGE_SIZE globally available. */ |
| 1849 EMACS_INT | |
| 1850 mc_get_page_size () | |
| 1851 { | |
| 1852 return PAGE_SIZE; | |
| 1853 } | |
| 1854 | |
| 1855 /* Is the fault at ptr on a protected page? */ | |
| 1856 EMACS_INT | |
| 1857 fault_on_protected_page (void *ptr) | |
| 1858 { | |
| 1859 page_header *ph = get_page_header_internal (ptr); | |
| 1860 return (ph | |
| 1861 && PH_HEAP_SPACE (ph) | |
| 1862 && (PH_HEAP_SPACE (ph) <= ptr) | |
| 1863 && ((void *) ((EMACS_INT) PH_HEAP_SPACE (ph) | |
| 1864 + PH_N_PAGES (ph) * PAGE_SIZE) > ptr) | |
| 1865 && (PH_PROTECTION_BIT (ph) == 1)); | |
| 1866 } | |
| 1867 | |
| 1868 | |
| 1869 /* Protect the heap page of given page header ph if black objects are | |
| 3303 | 1870 on the page. Returns number of processed pages. */ |
| 1871 static EMACS_INT | |
| 3092 | 1872 protect_heap_page (page_header *ph) |
| 1873 { | |
| 1874 if (PH_BLACK_BIT (ph)) | |
| 1875 { | |
| 1876 void *heap_space = PH_HEAP_SPACE (ph); | |
| 1877 EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE; | |
| 1878 vdb_protect ((void *) heap_space, heap_space_size); | |
| 1879 PH_PROTECTION_BIT (ph) = 1; | |
| 3303 | 1880 return 1; |
| 3092 | 1881 } |
| 3303 | 1882 return 0; |
| 3092 | 1883 } |
| 1884 | |
/* Protect all heap pages with black objects.  Returns number of
   processed (i.e. newly protected) pages. */
EMACS_INT
protect_heap_pages (void)
{
  return visit_all_used_page_headers (protect_heap_page);
}
| 1892 | |
| 1893 | |
| 1894 /* Remove protection (if there) of heap page of given page header | |
| 3303 | 1895 ph. Returns number of processed pages. */ |
| 1896 static EMACS_INT | |
| 3092 | 1897 unprotect_heap_page (page_header *ph) |
| 1898 { | |
| 1899 if (PH_PROTECTION_BIT (ph)) | |
| 1900 { | |
| 1901 void *heap_space = PH_HEAP_SPACE (ph); | |
| 1902 EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE; | |
| 1903 vdb_unprotect (heap_space, heap_space_size); | |
| 1904 PH_PROTECTION_BIT (ph) = 0; | |
| 3303 | 1905 return 1; |
| 3092 | 1906 } |
| 3303 | 1907 return 0; |
| 3092 | 1908 } |
| 1909 | |
/* Remove protection for all heap pages which are protected.  Returns
   number of processed (i.e. unprotected) pages. */
EMACS_INT
unprotect_heap_pages (void)
{
  return visit_all_used_page_headers (unprotect_heap_page);
}
| 1917 | |
/* Remove protection and mark page dirty.  NOTE(review): presumably
   invoked on a write fault (cf. fault_on_protected_page above), so
   the page must be made writable before the dirty bit is recorded --
   confirm before reordering. */
void
unprotect_page_and_mark_dirty (void *ptr)
{
  page_header *ph = get_page_header (ptr);
  unprotect_heap_page (ph);
  PH_DIRTY_BIT (ph) = 1;
}
| 1926 | |
/* Repush all objects on dirty pages onto the mark stack.  PTR is an
   address on the page; returns the number of objects repushed. */
int
repush_all_objects_on_page (void *ptr)
{
  int repushed_objects = 0;
  page_header *ph = get_page_header (ptr);
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);  /* bytes per cell */
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;
  /* Walk every cell on the page; each cell whose mark bits read
     BLACK is pushed back through the write barrier so it gets
     rescanned. */
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == BLACK)
	{
	  repushed_objects++;
	  /* Cell address = page start + cell index * cell size. */
	  gc_write_barrier
	    (wrap_pointer_1 ((heap_space + (heap_space_step * mark_bit))));
	}
    }
  /* Page has been rescanned: clear its black and dirty flags. */
  PH_BLACK_BIT (ph) = 0;
  PH_DIRTY_BIT (ph) = 0;
  return repushed_objects;
}
| 1952 | |
/* Mark black if object is currently grey. This first checks, if the
   object is really allocated on the mc-heap. If it is, it can be
   marked black; if it is not, it cannot be marked.  Returns 1 if PTR
   is on the mc-heap, 0 otherwise. */
EMACS_INT
maybe_mark_black (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  unsigned int bit = 0;

  /* Only objects on a page that belongs to a used page list have
     mark bits to update. */
  if (ph && PH_PLH (ph) && PH_ON_USED_LIST_P (ph))
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
      if (bit == GREY)
	{
	  /* Record on the page header that the page now carries at
	     least one black object (consulted by protect_heap_page). */
	  if (!PH_BLACK_BIT (ph))
	    PH_BLACK_BIT (ph) = 1;
	  SET_BIT (ph, get_mark_bit_index (ptr, ph), BLACK);
	}
      return 1;
    }
  return 0;
}
| 1975 | |
| 1976 /* Only for debugging --- not used anywhere in the sources. */ | |
| 1977 EMACS_INT | |
| 1978 object_on_heap_p (void *ptr) | |
| 1979 { | |
| 1980 page_header *ph = get_page_header_internal (ptr); | |
| 1981 return (ph && PH_ON_USED_LIST_P (ph)); | |
| 1982 } |
