xemacs-beta: src/mc-alloc.c, annotated at changeset 5210:23f00bfd78a4 ("Merge.")
author:   Aidan Kehoe <kehoea@parhasard.net>
date:     Fri, 07 May 2010 14:35:00 +0100
parents:  e374ea766cc1
children: 9b8c2168d231
/* New size-based allocator for XEmacs.
   Copyright (C) 2005 Marcus Crestani.
   Copyright (C) 2010 Ben Wing.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Synched up with: Not in FSF. */

#include <config.h>

#include "lisp.h"
#include "mc-alloc.h"
#include "getpagesize.h"


#if 0
# define USE_MARK_BITS_FREE_LIST 1
#endif
#if 1
# define BLOCKTYPE_ALLOC_PAGE_HEADER 1
#endif

/* Memory protection needs the real system-dependent pagesize. */
#ifndef WIN32_NATIVE
#include <unistd.h>		/* for getpagesize () */
#endif
#if defined (HAVE_GETPAGESIZE)
# define SYS_PAGE_SIZE getpagesize ()
#elif defined (_SC_PAGESIZE)
# define SYS_PAGE_SIZE sysconf (_SC_PAGESIZE)
#elif defined (_SC_PAGE_SIZE)
# define SYS_PAGE_SIZE sysconf (_SC_PAGE_SIZE)
#elif defined (get_page_size)
# define SYS_PAGE_SIZE get_page_size ()
#elif defined (PAGESIZE)
# define SYS_PAGE_SIZE PAGESIZE
#elif defined (PAGE_SIZE)
# define SYS_PAGE_SIZE PAGE_SIZE
#else
/* Valid page sizes are powers of 2. */
# define SYS_PAGE_SIZE 4096
#endif


/*--- configurable values ----------------------------------------------*/

/* Definition of size classes */

/* Heap used list constants: In the used heap, it is important to
   quickly find a free spot for a new object.  Therefore the size
   classes of the used heap are defined by the size of the cells on
   the pages.  The size classes should match common object sizes, to
   avoid wasting memory. */

/* Minimum object size in bytes. */
#if BITS_PER_EMACS_INT > 32
# define USED_LIST_MIN_OBJECT_SIZE 16
#else
# define USED_LIST_MIN_OBJECT_SIZE 8
#endif

/* The step size by which the size classes increase (up to the upper
   threshold).  This many bytes are mapped to a single used list: */
#if BITS_PER_EMACS_INT > 32
# define USED_LIST_LIN_STEP 8
#else
# define USED_LIST_LIN_STEP 4
#endif

/* The upper threshold should always be set to PAGE_SIZE/2, because if
   an object is larger than PAGE_SIZE/2 there is no room for any other
   object on this page.  Objects this big are kept in the page list of
   the multiple pages, since a quick search for free spots is not
   needed for this kind of page (there are no free spots).
   PAGE_SIZE_DIV_2 defines the maximum size of a used space list. */
#define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2
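
/* A worked example of the mapping (derived from the constants above,
   assuming a 32-bit build, so USED_LIST_MIN_OBJECT_SIZE 8 and
   USED_LIST_LIN_STEP 4): a request of 8 bytes or less lands in list 0;
   a 26-byte request lands in list ((26 - 8 - 1) / 4) + 1 = 5, whose
   cells are 5 * 4 + 8 = 28 bytes; anything above PAGE_SIZE_DIV_2 goes
   to the final "multiple pages" list.  See get_used_list_index () and
   get_used_list_size_value () below. */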


/* Heap free list constants: In the unused heap, what matters is the
   amount of consecutive memory.  A page is the smallest entity that
   is ever requested.  Therefore, the size classes of the unused heap
   are defined by the number of consecutive pages. */
/* Sizes up to this many pages each have their own free list. */
#define FREE_LIST_LOWER_THRESHOLD 32
/* The step size by which the size classes increase (up to the upper
   threshold).  FREE_LIST_LIN_STEP sizes are mapped to a single free
   list for sizes between FREE_LIST_LOWER_THRESHOLD and
   FREE_LIST_UPPER_THRESHOLD. */
#define FREE_LIST_LIN_STEP 8
/* Sizes of at least this many pages are mapped to a single free
   list.  Blocks of memory larger than this number are all kept in a
   single list, which makes searching this list slow.  But objects
   that big are rare. */
#define FREE_LIST_UPPER_THRESHOLD 256


/* used heap list count */
#define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \
                             - USED_LIST_MIN_OBJECT_SIZE) \
                            / USED_LIST_LIN_STEP) + 1 ) + 1

/* free heap list count */
#define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \
                             - FREE_LIST_LOWER_THRESHOLD) \
                            / FREE_LIST_LIN_STEP) \
                           + FREE_LIST_LOWER_THRESHOLD)

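/* With the default constants this arithmetic works out as follows
   (assuming a 32-bit build and 4096-byte pages, so PAGE_SIZE_DIV_2 is
   2048): N_USED_PAGE_LISTS = ((2048 - 8) / 4 + 1) + 1 = 512 lists,
   and N_FREE_PAGE_LISTS = (256 - 32) / 8 + 32 = 60 lists. */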

/* Maximum number of separately added heap sections. */
#if BITS_PER_EMACS_INT > 32
# define MAX_HEAP_SECTS 2048
#else
# define MAX_HEAP_SECTS 768
#endif


/* Heap growth constants.  The heap increases by any number between
   the boundaries (unit is PAGE_SIZE). */
#define MIN_HEAP_INCREASE 256
#define MAX_HEAP_INCREASE 256	/* not used */

/* Every heap growth is calculated like this:
   needed_pages + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR)).
   So the growth of the heap is influenced by its current size, but
   kept between the MIN_HEAP_INCREASE and MAX_HEAP_INCREASE
   boundaries.  This keeps down the number of heap sections: the
   larger the heap grows, the larger the newly allocated chunks
   become. */
#define HEAP_GROWTH_DIVISOR 3


/* Zero memory before putting on free lists. */
#define ZERO_MEM 1


#ifndef CHAR_BIT		/* should be included by limits.h */
# define CHAR_BIT BITS_PER_CHAR
#endif



/*--- values depending on PAGE_SIZE ------------------------------------*/

/* initialized in init_mc_allocator () */
static EMACS_INT log_page_size;
static EMACS_INT page_size_div_2;

#undef PAGE_SIZE
#define PAGE_SIZE SYS_PAGE_SIZE
#define LOG_PAGE_SIZE log_page_size
#define PAGE_SIZE_DIV_2 page_size_div_2


/* Constants for heap address to page header mapping. */
#define LOG_LEVEL2_SIZE 10
#define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE)
#if BITS_PER_EMACS_INT > 32
# define USE_HASH_TABLE 1
# define LOG_LEVEL1_SIZE 11
#else
# define LOG_LEVEL1_SIZE \
  (BITS_PER_EMACS_INT - LOG_LEVEL2_SIZE - LOG_PAGE_SIZE)
#endif
#define LEVEL1_SIZE (1 << LOG_LEVEL1_SIZE)

#ifdef USE_HASH_TABLE
# define HASH(hi) ((hi) & (LEVEL1_SIZE - 1))
# define L1_INDEX(p) HASH ((EMACS_UINT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#else
# define L1_INDEX(p) ((EMACS_UINT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#endif
#define L2_INDEX(p) (((EMACS_UINT) p >> LOG_PAGE_SIZE) & (LEVEL2_SIZE - 1))
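
/* To illustrate the mapping (assuming a 32-bit build with 4096-byte
   pages): a heap address splits into a 10-bit level-1 index, a 10-bit
   level-2 index, and a 12-bit offset within the page.  On 64-bit
   builds a full level-1 table would be far too large, so the level-1
   index is instead hashed into 2^11 buckets that are chained via
   hash_link (see level_2_lookup_tree below). */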



/*--- structs and typedefs ---------------------------------------------*/

/* Links the free lists (mark_bit_free_list and cell free list). */
typedef struct free_link
{
  struct lrecord_header lheader;
  struct free_link *next_free;
} free_link;


/* Header for pages.  They are held in a doubly linked list. */
typedef struct page_header
{
  struct page_header *next;	/* next page_header */
  struct page_header *prev;	/* previous page_header */
  /* Field plh holds a pointer to the corresponding page list header. */
  struct page_list_header *plh;	/* page list header */
  free_link *free_list;		/* links free cells on page */
  EMACS_INT n_pages;		/* number of pages */
  EMACS_INT cell_size;		/* size of cells on page */
  EMACS_INT cells_on_page;	/* total number of cells on page */
  EMACS_INT cells_used;		/* number of used cells on page */
  /* If the number of objects on the page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory area;
     the field mark_bits then holds the pointer to this area.  If the
     number of objects is smaller than BITS_PER_EMACS_INT, the mark
     bits are held in the mark_bits EMACS_INT directly, without an
     additional indirection. */
  unsigned int black_bit:1;	/* objects on page are black */
  unsigned int dirty_bit:1;	/* page is dirty */
  unsigned int protection_bit:1; /* page is write protected */
  unsigned int array_bit:1;	/* page holds arrays */
  Rawbyte *mark_bits;		/* pointer to mark bits */
  void *heap_space;		/* pointer to heap, where objects
				   are stored */
} page_header;


/* Different list types. */
enum list_type_enum {
  USED_LIST,
  FREE_LIST
};


/* Header for page lists.  Field list_type holds the type of the list. */
typedef struct page_list_header
{
  enum list_type_enum list_type; /* the type of the list */
  /* Size holds the size of one cell (in bytes) in a used heap list,
     or the size of the heap sector (in number of pages). */
  size_t size;			/* size of one cell / heap sector */
  page_header *first;		/* first of page_header list */
  page_header *last;		/* last of page_header list */
  /* If the number of objects on a page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, which is linked into this free list when not in use.  If
     the number of objects is smaller than BITS_PER_EMACS_INT, the
     mark bits are held in the mark_bits EMACS_INT directly, without
     an additional indirection. */
  free_link *mark_bit_free_list;

#ifdef MEMORY_USAGE_STATS
  EMACS_INT page_count;		/* number of pages in list */
  EMACS_INT used_cells;		/* number of objects in list */
  EMACS_INT used_space;		/* used space */
  EMACS_INT total_cells;	/* number of available cells */
  EMACS_INT total_space;	/* available space */
#endif
} page_list_header;


/* The heap sectors are stored with their real start pointer and their
   real size.  Not aligned to PAGE_SIZE.  Needed for freeing heap sectors. */
typedef struct heap_sect {
  void *real_start;		/* real start pointer (NOT aligned) */
  size_t real_size;		/* NOT multiple of PAGE_SIZE */
  void *start;			/* aligned start pointer */
  EMACS_INT n_pages;		/* multiple of PAGE_SIZE */
} heap_sect;


/* 2nd tree level for mapping of heap addresses to page headers. */
typedef struct level_2_lookup_tree {
  page_header *index[LEVEL2_SIZE]; /* link to page header */
  EMACS_INT key;		/* high order address bits */
#ifdef USE_HASH_TABLE
  struct level_2_lookup_tree *hash_link; /* hash chain link */
#endif
} level_2_lookup_tree;



/*--- global variable definitions --------------------------------------*/

/* All global allocator variables are kept in this struct. */
typedef struct mc_allocator_globals_type {

  /* heap size */
  EMACS_INT heap_size;

  /* list of all separately allocated chunks of heap */
  heap_sect heap_sections[MAX_HEAP_SECTS];
  EMACS_INT n_heap_sections;

  /* Holds all allocated pages, each object size class in its separate
     list, to guarantee fast allocation on partially filled pages. */
  page_list_header *used_heap_pages;

  /* Holds all free pages in the heap.  N multiples of PAGE_SIZE are
     kept on the Nth free list.  Contiguous pages are coalesced. */
  page_list_header free_heap_pages[N_FREE_PAGE_LISTS];

  /* ptr lookup table */
  level_2_lookup_tree **ptr_lookup_table;

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  /* page header free list */
  free_link *page_header_free_list;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  EMACS_INT malloced_bytes;
#endif
} mc_allocator_globals_type;

mc_allocator_globals_type mc_allocator_globals;




/*--- macro accessors --------------------------------------------------*/

#define USED_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.used_heap_pages[i])

#define FREE_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.free_heap_pages[i])

#define PLH(plh) plh
# define PLH_LIST_TYPE(plh) PLH (plh)->list_type
# define PLH_SIZE(plh) PLH (plh)->size
# define PLH_FIRST(plh) PLH (plh)->first
# define PLH_LAST(plh) PLH (plh)->last
# define PLH_MARK_BIT_FREE_LIST(plh) PLH (plh)->mark_bit_free_list
#ifdef MEMORY_USAGE_STATS
# define PLH_PAGE_COUNT(plh) PLH (plh)->page_count
# define PLH_USED_CELLS(plh) PLH (plh)->used_cells
# define PLH_USED_SPACE(plh) PLH (plh)->used_space
# define PLH_TOTAL_CELLS(plh) PLH (plh)->total_cells
# define PLH_TOTAL_SPACE(plh) PLH (plh)->total_space
#endif

#define PH(ph) ph
# define PH_NEXT(ph) PH (ph)->next
# define PH_PREV(ph) PH (ph)->prev
# define PH_PLH(ph) PH (ph)->plh
# define PH_FREE_LIST(ph) PH (ph)->free_list
# define PH_N_PAGES(ph) PH (ph)->n_pages
# define PH_CELL_SIZE(ph) PH (ph)->cell_size
# define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page
# define PH_CELLS_USED(ph) PH (ph)->cells_used
# define PH_BLACK_BIT(ph) PH (ph)->black_bit
# define PH_DIRTY_BIT(ph) PH (ph)->dirty_bit
# define PH_PROTECTION_BIT(ph) PH (ph)->protection_bit
# define PH_ARRAY_BIT(ph) PH (ph)->array_bit
# define PH_MARK_BITS(ph) PH (ph)->mark_bits
# define PH_HEAP_SPACE(ph) PH (ph)->heap_space
#define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph))
#define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph))

#define HEAP_SIZE mc_allocator_globals.heap_size

#ifdef MEMORY_USAGE_STATS
# define MC_MALLOCED_BYTES mc_allocator_globals.malloced_bytes
#endif

#define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index]
#define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
#define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#define NEXT_FREE(free_list) ((free_link*) free_list)->next_free
#define FREE_LIST(free_list) (free_link*) (free_list)

#define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i]
#define LEVEL2(l2, i) l2->index[i]
# define LEVEL2_KEY(l2) l2->key
#ifdef USE_HASH_TABLE
# define LEVEL2_HASH_LINK(l2) l2->hash_link
#endif

#if ZERO_MEM
# define ZERO_HEAP_SPACE(ph) \
  memset (PH_HEAP_SPACE (ph), '\0', PH_N_PAGES (ph) * PAGE_SIZE)
# define ZERO_PAGE_HEADER(ph) memset (ph, '\0', sizeof (page_header))
#endif

#define div_PAGE_SIZE(x) (x >> LOG_PAGE_SIZE)
#define mult_PAGE_SIZE(x) (x << LOG_PAGE_SIZE)

#define BYTES_TO_PAGES(bytes) (div_PAGE_SIZE ((bytes + (PAGE_SIZE - 1))))

#define PAGE_SIZE_ALIGNMENT(address) \
  (void *) ((((EMACS_UINT) (address)) + PAGE_SIZE) & ~(PAGE_SIZE - 1))

#define PH_ON_FREE_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST))

#define PH_ON_USED_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST))


/* Number of mark bits: minimum 1, maximum 8. */
#define N_MARK_BITS 2
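
/* With N_MARK_BITS 2, a cell's mark is a two-bit value rather than a
   single on/off flag; the code below compares it against color
   constants such as BLACK and WHITE, which are defined elsewhere in
   the new-GC headers (presumably to allow more collector states than
   a plain marked/unmarked bit, e.g. for incremental tri-color
   marking). */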



/************************************************************************/
/*			      MC Allocator				*/
/************************************************************************/

/* Set to 1 if memory becomes short. */
EMACS_INT memory_shortage;

/*--- misc functions ---------------------------------------------------*/

/* Visits all pages (page_headers) hooked into the used heap pages
   list and executes f with the current page header as
   argument.  Needed for sweep.  Returns number of processed pages. */
static EMACS_INT
visit_all_used_page_headers (EMACS_INT (*f) (page_header *ph))
{
  EMACS_INT number_of_pages_processed = 0;
  EMACS_INT i;
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_FIRST (USED_HEAP_PAGES (i)))
      {
        page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i));
        while (PH_NEXT (ph))
          {
            page_header *next = PH_NEXT (ph); /* in case f removes the page */
            number_of_pages_processed += f (ph);
            ph = next;
          }
        number_of_pages_processed += f (ph);
      }
  return number_of_pages_processed;
}



/*--- mapping of heap addresses to page headers and mark bits ----------*/

/* Sets a heap pointer and page header pair into the lookup table. */
static void
set_lookup_table (void *ptr, page_header *ph)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    {
      l2 = (level_2_lookup_tree*)
        xmalloc_and_zero (sizeof (level_2_lookup_tree));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES +=
        malloced_storage_size (0, sizeof (level_2_lookup_tree), 0);
#endif
      memset (l2, '\0', sizeof (level_2_lookup_tree));
#ifdef USE_HASH_TABLE
      LEVEL2_HASH_LINK (l2) = PTR_LOOKUP_TABLE (l1_index);
#endif
      PTR_LOOKUP_TABLE (l1_index) = l2;
      LEVEL2_KEY (l2) = l1_index;
    }
  LEVEL2 (l2, L2_INDEX (ptr)) = ph;
}


#ifdef UNSET_LOOKUP_TABLE
/* Set the lookup table to 0 for given heap address. */
static void
unset_lookup_table (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (l2)
    {
      LEVEL2 (l2, L2_INDEX (ptr)) = 0;
    }
}
#endif

/* Returns the page header of a given heap address, or 0 if not in table.
   For internal use, no error checking. */
static page_header *
get_page_header_internal (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    return 0;
  return LEVEL2 (l2, L2_INDEX (ptr));
}

/* Returns the page header of a given heap address, or 0 if not in table. */
static page_header *
get_page_header (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
  assert (l2);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  assert (LEVEL2 (l2, L2_INDEX (ptr)));
  return LEVEL2 (l2, L2_INDEX (ptr));
}

/* Returns the mark bit index of a given heap address. */
static EMACS_INT
get_mark_bit_index (void *ptr, page_header *ph)
{
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  if (cell_size)
    return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size)
      * N_MARK_BITS;
  else /* only one object on page */
    return 0;
}
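
/* For example (following the arithmetic above): on a page of 16-byte
   cells, the cell at offset 48 from PH_HEAP_SPACE is cell number 3,
   so its mark starts at bit index 3 * N_MARK_BITS = 6 in the page's
   mark-bit store. */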


/* Adds addresses of pages to lookup table. */
static void
add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages)
{
  Rawbyte *p = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages);
  for (p = (Rawbyte *) PH_HEAP_SPACE (ph);
       (EMACS_INT) p < end_of_section; p += PAGE_SIZE)
    set_lookup_table (p, ph);
}


/* Initializes lookup table. */
static void
init_lookup_table (void)
{
  EMACS_INT i;
  for (i = 0; i < LEVEL1_SIZE; i++)
    PTR_LOOKUP_TABLE (i) = 0;
}



/*--- mark bits --------------------------------------------------------*/

/*--- bit operations ---*/

/* Allocates a bit array of length bits. */
static Rawbyte *
alloc_bit_array (size_t bits)
{
  Rawbyte *bit_array;
#ifdef USE_MARK_BITS_FREE_LIST
  size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte);
#else /* not USE_MARK_BITS_FREE_LIST */
  size_t size =
    ALIGN_FOR_TYPE (((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte),
                    Rawbyte *);
#endif /* not USE_MARK_BITS_FREE_LIST */
  if (size < sizeof (free_link)) size = sizeof (free_link);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0);
#endif
  bit_array = (Rawbyte *) xmalloc_and_zero (size);
  return bit_array;
}


/* Returns the bit value at pos. */
static EMACS_INT
get_bit (Rawbyte *bit_array, EMACS_INT pos)
{
#if N_MARK_BITS > 1
  EMACS_INT result = 0;
  EMACS_INT i;
#endif
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    result |= ((*bit_array & (1 << ((pos + i) % CHAR_BIT))) != 0) << i;
  return result;
#else
  return (*bit_array & (1 << (pos % CHAR_BIT))) != 0;
#endif
}


/* Sets the bits of bit_array at pos to val. */
static void
set_bit (Rawbyte *bit_array, EMACS_INT pos, EMACS_UINT val)
{
#if N_MARK_BITS > 1
  EMACS_INT i;
#endif
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    if ((val >> i) & 1)
      *bit_array |= 1 << ((pos + i) % CHAR_BIT);
    else
      *bit_array &= ~(1 << ((pos + i) % CHAR_BIT));
#else
  if (val)
    *bit_array |= 1 << (pos % CHAR_BIT);
  else
    *bit_array &= ~(1 << (pos % CHAR_BIT));
#endif
}


/*--- mark bit functions ---*/
#define USE_PNTR_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) > BITS_PER_EMACS_INT)
#define USE_WORD_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) <= BITS_PER_EMACS_INT)

#define GET_BIT_WORD(b, p) get_bit ((Rawbyte *) &b, p)
#define GET_BIT_PNTR(b, p) get_bit (b, p)

#define SET_BIT_WORD(b, p, v) set_bit ((Rawbyte *) &b, p, v)
#define SET_BIT_PNTR(b, p, v) set_bit (b, p, v)

#define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0
#define ZERO_MARK_BITS_PNTR(ph)				\
do {							\
  memset (PH_MARK_BITS (ph), '\0',			\
          ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS)	\
           + CHAR_BIT - 1) / CHAR_BIT * sizeof (Rawbyte)); \
} while (0)

#define GET_BIT(bit, ph, p)				\
do {							\
  if (USE_PNTR_MARK_BITS (ph))				\
    bit = GET_BIT_PNTR (PH_MARK_BITS (ph), p);		\
  else							\
    bit = GET_BIT_WORD (PH_MARK_BITS (ph), p);		\
} while (0)

#define SET_BIT(ph, p, v)				\
do {							\
  if (USE_PNTR_MARK_BITS (ph))				\
    SET_BIT_PNTR (PH_MARK_BITS (ph), p, v);		\
  else							\
    SET_BIT_WORD (PH_MARK_BITS (ph), p, v);		\
} while (0)

#define ZERO_MARK_BITS(ph)				\
do {							\
  if (USE_PNTR_MARK_BITS (ph))				\
    ZERO_MARK_BITS_PNTR (ph);				\
  else							\
    ZERO_MARK_BITS_WORD (ph);				\
} while (0)
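
/* For example (derived from the macros above, assuming 4096-byte
   pages and a 32-bit EMACS_INT): a page of 512-byte cells holds 8
   cells, so 8 * 2 = 16 mark bits fit in the mark_bits word itself
   (USE_WORD_MARK_BITS), while a page of 16-byte cells holds 256
   cells, needing 512 bits and therefore a separately allocated array
   (USE_PNTR_MARK_BITS). */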


/* Allocates mark-bit space either from a free list or from the OS
   for the given page header. */
static Rawbyte *
alloc_mark_bits (page_header *ph)
{
  Rawbyte *result;
#ifdef USE_MARK_BITS_FREE_LIST
  if (PH_MARK_BIT_FREE_LIST (ph) == 0)
    result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
  else
    {
      result = (Rawbyte *) PH_MARK_BIT_FREE_LIST (ph);
      PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result);
    }
#else /* not USE_MARK_BITS_FREE_LIST */
  result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
#endif /* not USE_MARK_BITS_FREE_LIST */
  return result;
}


/* Frees by maintaining a free list. */
static void
free_mark_bits (page_header *ph)
{
#ifdef USE_MARK_BITS_FREE_LIST
  NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph);
  PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph));
#else /* not USE_MARK_BITS_FREE_LIST */
  if (PH_MARK_BITS (ph))
    free (PH_MARK_BITS (ph));
#endif /* not USE_MARK_BITS_FREE_LIST */
}


/* Installs mark bits and zeros bits. */
static void
install_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    {
      PH_MARK_BITS (ph) = alloc_mark_bits (ph);
      ZERO_MARK_BITS_PNTR (ph);
    }
  else
    ZERO_MARK_BITS_WORD (ph);
}


/* Cleans and frees the mark bits of the given page_header. */
static void
remove_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    free_mark_bits (ph);
}


/* Zeros all mark bits in given header. */
static void
zero_mark_bits (page_header *ph)
{
  ZERO_MARK_BITS (ph);
}


/* Returns mark bit for given heap pointer. */
EMACS_INT
get_mark_bit (void *ptr)
{
  EMACS_INT bit = 0;
  page_header *ph = get_page_header (ptr);
  gc_checking_assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
    }
  return bit;
}


/* Sets mark bit for given heap pointer. */
void
set_mark_bit (void *ptr, EMACS_INT value)
{
  page_header *ph = get_page_header (ptr);
  assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      if (value == BLACK)
        if (!PH_BLACK_BIT (ph))
          PH_BLACK_BIT (ph) = 1;
      SET_BIT (ph, get_mark_bit_index (ptr, ph), value);
    }
}


/*--- page header functions --------------------------------------------*/

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
#include "blocktype.h"

struct page_header_blocktype
{
  Blocktype_declare (page_header);
} *the_page_header_blocktype;
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

/* Allocates a page header either from a free list or from the OS. */
static page_header *
alloc_page_header (void)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  page_header *result;
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
  result = Blocktype_alloc (the_page_header_blocktype);
  ZERO_PAGE_HEADER (result);
  return result;
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
  page_header *result;
  if (PAGE_HEADER_FREE_LIST == 0)
    {
      result =
        (page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
    }
  else
    {
      result = (page_header*) PAGE_HEADER_FREE_LIST;
      PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
    }
  return result;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}


/* Frees given page header by maintaining a free list. */
static void
free_page_header (page_header *ph)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  Blocktype_free (the_page_header_blocktype, ph);
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
#if ZERO_MEM
  ZERO_PAGE_HEADER (ph);
#endif
  NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
  PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}


/* Adds given page header to given page list header's list. */
static void
add_page_header_to_plh (page_header *ph, page_list_header *plh)
{
  /* insert at the front of the list */
  PH_PREV (ph) = 0;
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PLH (ph) = plh;
  /* if list is not empty, set prev in the first element */
  if (PLH_FIRST (plh))
    PH_PREV (PLH_FIRST (plh)) = ph;
  /* one element in list is first and last at the same time */
  PLH_FIRST (plh) = ph;
  if (!PLH_LAST (plh))
    PLH_LAST (plh) = ph;

#ifdef MEMORY_USAGE_STATS
  /* bump page count */
  PLH_PAGE_COUNT (plh)++;
#endif

}


/* Removes given page header from given page list header's list. */
static void
remove_page_header_from_plh (page_header *ph, page_list_header *plh)
{
  if (PLH_FIRST (plh) == ph)
    PLH_FIRST (plh) = PH_NEXT (ph);
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);

#ifdef MEMORY_USAGE_STATS
  /* decrease page count */
  PLH_PAGE_COUNT (plh)--;
#endif
}


/* Moves a page header to the front of its page header list.
   This is used during sweep: pages with some live objects are moved
   to the front.  This makes allocation faster, as all pages with free
   slots can be found at the front of the list. */
static void
move_page_header_to_front (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);
  /* is page already first? */
  if (ph == PLH_FIRST (plh)) return;
  /* remove from list */
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
  /* insert at the front */
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PREV (ph) = 0;
  PH_PREV (PH_NEXT (ph)) = ph;
  PLH_FIRST (plh) = ph;
}



/*--- page list functions ----------------------------------------------*/

/* Returns the index of the used heap list according to given size. */
static int
get_used_list_index (size_t size)
{
  if (size <= USED_LIST_MIN_OBJECT_SIZE)
    {
      // printf ("size %d -> index %d\n", size, 0);
      return 0;
    }
  if (size <= (size_t) USED_LIST_UPPER_THRESHOLD)
    {
      // printf ("size %d -> index %d\n", size,
      //	 ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
      //	  / USED_LIST_LIN_STEP) + 1);
      return ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
              / USED_LIST_LIN_STEP) + 1;
    }
  // printf ("size %d -> index %d\n", size, N_USED_PAGE_LISTS - 1);
  return N_USED_PAGE_LISTS - 1;
}

/* Returns the size of the used heap list according to given index. */
static size_t
get_used_list_size_value (int used_index)
{
  if (used_index < N_USED_PAGE_LISTS - 1)
    return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE;
  return 0;
}


/* Returns the index of the free heap list according to given size. */
static EMACS_INT
get_free_list_index (EMACS_INT n_pages)
{
  if (n_pages == 0)
    return 0;
  if (n_pages <= FREE_LIST_LOWER_THRESHOLD)
    return n_pages - 1;
  if (n_pages >= FREE_LIST_UPPER_THRESHOLD - 1)
    return N_FREE_PAGE_LISTS - 1;
  return ((n_pages - FREE_LIST_LOWER_THRESHOLD - 1)
          / FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}
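
/* For example (with the default thresholds): a 17-page block lands on
   free list 16, a 40-page block on list ((40 - 32 - 1) / 8) + 32 = 32,
   and anything of 255 pages or more on the last, unsorted list. */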


/* Returns the size in number of pages of the given free list at index. */
static size_t
get_free_list_size_value (EMACS_INT free_index)
{
  if (free_index < FREE_LIST_LOWER_THRESHOLD)
    return free_index + 1;
  if (free_index >= N_FREE_PAGE_LISTS)
    return FREE_LIST_UPPER_THRESHOLD;
  return ((free_index + 1 - FREE_LIST_LOWER_THRESHOLD)
          * FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}


Bytecount
mc_alloced_storage_size (Bytecount claimed_size, struct usage_stats *stats)
{
  size_t used_size =
    get_used_list_size_value (get_used_list_index (claimed_size));
  if (used_size == 0)
    used_size = mult_PAGE_SIZE (BYTES_TO_PAGES (claimed_size));

  if (stats)
    {
      stats->was_requested += claimed_size;
      stats->malloc_overhead += used_size - claimed_size;
    }

  return used_size;
}
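
/* For example (assuming a 32-bit build): a claimed_size of 10 bytes
   falls in used list 1, whose cells are 12 bytes, so was_requested
   grows by 10 and malloc_overhead by 2.  For an object above
   PAGE_SIZE_DIV_2, used_size is the whole-page footprint instead,
   e.g. 5000 bytes -> 2 pages -> 8192 bytes with 4096-byte pages. */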



/*--- free heap functions ----------------------------------------------*/

/* Frees a heap section, if the heap section is completely free */
static EMACS_INT
free_heap_section (page_header *ph)
{
  EMACS_INT i;
  EMACS_INT removed = 0;
  for (i = 0; i < N_HEAP_SECTIONS; i++)
    if (!removed)
      {
        if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
            && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
          {
            xfree_1 (HEAP_SECTION(i).real_start);
#ifdef MEMORY_USAGE_STATS
            MC_MALLOCED_BYTES
              -= malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
#endif

            HEAP_SIZE -= PH_N_PAGES (ph) * PAGE_SIZE;

            removed = 1;
          }
      }
    else
      {
        HEAP_SECTION(i-1).real_start = HEAP_SECTION(i).real_start;
        HEAP_SECTION(i-1).real_size = HEAP_SECTION(i).real_size;
        HEAP_SECTION(i-1).start = HEAP_SECTION(i).start;
        HEAP_SECTION(i-1).n_pages = HEAP_SECTION(i).n_pages;
      }

  N_HEAP_SECTIONS = N_HEAP_SECTIONS - removed;

  return removed;
}

/* Removes page from free list. */
static void
remove_page_from_free_list (page_header *ph)
{
  remove_page_header_from_plh (ph, PH_PLH (ph));
  PH_PLH (ph) = 0;
}


/* Adds page to the corresponding free list. */
static void
add_page_to_free_list (page_header *ph)
{
  PH_PLH (ph) = FREE_HEAP_PAGES (get_free_list_index (PH_N_PAGES (ph)));
  add_page_header_to_plh (ph, PH_PLH (ph));
}


/* Merges two adjacent pages. */
static page_header *
merge_pages (page_header *first_ph, page_header *second_ph)
{
  /* merge */
  PH_N_PAGES (first_ph) += PH_N_PAGES (second_ph);
  /* clean up left over page header */
  free_page_header (second_ph);
  /* update lookup table */
  add_pages_to_lookup_table (first_ph, PH_N_PAGES (first_ph));

  return first_ph;
}


/* Checks if pages are adjacent, merges them, and adds merged page to
   free list */
static void
merge_into_free_list (page_header *ph)
{
  /* check if you can coalesce adjacent pages */
  page_header *prev_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
                                       - PAGE_SIZE));
  page_header *succ_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
                                       + (PH_N_PAGES (ph) * PAGE_SIZE)));
  if (PH_ON_FREE_LIST_P (prev_ph))
    {
      remove_page_from_free_list (prev_ph);
      ph = merge_pages (prev_ph, ph);
    }
  if (PH_ON_FREE_LIST_P (succ_ph))
    {
      remove_page_from_free_list (succ_ph);
      ph = merge_pages (ph, succ_ph);
    }
  /* try to free heap_section, if the section is complete */
  if (!free_heap_section (ph))
    /* else add merged page to free list */
    add_page_to_free_list (ph);
}


/* Cuts given page header after n_pages, returns the first (cut) part,
   and puts the rest on the free list. */
static page_header *
split_page (page_header *ph, EMACS_INT n_pages)
{
  page_header *new_ph;
  EMACS_INT rem_pages = PH_N_PAGES (ph) - n_pages;

  /* remove the page from the free list if already hooked in */
  if (PH_PLH (ph))
    remove_page_from_free_list (ph);
  /* set new number of pages */
  PH_N_PAGES (ph) = n_pages;
  /* add new page to lookup table */
  add_pages_to_lookup_table (ph, n_pages);

  if (rem_pages)
    {
      /* build new page with remainder */
      new_ph = alloc_page_header ();
      PH_N_PAGES (new_ph) = rem_pages;
      PH_HEAP_SPACE (new_ph) =
        (void*) ((EMACS_INT) (PH_HEAP_SPACE (ph)) + (n_pages * PAGE_SIZE));
      /* add new page to lookup table */
      add_pages_to_lookup_table (new_ph, rem_pages);
      /* hook the rest into free list */
      add_page_to_free_list (new_ph);
    }
  return ph;
}


/* Expands the heap by given number of pages. */
static page_header *
expand_heap (EMACS_INT needed_pages)
{
  page_header *ph;
  EMACS_INT n_pages;
  size_t real_size;
  void *real_start;

  /* determine number of pages the heap should grow */
  if (memory_shortage)
    n_pages = needed_pages;
  else
    n_pages = max (MIN_HEAP_INCREASE,
                   needed_pages
                   + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR)));

  /* get the real values */
  real_size = (n_pages * PAGE_SIZE) + PAGE_SIZE;
  real_start = xmalloc_and_zero (real_size);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, real_size, 0);
#endif

  /* maintain heap section count */
  if (N_HEAP_SECTIONS >= MAX_HEAP_SECTS)
    {
      stderr_out ("Increase number of MAX_HEAP_SECTS");
      ABORT ();
    }
  HEAP_SECTION(N_HEAP_SECTIONS).real_start = real_start;
  HEAP_SECTION(N_HEAP_SECTIONS).real_size = real_size;
  HEAP_SECTION(N_HEAP_SECTIONS).start = PAGE_SIZE_ALIGNMENT (real_start);
  HEAP_SECTION(N_HEAP_SECTIONS).n_pages = n_pages;
  N_HEAP_SECTIONS++;

  /* get page header */
  ph = alloc_page_header ();

  /* setup page header */
  PH_N_PAGES (ph) = n_pages;
  PH_HEAP_SPACE (ph) = PAGE_SIZE_ALIGNMENT (real_start);
  assert (((EMACS_INT) (PH_HEAP_SPACE (ph)) % PAGE_SIZE) == 0);
  HEAP_SIZE += n_pages * PAGE_SIZE;

  /* this was also done by allocate_lisp_storage */
  if (need_to_check_c_alloca)
    xemacs_c_alloca (0);

  /* return needed size, put rest on free list */
  return split_page (ph, needed_pages);
}
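
/* Note on the extra PAGE_SIZE added to real_size above:
   xmalloc_and_zero gives no alignment guarantee, so one additional
   page is requested and PAGE_SIZE_ALIGNMENT rounds the start up to
   the next page boundary; the n_pages usable pages then always begin
   page-aligned, which the lookup table and the div/mult_PAGE_SIZE
   shifts rely on. */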



/*--- used heap functions ----------------------------------------------*/
/* Installs initial free list. */
static void
install_cell_free_list (page_header *ph, EMACS_INT elemcount)
{
  Rawbyte *p;
  EMACS_INT i;
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  /* write initial free list if cell_size is < PAGE_SIZE */
  p = (Rawbyte *) PH_HEAP_SPACE (ph);
  for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
    {
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (p));
      MARK_LRECORD_AS_FREE (p);
#endif
      if (elemcount == 1)
        NEXT_FREE (p) = FREE_LIST (p + cell_size);
      set_lookup_table (p, ph);
      p += cell_size;
    }
#ifdef ERROR_CHECK_GC
  assert (!LRECORD_FREE_P (p));
  MARK_LRECORD_AS_FREE (p);
#endif
  NEXT_FREE (p) = 0;
  set_lookup_table (p, ph);

  /* hook free list into header */
  PH_FREE_LIST (ph) = FREE_LIST (PH_HEAP_SPACE (ph));
}
1204 | |
1205 /* Cleans the object space of the given page_header. */ | |
1206 static void | |
1207 remove_cell_free_list (page_header *ph) | |
1208 { | |
1209 #if ZERO_MEM | |
1210 ZERO_HEAP_SPACE (ph); | |
1211 #endif | |
1212 PH_FREE_LIST (ph) = 0; | |
1213 } | |
1214 | |
1215 | |
1216 /* Installs a new page and hooks it into given page_list_header. */ | |
1217 static page_header * | |
1218 install_page_in_used_list (page_header *ph, page_list_header *plh, | |
3092 | 1219 size_t size, EMACS_INT elemcount) |
2720 | 1220 { |
1221 /* add to list */ | |
1222 add_page_header_to_plh (ph, plh); | |
1223 | |
1224 /* determine cell size */ | |
1225 if (PLH_SIZE (plh)) | |
1226 PH_CELL_SIZE (ph) = PLH_SIZE (plh); | |
1227 else | |
1228 PH_CELL_SIZE (ph) = size; | |
3092 | 1229 if (elemcount == 1) |
1230 PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph); | |
1231 else | |
1232 { | |
1233 PH_CELLS_ON_PAGE (ph) = elemcount; | |
1234 PH_ARRAY_BIT (ph) = 1; | |
1235 } | |
2720 | 1236 |
1237 /* init cell count */ | |
1238 PH_CELLS_USED (ph) = 0; | |
1239 | |
1240 /* install mark bits and initialize cell free list */ | |
3092 | 1241 install_mark_bits (ph); |
2720 | 1242 |
3092 | 1243 install_cell_free_list (ph, elemcount); |
2720 | 1244 |
1245 #ifdef MEMORY_USAGE_STATS | |
1246 PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph); | |
1247 PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph); | |
1248 #endif | |
1249 | |
1250 return ph; | |
1251 } | |
1252 | |
1253 | |
1254 /* Cleans and frees a page, identified by the given page_header. */ | |
1255 static void | |
1256 remove_page_from_used_list (page_header *ph) | |
1257 { | |
1258 page_list_header *plh = PH_PLH (ph); | |
1259 | |
  assert (!(gc_in_progress && PH_PROTECTION_BIT (ph)));
  /* cleanup: remove memory protection, zero page_header bits. */

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
#endif

  /* clean up mark bits and cell free list */
  remove_cell_free_list (ph);
  if (PH_ON_USED_LIST_P (ph))
    remove_mark_bits (ph);

  /* clean up page header */
  PH_CELL_SIZE (ph) = 0;
  PH_CELLS_ON_PAGE (ph) = 0;
  PH_CELLS_USED (ph) = 0;

  /* remove from used list */
  remove_page_header_from_plh (ph, plh);

  /* move to free list */
  merge_into_free_list (ph);
}



/*--- allocation -------------------------------------------------------*/

/* Allocates from cell free list on already allocated pages. */
static page_header *
allocate_cell (page_list_header *plh)
{
  page_header *ph = PLH_FIRST (plh);
  if (ph)
    {
      if (PH_FREE_LIST (ph))
        /* elements free on first page */
        return ph;
      else if ((PH_NEXT (ph))
               && (PH_FREE_LIST (PH_NEXT (ph))))
        /* elements free on second page */
        {
          page_header *temp = PH_NEXT (ph);
          /* move full page (first page) to end of list */
          PH_NEXT (PLH_LAST (plh)) = ph;
          PH_PREV (ph) = PLH_LAST (plh);
          PLH_LAST (plh) = ph;
          PH_NEXT (ph) = 0;
          /* install second page as first page */
          ph = temp;
          PH_PREV (ph) = 0;
          PLH_FIRST (plh) = ph;
          return ph;
        }
    }
  return 0;
}


/* Finds a page which has at least the needed number of pages.
   Algorithm: FIRST FIT. */
static page_header *
find_free_page_first_fit (EMACS_INT needed_pages, page_header *ph)
{
  while (ph)
    {
      if (PH_N_PAGES (ph) >= needed_pages)
        return ph;
      ph = PH_NEXT (ph);
    }
  return 0;
}


/* Allocates a page from the free list. */
static page_header *
allocate_page_from_free_list (EMACS_INT needed_pages)
{
  page_header *ph = 0;
  EMACS_INT i;
  for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
    if ((ph = find_free_page_first_fit (needed_pages,
                                        PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
      {
        if (PH_N_PAGES (ph) > needed_pages)
          return split_page (ph, needed_pages);
        else
          {
            remove_page_from_free_list (ph);
            return ph;
          }
      }
  return 0;
}


/* Allocates a new page, either from free list or by expanding the heap. */
static page_header *
allocate_new_page (page_list_header *plh, size_t size, EMACS_INT elemcount)
{
  EMACS_INT needed_pages = BYTES_TO_PAGES (size * elemcount);
  /* first check free list */
  page_header *result = allocate_page_from_free_list (needed_pages);
  if (!result)
    /* expand heap */
    result = expand_heap (needed_pages);
  install_page_in_used_list (result, plh, size, elemcount);
  return result;
}


/* Selects the correct size class and tries to allocate a cell of this
   size from the free list; if this fails, a new page is allocated. */
static void *
mc_alloc_1 (size_t size, EMACS_INT elemcount)
{
  page_list_header *plh = 0;
  page_header *ph = 0;
  void *result = 0;

  plh = USED_HEAP_PAGES (get_used_list_index (size));

  if (size == 0)
    return 0;
  if ((elemcount == 1) && (size < (size_t) PAGE_SIZE_DIV_2))
    /* first check any free cells */
    ph = allocate_cell (plh);
  if (!ph)
    /* allocate a new page */
    ph = allocate_new_page (plh, size, elemcount);

  /* return first element of free list and remove it from the list */
  result = (void*) PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) =
    NEXT_FREE (PH_FREE_LIST (ph));

  memset (result, '\0', (size * elemcount));
  MARK_LRECORD_AS_FREE (result);

  /* bump used cells counter */
  PH_CELLS_USED (ph) += elemcount;

#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (plh) += elemcount;
  PLH_USED_SPACE (plh) += size * elemcount;
#endif

  return result;
}

/* Array allocation. */
void *
mc_alloc_array (size_t size, EMACS_INT elemcount)
{
  return mc_alloc_1 (size, elemcount);
}

void *
mc_alloc (size_t size)
{
  return mc_alloc_1 (size, 1);
}
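
/* A typical call looks like this (a sketch, not taken from this file;
   "struct my_record" is a hypothetical lrecord-style type):
     struct my_record *p =
       (struct my_record *) mc_alloc (sizeof (struct my_record));
   The returned cell is zeroed, already registered in the pointer
   lookup table, and rounded up to its size class (see
   mc_alloced_storage_size above); arrays of elemcount elements go
   through mc_alloc_array instead. */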



/*--- sweep & free & finalize ------------------------------------------*/

/* Frees a heap pointer. */
static void
remove_cell (void *ptr, page_header *ph)
{
#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (PH_PLH (ph))--;
  if (PH_ON_USED_LIST_P (ph))
    PLH_USED_SPACE (PH_PLH (ph)) -=
      detagged_lisp_object_size ((const struct lrecord_header *) ptr);
  else
    PLH_USED_SPACE (PH_PLH (ph)) -= PH_CELL_SIZE (ph);
#endif
  if (PH_ON_USED_LIST_P (ph))
    {
#ifdef ALLOC_TYPE_STATS
      dec_lrecord_stats (PH_CELL_SIZE (ph),
                         (const struct lrecord_header *) ptr);
#endif /* ALLOC_TYPE_STATS */
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (ptr));
      deadbeef_memory (ptr, PH_CELL_SIZE (ph));
      MARK_LRECORD_AS_FREE (ptr);
#endif
    }

  /* hook cell into free list */
  NEXT_FREE (ptr) = PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) = FREE_LIST (ptr);
  /* decrease cells used */
  PH_CELLS_USED (ph)--;
}


/* Mark free list marks all free list entries. */
static void
mark_free_list (page_header *ph)
{
  free_link *fl = PH_FREE_LIST (ph);
  while (fl)
    {
      SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK);
      fl = NEXT_FREE (fl);
    }
}


/* Finalize a page for disksave.  XEmacs calls this routine before it
   dumps the heap image.  You have to tell mc-alloc how to call your
   object's finalizer for disksave.  Therefore, you have to define the
   macro MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr).  This macro should
   do nothing other than test if there is a finalizer and call it on
   the given argument, which is the heap address of the object.
   Returns number of processed pages. */
static EMACS_INT
finalize_page_for_disksave (page_header *ph)
{
  EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);

  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
      MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
    }
  return 1;
}


/* Finalizes the heap for disksave.  Returns number of processed
   pages. */
EMACS_INT
mc_finalize_for_disksave (void)
{
  return visit_all_used_page_headers (finalize_page_for_disksave);
}


/* Sweeps a page: all the non-marked cells are freed.  If the page is
   empty in the end, it is removed.  If some cells are free, it is
   moved to the front of its page header list.  Full pages stay where
   they are.  Returns number of processed pages. */
static EMACS_INT
sweep_page (page_header *ph)
{
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;

  mark_free_list (ph);

  /* ARRAY_BIT_HACK */
  if (PH_ARRAY_BIT (ph))
    for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
      {
        GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
        if (bit)
          {
            zero_mark_bits (ph);
            PH_BLACK_BIT (ph) = 0;
            return 1;
          }
      }

  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == WHITE)
        {
          GC_STAT_FREED;
          remove_cell (heap_space + (heap_space_step * mark_bit), ph);
        }
    }
  zero_mark_bits (ph);
  PH_BLACK_BIT (ph) = 0;
  if (PH_CELLS_USED (ph) == 0)
    remove_page_from_used_list (ph);
  else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
    move_page_header_to_front (ph);

  return 1;
}


/* Sweeps the heap.  Returns number of processed pages. */
EMACS_INT
mc_sweep (void)
{
  return visit_all_used_page_headers (sweep_page);
}


/* Changes the size of the cell pointed to by ptr.
   Returns the new address of the new cell with new size. */
static void *
mc_realloc_1 (void *ptr, size_t size, int elemcount)
{
  if (ptr)
    {
      if (size * elemcount)
        {
          void *result = mc_alloc_1 (size, elemcount);
          size_t from_size = PH_CELL_SIZE (get_page_header (ptr));
          size_t cpy_size = size * elemcount;
          if (cpy_size > from_size)
            cpy_size = from_size;
          memcpy (result, ptr, cpy_size);
#ifdef ALLOC_TYPE_STATS
          inc_lrecord_stats (size, (struct lrecord_header *) result);
#endif /* ALLOC_TYPE_STATS */
          return result;
        }
      else
        {
          return 0;
        }
    }
  else
    return mc_alloc_1 (size, elemcount);
}

void *
mc_realloc (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 1);
}

void *
mc_realloc_array (void *ptr, size_t size, EMACS_INT elemcount)
{
  return mc_realloc_1 (ptr, size, elemcount);
}
1604 | |
1605 | |
1606 | |
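/* Illustrative sketch, not code from XEmacs: a typical way to grow a
   buffer with the entry points above (mc_alloc is declared in
   mc-alloc.h). mc_realloc copies the old cell's contents into a
   freshly allocated cell, up to the smaller of the two cell sizes;
   the old cell is not freed explicitly but reclaimed by the next
   sweep. Variable names and sizes are hypothetical. */
#if 0
{
  EMACS_INT *vec = (EMACS_INT *) mc_alloc (10 * sizeof (EMACS_INT));
  /* ... use the first 10 elements ... */
  vec = (EMACS_INT *) mc_realloc_array (vec, sizeof (EMACS_INT), 20);
  /* vec now points to a cell with room for 20 elements; the first
     10 are preserved. */
}
#endif
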
/*--- initialization ---------------------------------------------------*/

/* Call once at the very beginning. */
void
init_mc_allocator (void)
{
  EMACS_INT i;

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES = 0;
#endif

  /* Initialize the pagesize-dependent values. */
  switch (SYS_PAGE_SIZE)
    {
    case 512: log_page_size = 9; break;
    case 1024: log_page_size = 10; break;
    case 2048: log_page_size = 11; break;
    case 4096: log_page_size = 12; break;
    case 8192: log_page_size = 13; break;
    case 16384: log_page_size = 14; break;
    case 32768: log_page_size = 15; break;
    case 65536: log_page_size = 16; break;
    default:
      fprintf (stderr, "##### SYS_PAGE_SIZE=%d not supported #####\n",
               (int) SYS_PAGE_SIZE);
      ABORT ();
    }

  page_size_div_2 = (EMACS_UINT) SYS_PAGE_SIZE >> 1;
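
/* Illustrative sketch, not code from XEmacs: since valid page sizes
   are powers of 2, the switch above could equivalently compute
   log_page_size for an arbitrary power-of-two page size: */
#if 0
  {
    EMACS_INT sz = SYS_PAGE_SIZE;
    log_page_size = 0;
    while (sz > 1)
      {
        sz >>= 1;
        log_page_size++;
      }
    /* If SYS_PAGE_SIZE was not a power of 2, bail out as above. */
    if (((EMACS_INT) 1 << log_page_size) != SYS_PAGE_SIZE)
      ABORT ();
  }
#endif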

  mc_allocator_globals.used_heap_pages =
    (page_list_header *) xmalloc_and_zero ((N_USED_PAGE_LISTS + 1)
                                           * sizeof (page_list_header));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (N_USED_PAGE_LISTS + 1) * sizeof (page_list_header);
#endif

  mc_allocator_globals.ptr_lookup_table =
    (level_2_lookup_tree **)
    xmalloc_and_zero ((LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *);
#endif

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  the_page_header_blocktype = Blocktype_new (struct page_header_blocktype);
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    {
      page_list_header *plh = USED_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = USED_LIST;
      PLH_SIZE (plh) = get_used_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    {
      page_list_header *plh = FREE_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = FREE_LIST;
      PLH_SIZE (plh) = get_free_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  PAGE_HEADER_FREE_LIST = 0;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += sizeof (mc_allocator_globals);
#endif

  init_lookup_table ();
}




/*--- lisp function for statistics -------------------------------------*/

#ifdef MEMORY_USAGE_STATS
DEFUN ("mc-alloc-memory-usage", Fmc_alloc_memory_usage, 0, 0, 0, /*
Returns stats about the mc-alloc memory usage. See diagnose.el.
*/
       ())
{
  Lisp_Object free_plhs = Qnil;
  Lisp_Object used_plhs = Qnil;
  Lisp_Object heap_sects = Qnil;
  EMACS_INT used_size = 0;
  EMACS_INT real_size = 0;

  EMACS_INT i;

  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (FREE_HEAP_PAGES (i)) > 0)
      free_plhs =
        acons (make_int (PLH_SIZE (FREE_HEAP_PAGES (i))),
               list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES (i)))),
               free_plhs);

  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (USED_HEAP_PAGES (i)) > 0)
      used_plhs =
        acons (make_int (PLH_SIZE (USED_HEAP_PAGES (i))),
               list5 (make_int (PLH_PAGE_COUNT (USED_HEAP_PAGES (i))),
                      make_int (PLH_USED_CELLS (USED_HEAP_PAGES (i))),
                      make_int (PLH_USED_SPACE (USED_HEAP_PAGES (i))),
                      make_int (PLH_TOTAL_CELLS (USED_HEAP_PAGES (i))),
                      make_int (PLH_TOTAL_SPACE (USED_HEAP_PAGES (i)))),
               used_plhs);

  for (i = 0; i < N_HEAP_SECTIONS; i++)
    {
      used_size += HEAP_SECTION (i).n_pages * PAGE_SIZE;
      real_size +=
        malloced_storage_size (0, HEAP_SECTION (i).real_size, 0);
    }

  heap_sects =
    list3 (make_int (N_HEAP_SECTIONS),
           make_int (used_size),
           make_int (real_size));

  return Fcons (make_int (PAGE_SIZE),
                list5 (heap_sects,
                       Fnreverse (used_plhs),
                       Fnreverse (free_plhs),
                       make_int (sizeof (mc_allocator_globals)),
                       make_int (MC_MALLOCED_BYTES)));
}
#endif /* MEMORY_USAGE_STATS */
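
/* For reference (derived from the list construction above; the
   consumer is diagnose.el): the return value of
   `mc-alloc-memory-usage' has the shape

     (PAGE-SIZE
      (N-HEAP-SECTIONS USED-SIZE REAL-SIZE)            ; heap_sects
      ((CELL-SIZE N-PAGES USED-CELLS USED-SPACE
                  TOTAL-CELLS TOTAL-SPACE) ...)        ; used_plhs
      ((CELL-SIZE N-PAGES) ...)                        ; free_plhs
      GLOBALS-SIZE
      MC-MALLOCED-BYTES)  */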

void
syms_of_mc_alloc (void)
{
#ifdef MEMORY_USAGE_STATS
  DEFSUBR (Fmc_alloc_memory_usage);
#endif /* MEMORY_USAGE_STATS */
}


/*--- incremental garbage collector ----------------------------------*/

#if 0 /* currently unused */

/* access dirty bit of page header */
static void
set_dirty_bit (page_header *ph, unsigned int value)
{
  PH_DIRTY_BIT (ph) = value;
}

static void
set_dirty_bit_for_address (void *ptr, unsigned int value)
{
  set_dirty_bit (get_page_header (ptr), value);
}

static unsigned int
get_dirty_bit (page_header *ph)
{
  return PH_DIRTY_BIT (ph);
}

static unsigned int
get_dirty_bit_for_address (void *ptr)
{
  return get_dirty_bit (get_page_header (ptr));
}


/* access protection bit of page header */
static void
set_protection_bit (page_header *ph, unsigned int value)
{
  PH_PROTECTION_BIT (ph) = value;
}

static void
set_protection_bit_for_address (void *ptr, unsigned int value)
{
  set_protection_bit (get_page_header (ptr), value);
}

static unsigned int
get_protection_bit (page_header *ph)
{
  return PH_PROTECTION_BIT (ph);
}

static unsigned int
get_protection_bit_for_address (void *ptr)
{
  return get_protection_bit (get_page_header (ptr));
}


/* Returns the start of the page of the object pointed to by ptr. */
static void *
get_page_start (void *ptr)
{
  return PH_HEAP_SPACE (get_page_header (ptr));
}

#endif /* 0 */

/* Make PAGE_SIZE globally available. */
EMACS_INT
mc_get_page_size (void)
{
  return PAGE_SIZE;
}

/* Is the fault at ptr on a protected page? */
EMACS_INT
fault_on_protected_page (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph
          && PH_HEAP_SPACE (ph)
          && (PH_HEAP_SPACE (ph) <= ptr)
          && ((void *) ((EMACS_INT) PH_HEAP_SPACE (ph)
                        + PH_N_PAGES (ph) * PAGE_SIZE) > ptr)
          && (PH_PROTECTION_BIT (ph) == 1));
}
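
/* Illustrative sketch, not code from XEmacs: how a platform fault
   handler might drive the functions in this file. The handler name
   and the way the faulting address is obtained are hypothetical; the
   real handlers live in the platform-specific vdb files. */
#if 0
static void
hypothetical_vdb_fault_handler (void *fault_addr)
{
  if (fault_on_protected_page (fault_addr))
    /* First write to this protected page: drop the protection and
       remember the page as dirty so that its objects can be
       re-scanned before the collection finishes. */
    unprotect_page_and_mark_dirty (fault_addr);
  else
    /* Not a write-barrier fault; treat as a genuine crash. */
    ABORT ();
}
#endif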


/* Protect the heap page of the given page header ph if black objects
   are on the page. Returns number of processed pages. */
static EMACS_INT
protect_heap_page (page_header *ph)
{
  if (PH_BLACK_BIT (ph))
    {
      void *heap_space = PH_HEAP_SPACE (ph);
      EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
      vdb_protect ((void *) heap_space, heap_space_size);
      PH_PROTECTION_BIT (ph) = 1;
      return 1;
    }
  return 0;
}

/* Protect all heap pages with black objects. Returns number of
   processed pages. */
EMACS_INT
protect_heap_pages (void)
{
  return visit_all_used_page_headers (protect_heap_page);
}


/* Remove the protection (if any) from the heap page of the given
   page header ph. Returns number of processed pages. */
static EMACS_INT
unprotect_heap_page (page_header *ph)
{
  if (PH_PROTECTION_BIT (ph))
    {
      void *heap_space = PH_HEAP_SPACE (ph);
      EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
      vdb_unprotect (heap_space, heap_space_size);
      PH_PROTECTION_BIT (ph) = 0;
      return 1;
    }
  return 0;
}

/* Remove the protection from all protected heap pages. Returns
   number of processed pages. */
EMACS_INT
unprotect_heap_pages (void)
{
  return visit_all_used_page_headers (unprotect_heap_page);
}
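
/* Illustrative sketch, not code from XEmacs (the callers live outside
   this file): the two entry points above bracket the write-barrier
   phase of an incremental collection. */
#if 0
{
  /* After a mark pass, trap further writes to pages that hold black
     (fully scanned) objects. */
  protect_heap_pages ();
  /* ... the mutator runs; write faults are resolved via
     unprotect_page_and_mark_dirty () and the dirty pages' objects
     are later re-pushed with repush_all_objects_on_page () ... */
  /* Before the collection finishes, drop any remaining
     protections. */
  unprotect_heap_pages ();
}
#endif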

/* Remove the protection of the page containing ptr and mark the page
   dirty. */
void
unprotect_page_and_mark_dirty (void *ptr)
{
  page_header *ph = get_page_header (ptr);
  unprotect_heap_page (ph);
  PH_DIRTY_BIT (ph) = 1;
}

/* Repush all black objects of the (dirty) page containing ptr onto
   the mark stack. Returns the number of repushed objects. */
int
repush_all_objects_on_page (void *ptr)
{
  int repushed_objects = 0;
  page_header *ph = get_page_header (ptr);
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == BLACK)
        {
          repushed_objects++;
          gc_write_barrier
            (wrap_pointer_1 ((heap_space + (heap_space_step * mark_bit))));
        }
    }
  PH_BLACK_BIT (ph) = 0;
  PH_DIRTY_BIT (ph) = 0;
  return repushed_objects;
}

/* Mark the object black if it is currently grey. This first checks
   whether the object is really allocated on the mc-heap; if it is,
   it can be marked black, otherwise it cannot be marked. */
EMACS_INT
maybe_mark_black (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  unsigned int bit = 0;

  if (ph && PH_PLH (ph) && PH_ON_USED_LIST_P (ph))
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
      if (bit == GREY)
        {
          if (!PH_BLACK_BIT (ph))
            PH_BLACK_BIT (ph) = 1;
          SET_BIT (ph, get_mark_bit_index (ptr, ph), BLACK);
        }
      return 1;
    }
  return 0;
}

/* Only for debugging --- not used anywhere in the sources. */
EMACS_INT
object_on_heap_p (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph && PH_ON_USED_LIST_P (ph));
}
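
/* Illustrative note: since object_on_heap_p is referenced nowhere,
   it is meant to be called by hand, e.g. from a debugger:

     (gdb) print object_on_heap_p ((void *) 0x12345678)

   A non-zero result means the address lies within a used mc-alloc
   page. The address shown is of course hypothetical. */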