/* Source: XEmacs src/mc-alloc.c, Mercurial revision 5216:9b8c2168d231
   ("Allocate lrecord arrays in own size class", Marcus Crestani,
   2010-05-29; parent e374ea766cc1, child 2cc24c69446c). */
2720 | 1 /* New size-based allocator for XEmacs. |
2 Copyright (C) 2005 Marcus Crestani. | |
5042
f395ee7ad844
Fix some compile warnings, make vdb test code conditional on DEBUG_XEMACS
Ben Wing <ben@xemacs.org>
parents:
4125
diff
changeset
|
3 Copyright (C) 2010 Ben Wing. |
2720 | 4 |
5 This file is part of XEmacs. | |
6 | |
7 XEmacs is free software; you can redistribute it and/or modify it | |
8 under the terms of the GNU General Public License as published by the | |
9 Free Software Foundation; either version 2, or (at your option) any | |
10 later version. | |
11 | |
12 XEmacs is distributed in the hope that it will be useful, but WITHOUT | |
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 for more details. | |
16 | |
17 You should have received a copy of the GNU General Public License | |
18 along with XEmacs; see the file COPYING. If not, write to | |
19 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
20 Boston, MA 02111-1307, USA. */ | |
21 | |
22 /* Synched up with: Not in FSF. */ | |
23 | |
24 #include <config.h> | |
3092 | 25 |
2720 | 26 #include "lisp.h" |
27 #include "mc-alloc.h" | |
3092 | 28 #include "getpagesize.h" |
29 | |
30 | |
31 #if 0 | |
32 # define USE_MARK_BITS_FREE_LIST 1 | |
33 #endif | |
34 #if 1 | |
35 # define BLOCKTYPE_ALLOC_PAGE_HEADER 1 | |
36 #endif | |
37 | |
38 /* Memory protection needs the real system-dependent pagesize. */ | |
39 #ifndef WIN32_NATIVE | |
40 #include <unistd.h> /* for getpagesize () */ | |
41 #endif | |
42 #if defined (HAVE_GETPAGESIZE) | |
43 # define SYS_PAGE_SIZE getpagesize () | |
44 #elif defined (_SC_PAGESIZE) | |
45 # define SYS_PAGE_SIZE sysconf (_SC_PAGESIZE) | |
46 #elif defined (_SC_PAGE_SIZE) | |
47 # define SYS_PAGE_SIZE sysconf (_SC_PAGE_SIZE) | |
48 #elif defined(get_page_size) | |
49 # define SYS_PAGE_SIZE get_page_size () | |
50 #elif defined(PAGESIZE) | |
51 # define SYS_PAGE_SIZE PAGESIZE | |
52 #elif defined(PAGE_SIZE) | |
53 # define SYS_PAGE_SIZE PAGE_SIZE | |
54 #else | |
55 /* Valid page sizes are powers of 2. */ | |
56 # define SYS_PAGE_SIZE 4096 | |
57 #endif | |
2720 | 58 |
59 | |
60 /*--- configurable values ----------------------------------------------*/ | |
61 | |
62 /* Definition of size classes */ | |
63 | |
64 /* Heap used list constants: In the used heap, it is important to | |
65 quickly find a free spot for a new object. Therefore the size | |
66 classes of the used heap are defined by the size of the cells on | |
67 the pages. The size classes should match common object sizes, to | |
68 avoid wasting memory. */ | |
69 | |
70 /* Minimum object size in bytes. */ | |
3092 | 71 #if BITS_PER_EMACS_INT > 32 |
72 # define USED_LIST_MIN_OBJECT_SIZE 16 | |
73 #else | |
74 # define USED_LIST_MIN_OBJECT_SIZE 8 | |
75 #endif | |
2720 | 76 |
77 /* The step size by which the size classes increase (up to upper | |
78 threshold). This many bytes are mapped to a single used list: */ | |
3092 | 79 #if BITS_PER_EMACS_INT > 32 |
80 # define USED_LIST_LIN_STEP 8 | |
81 #else | |
82 # define USED_LIST_LIN_STEP 4 | |
83 #endif | |
2720 | 84 |
85 /* The upper threshold should always be set to PAGE_SIZE/2, because if | |
86 a object is larger than PAGE_SIZE/2 there is no room for any other | |
87 object on this page. Objects this big are kept in the page list of | |
88 the multiple pages, since a quick search for free spots is not | |
89 needed for this kind of pages (because there are no free spots). | |
90 PAGE_SIZES_DIV_2 defines maximum size of a used space list. */ | |
3092 | 91 #define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2 |
2720 | 92 |
93 | |
94 /* Heap free list constants: In the unused heap, the size of | |
95 consecutive memory tips the scales. A page is smallest entity which | |
96 is asked for. Therefore, the size classes of the unused heap are | |
97 defined by the number of consecutive pages. */ | |
98 /* Sizes up to this many pages each have their own free list. */ | |
99 #define FREE_LIST_LOWER_THRESHOLD 32 | |
100 /* The step size by which the size classes increase (up to upper | |
101 threshold). FREE_LIST_LIN_STEP number of sizes are mapped to a | |
102 single free list for sizes between FREE_LIST_LOWER_THRESHOLD and | |
103 FREE_LIST_UPPER_THRESHOLD. */ | |
104 #define FREE_LIST_LIN_STEP 8 | |
105 /* Sizes of at least this many pages are mapped to a single free | |
106 list. Blocks of memory larger than this number are all kept in a | |
107 single list, which makes searching this list slow. But objects that | |
108 big are really seldom. */ | |
109 #define FREE_LIST_UPPER_THRESHOLD 256 | |
110 | |
111 | |
3092 | 112 /* used heap list count */ |
113 #define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \ | |
114 - USED_LIST_MIN_OBJECT_SIZE) \ | |
115 / USED_LIST_LIN_STEP) + 1 ) + 1 | |
116 | |
117 /* free heap list count */ | |
118 #define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \ | |
119 - FREE_LIST_LOWER_THRESHOLD) \ | |
120 / FREE_LIST_LIN_STEP) \ | |
121 + FREE_LIST_LOWER_THRESHOLD) | |
122 | |
123 | |
2720 | 124 /* Maximum number of separately added heap sections. */ |
125 #if BITS_PER_EMACS_INT > 32 | |
126 # define MAX_HEAP_SECTS 2048 | |
127 #else | |
128 # define MAX_HEAP_SECTS 768 | |
129 #endif | |
130 | |
131 | |
132 /* Heap growth constants. Heap increases by any number between the | |
133 boundaries (unit is PAGE_SIZE). */ | |
3092 | 134 #define MIN_HEAP_INCREASE 256 |
2720 | 135 #define MAX_HEAP_INCREASE 256 /* not used */ |
136 | |
137 /* Every heap growth is calculated like this: | |
138 needed_pages + ( HEAP_SIZE / ( PAGE_SIZE * HEAP_GROWTH_DIVISOR )). | |
139 So the growth of the heap is influenced by the current size of the | |
140 heap, but kept between MIN_HEAP_INCREASE and MAX_HEAP_INCREASE | |
141 boundaries. | |
142 This reduces the number of heap sectors, the larger the heap grows | |
143 the larger are the newly allocated chunks. */ | |
144 #define HEAP_GROWTH_DIVISOR 3 | |
145 | |
146 | |
147 /* Zero memory before putting on free lists. */ | |
148 #define ZERO_MEM 1 | |
149 | |
150 | |
151 #ifndef CHAR_BIT /* should be included by limits.h */ | |
152 # define CHAR_BIT BITS_PER_CHAR | |
153 #endif | |
154 | |
155 | |
3092 | 156 |
157 /*--- values depending on PAGE_SIZE ------------------------------------*/ | |
2720 | 158 |
3092 | 159 /* initialized in init_mc_allocator () */ |
160 static EMACS_INT log_page_size; | |
161 static EMACS_INT page_size_div_2; | |
2720 | 162 |
3092 | 163 #undef PAGE_SIZE |
164 #define PAGE_SIZE SYS_PAGE_SIZE | |
165 #define LOG_PAGE_SIZE log_page_size | |
166 #define PAGE_SIZE_DIV_2 page_size_div_2 | |
2720 | 167 |
168 | |
169 /* Constants for heap address to page header mapping. */ | |
170 #define LOG_LEVEL2_SIZE 10 | |
171 #define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE) | |
172 #if BITS_PER_EMACS_INT > 32 | |
173 # define USE_HASH_TABLE 1 | |
174 # define LOG_LEVEL1_SIZE 11 | |
175 #else | |
176 # define LOG_LEVEL1_SIZE \ | |
177 (BITS_PER_EMACS_INT - LOG_LEVEL2_SIZE - LOG_PAGE_SIZE) | |
178 #endif | |
179 #define LEVEL1_SIZE (1 << LOG_LEVEL1_SIZE) | |
180 | |
181 #ifdef USE_HASH_TABLE | |
182 # define HASH(hi) ((hi) & (LEVEL1_SIZE - 1)) | |
4125 | 183 # define L1_INDEX(p) HASH ((EMACS_UINT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE)) |
2720 | 184 #else |
4125 | 185 # define L1_INDEX(p) ((EMACS_UINT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE)) |
2720 | 186 #endif |
4125 | 187 #define L2_INDEX(p) (((EMACS_UINT) p >> LOG_PAGE_SIZE) & (LEVEL2_SIZE - 1)) |
2720 | 188 |
189 | |
190 | |
191 | |
192 /*--- structs and typedefs ---------------------------------------------*/ | |
193 | |
3092 | 194 /* Links the free lists (mark_bit_free_list and cell free list). */ |
2720 | 195 typedef struct free_link |
196 { | |
197 struct lrecord_header lheader; | |
198 struct free_link *next_free; | |
199 } free_link; | |
200 | |
201 | |
3092 | 202 /* Header for pages. They are held in a doubly linked list. */ |
2720 | 203 typedef struct page_header |
204 { | |
205 struct page_header *next; /* next page_header */ | |
206 struct page_header *prev; /* previous page_header */ | |
207 /* Field plh holds pointer to the according header of the page list.*/ | |
208 struct page_list_header *plh; /* page list header */ | |
209 free_link *free_list; /* links free cells on page */ | |
210 EMACS_INT n_pages; /* number of pages */ | |
211 EMACS_INT cell_size; /* size of cells on page */ | |
212 EMACS_INT cells_on_page; /* total number of cells on page */ | |
213 EMACS_INT cells_used; /* number of used cells on page */ | |
214 /* If the number of objects on page is bigger than BITS_PER_EMACS_INT, | |
215 the mark bits are put in an extra memory area. Then the field | |
216 mark_bits holds the pointer to this area. Is the number of | |
217 objects smaller than BITS_PER_EMACS_INT, the mark bits are held in the | |
218 mark_bit EMACS_INT directly, without an additional indirection. */ | |
3092 | 219 unsigned int black_bit:1; /* objects on page are black */ |
220 unsigned int dirty_bit:1; /* page is dirty */ | |
221 unsigned int protection_bit:1; /* page is write protected */ | |
222 unsigned int array_bit:1; /* page holds arrays */ | |
223 Rawbyte *mark_bits; /* pointer to mark bits */ | |
2720 | 224 void *heap_space; /* pointer to heap, where objects |
225 are stored */ | |
226 } page_header; | |
227 | |
228 | |
/* Different list types. */
enum list_type_enum {
  USED_LIST,
  FREE_LIST
};
234 | |
235 | |
236 /* Header for page lists. Field list_type holds the type of the list. */ | |
237 typedef struct page_list_header | |
238 { | |
239 enum list_type_enum list_type; /* the type of the list */ | |
240 /* Size holds the size of one cell (in bytes) in a used heap list, or the | |
241 size of the heap sector (in number of pages). */ | |
242 size_t size; /* size of one cell / heap sector */ | |
243 page_header *first; /* first of page_header list */ | |
244 page_header *last; /* last of page_header list */ | |
245 /* If the number of objects on page is bigger than | |
246 BITS_PER_EMACS_INT, the mark bits are put in an extra memory | |
247 area, which is linked in this free list, if not used. Is the | |
248 number of objects smaller than BITS_PER_EMACS_INT, the mark bits | |
249 are hold in the mark bit EMACS_INT directly, without an | |
250 additional indirection. */ | |
251 free_link *mark_bit_free_list; | |
252 | |
253 #ifdef MEMORY_USAGE_STATS | |
254 EMACS_INT page_count; /* number if pages in list */ | |
255 EMACS_INT used_cells; /* number of objects in list */ | |
256 EMACS_INT used_space; /* used space */ | |
257 EMACS_INT total_cells; /* number of available cells */ | |
258 EMACS_INT total_space; /* available space */ | |
259 #endif | |
260 } page_list_header; | |
261 | |
262 | |
263 /* The heap sectors are stored with their real start pointer and their | |
264 real size. Not aligned to PAGE_SIZE. Needed for freeing heap sectors. */ | |
265 typedef struct heap_sect { | |
266 void *real_start; /* real start pointer (NOT aligned) */ | |
267 size_t real_size; /* NOT multiple of PAGE_SIZE */ | |
268 void *start; /* aligned start pointer */ | |
269 EMACS_INT n_pages; /* multiple of PAGE_SIZE */ | |
270 } heap_sect; | |
271 | |
272 | |
273 /* 2nd tree level for mapping of heap addresses to page headers. */ | |
274 typedef struct level_2_lookup_tree { | |
275 page_header *index[LEVEL2_SIZE]; /* link to page header */ | |
276 EMACS_INT key; /* high order address bits */ | |
277 #ifdef USE_HASH_TABLE | |
278 struct level_2_lookup_tree *hash_link; /* hash chain link */ | |
279 #endif | |
280 } level_2_lookup_tree; | |
281 | |
282 | |
283 | |
284 /*--- global variable definitions --------------------------------------*/ | |
285 | |
286 /* All global allocator variables are kept in this struct. */ | |
287 typedef struct mc_allocator_globals_type { | |
288 | |
289 /* heap size */ | |
290 EMACS_INT heap_size; | |
291 | |
292 /* list of all separatly allocated chunks of heap */ | |
293 heap_sect heap_sections[MAX_HEAP_SECTS]; | |
294 EMACS_INT n_heap_sections; | |
295 | |
296 /* Holds all allocated pages, each object size class in its separate list, | |
297 to guarantee fast allocation on partially filled pages. */ | |
3092 | 298 page_list_header *used_heap_pages; |
2720 | 299 |
5216
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
300 /* Holds all allocated pages that contain array elements. */ |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
301 page_list_header array_heap_pages; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
302 |
2720 | 303 /* Holds all free pages in the heap. N multiples of PAGE_SIZE are |
304 kept on the Nth free list. Contiguos pages are coalesced. */ | |
305 page_list_header free_heap_pages[N_FREE_PAGE_LISTS]; | |
306 | |
307 /* ptr lookup table */ | |
3092 | 308 level_2_lookup_tree **ptr_lookup_table; |
2720 | 309 |
3092 | 310 #ifndef BLOCKTYPE_ALLOC_PAGE_HEADER |
2720 | 311 /* page header free list */ |
312 free_link *page_header_free_list; | |
3092 | 313 #endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */ |
2720 | 314 |
315 #ifdef MEMORY_USAGE_STATS | |
316 EMACS_INT malloced_bytes; | |
317 #endif | |
318 } mc_allocator_globals_type; | |
319 | |
320 mc_allocator_globals_type mc_allocator_globals; | |
321 | |
322 | |
323 | |
324 | |
/*--- macro accessors --------------------------------------------------*/

#define USED_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.used_heap_pages[i])

#define ARRAY_HEAP_PAGES \
  ((page_list_header*) &mc_allocator_globals.array_heap_pages)

#define FREE_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.free_heap_pages[i])

#define PLH(plh) plh
# define PLH_LIST_TYPE(plh) PLH (plh)->list_type
# define PLH_SIZE(plh) PLH (plh)->size
# define PLH_FIRST(plh) PLH (plh)->first
# define PLH_LAST(plh) PLH (plh)->last
# define PLH_MARK_BIT_FREE_LIST(plh) PLH (plh)->mark_bit_free_list
#ifdef MEMORY_USAGE_STATS
# define PLH_PAGE_COUNT(plh) PLH (plh)->page_count
# define PLH_USED_CELLS(plh) PLH (plh)->used_cells
# define PLH_USED_SPACE(plh) PLH (plh)->used_space
# define PLH_TOTAL_CELLS(plh) PLH (plh)->total_cells
# define PLH_TOTAL_SPACE(plh) PLH (plh)->total_space
#endif

#define PH(ph) ph
# define PH_NEXT(ph) PH (ph)->next
# define PH_PREV(ph) PH (ph)->prev
# define PH_PLH(ph) PH (ph)->plh
# define PH_FREE_LIST(ph) PH (ph)->free_list
# define PH_N_PAGES(ph) PH (ph)->n_pages
# define PH_CELL_SIZE(ph) PH (ph)->cell_size
# define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page
# define PH_CELLS_USED(ph) PH (ph)->cells_used
# define PH_BLACK_BIT(ph) PH (ph)->black_bit
# define PH_DIRTY_BIT(ph) PH (ph)->dirty_bit
# define PH_PROTECTION_BIT(ph) PH (ph)->protection_bit
# define PH_ARRAY_BIT(ph) PH (ph)->array_bit
# define PH_MARK_BITS(ph) PH (ph)->mark_bits
# define PH_HEAP_SPACE(ph) PH (ph)->heap_space
#define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph))
#define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph))

#define HEAP_SIZE mc_allocator_globals.heap_size

#ifdef MEMORY_USAGE_STATS
# define MC_MALLOCED_BYTES mc_allocator_globals.malloced_bytes
#endif

#define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index]
#define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
#define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#define NEXT_FREE(free_list) ((free_link*) free_list)->next_free
/* NOTE: FREE_LIST is a function-like macro; a bare FREE_LIST token not
   followed by '(' (as in comparisons against the list_type_enum
   constant) is NOT expanded and refers to the enum constant. */
#define FREE_LIST(free_list) (free_link*) (free_list)

#define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i]
#define LEVEL2(l2, i) l2->index[i]
# define LEVEL2_KEY(l2) l2->key
#ifdef USE_HASH_TABLE
# define LEVEL2_HASH_LINK(l2) l2->hash_link
#endif

#if ZERO_MEM
# define ZERO_HEAP_SPACE(ph) \
   memset (PH_HEAP_SPACE (ph), '\0', PH_N_PAGES (ph) * PAGE_SIZE)
# define ZERO_PAGE_HEADER(ph) memset (ph, '\0', sizeof (page_header))
#endif

#define div_PAGE_SIZE(x) (x >> LOG_PAGE_SIZE)
#define mult_PAGE_SIZE(x) (x << LOG_PAGE_SIZE)

#define BYTES_TO_PAGES(bytes) (div_PAGE_SIZE ((bytes + (PAGE_SIZE - 1))))

#define PAGE_SIZE_ALIGNMENT(address) \
  (void *) ((((EMACS_UINT) (address)) + PAGE_SIZE) & ~(PAGE_SIZE - 1))

#define PH_ON_FREE_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST))

#define PH_ON_USED_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST))


/* Number of mark bits: minimum 1, maximum 8. */
#define N_MARK_BITS 2
2720 | 414 |
415 | |
416 | |
417 /************************************************************************/ | |
418 /* MC Allocator */ | |
419 /************************************************************************/ | |
420 | |
3305 | 421 /* Set to 1 if memory becomes short. */ |
422 EMACS_INT memory_shortage; | |
2720 | 423 |
424 /*--- misc functions ---------------------------------------------------*/ | |
425 | |
426 /* Visits all pages (page_headers) hooked into the used heap pages | |
427 list and executes f with the current page header as | |
3303 | 428 argument. Needed for sweep. Returns number of processed pages. */ |
429 static EMACS_INT | |
430 visit_all_used_page_headers (EMACS_INT (*f) (page_header *ph)) | |
2720 | 431 { |
3303 | 432 EMACS_INT number_of_pages_processed = 0; |
3092 | 433 EMACS_INT i; |
2720 | 434 for (i = 0; i < N_USED_PAGE_LISTS; i++) |
435 if (PLH_FIRST (USED_HEAP_PAGES (i))) | |
436 { | |
437 page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i)); | |
438 while (PH_NEXT (ph)) | |
439 { | |
440 page_header *next = PH_NEXT (ph); /* in case f removes the page */ | |
3303 | 441 number_of_pages_processed += f (ph); |
2720 | 442 ph = next; |
443 } | |
3303 | 444 number_of_pages_processed += f (ph); |
2720 | 445 } |
5216
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
446 |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
447 if (PLH_FIRST (ARRAY_HEAP_PAGES)) |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
448 { |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
449 page_header *ph = PLH_FIRST (ARRAY_HEAP_PAGES); |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
450 while (PH_NEXT (ph)) |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
451 { |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
452 page_header *next = PH_NEXT (ph); /* in case f removes the page */ |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
453 number_of_pages_processed += f (ph); |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
454 ph = next; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
455 } |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
456 number_of_pages_processed += f (ph); |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
457 } |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
458 |
3303 | 459 return number_of_pages_processed; |
2720 | 460 } |
461 | |
462 | |
463 | |
464 | |
465 /*--- mapping of heap addresses to page headers and mark bits ----------*/ | |
466 | |
467 /* Sets a heap pointer and page header pair into the lookup table. */ | |
468 static void | |
469 set_lookup_table (void *ptr, page_header *ph) | |
470 { | |
3092 | 471 EMACS_INT l1_index = L1_INDEX (ptr); |
2720 | 472 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); |
473 #ifdef USE_HASH_TABLE | |
474 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) | |
475 l2 = LEVEL2_HASH_LINK (l2); | |
476 #endif | |
477 if (!l2) | |
478 { | |
479 l2 = (level_2_lookup_tree*) | |
480 xmalloc_and_zero (sizeof (level_2_lookup_tree)); | |
481 #ifdef MEMORY_USAGE_STATS | |
482 MC_MALLOCED_BYTES += | |
483 malloced_storage_size (0, sizeof (level_2_lookup_tree), 0); | |
484 #endif | |
2932 | 485 memset (l2, '\0', sizeof (level_2_lookup_tree)); |
2720 | 486 #ifdef USE_HASH_TABLE |
487 LEVEL2_HASH_LINK (l2) = PTR_LOOKUP_TABLE (l1_index); | |
488 #endif | |
489 PTR_LOOKUP_TABLE (l1_index) = l2; | |
490 LEVEL2_KEY (l2) = l1_index; | |
491 } | |
492 LEVEL2 (l2, L2_INDEX (ptr)) = ph; | |
493 } | |
494 | |
495 | |
#ifdef UNSET_LOOKUP_TABLE
/* Clears the lookup table entry for the given heap address; a no-op
   if the address has no second-level node. */
static void
unset_lookup_table (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (l2)
    LEVEL2 (l2, L2_INDEX (ptr)) = 0;
}
#endif
512 | |
513 /* Returns the page header of a given heap address, or 0 if not in table. | |
514 For internal use, no error checking. */ | |
515 static page_header * | |
516 get_page_header_internal (void *ptr) | |
517 { | |
3092 | 518 EMACS_INT l1_index = L1_INDEX (ptr); |
2720 | 519 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); |
520 #ifdef USE_HASH_TABLE | |
521 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) | |
522 l2 = LEVEL2_HASH_LINK (l2); | |
523 #endif | |
524 if (!l2) | |
525 return 0; | |
526 return LEVEL2 (l2, L2_INDEX (ptr)); | |
527 } | |
528 | |
529 /* Returns the page header of a given heap address, or 0 if not in table. */ | |
530 static page_header * | |
531 get_page_header (void *ptr) | |
532 { | |
3092 | 533 EMACS_INT l1_index = L1_INDEX (ptr); |
2720 | 534 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); |
2723 | 535 assert (l2); |
2720 | 536 #ifdef USE_HASH_TABLE |
537 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) | |
538 l2 = LEVEL2_HASH_LINK (l2); | |
539 #endif | |
2723 | 540 assert (LEVEL2 (l2, L2_INDEX (ptr))); |
2720 | 541 return LEVEL2 (l2, L2_INDEX (ptr)); |
542 } | |
543 | |
544 /* Returns the mark bit index of a given heap address. */ | |
545 static EMACS_INT | |
546 get_mark_bit_index (void *ptr, page_header *ph) | |
547 { | |
548 EMACS_INT cell_size = PH_CELL_SIZE (ph); | |
549 if (cell_size) | |
3092 | 550 return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size) |
551 * N_MARK_BITS; | |
2720 | 552 else /* only one object on page */ |
553 return 0; | |
554 } | |
555 | |
556 | |
557 /* Adds addresses of pages to lookup table. */ | |
558 static void | |
559 add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages) | |
560 { | |
3092 | 561 Rawbyte *p = (Rawbyte *) PH_HEAP_SPACE (ph); |
2720 | 562 EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages); |
3092 | 563 for (p = (Rawbyte *) PH_HEAP_SPACE (ph); |
2720 | 564 (EMACS_INT) p < end_of_section; p += PAGE_SIZE) |
565 set_lookup_table (p, ph); | |
566 } | |
567 | |
568 | |
569 /* Initializes lookup table. */ | |
570 static void | |
571 init_lookup_table (void) | |
572 { | |
3092 | 573 EMACS_INT i; |
2720 | 574 for (i = 0; i < LEVEL1_SIZE; i++) |
575 PTR_LOOKUP_TABLE (i) = 0; | |
576 } | |
577 | |
578 | |
579 | |
580 | |
581 /*--- mark bits --------------------------------------------------------*/ | |
582 | |
583 /*--- bit operations --- */ | |
584 | |
585 /* Allocates a bit array of length bits. */ | |
3092 | 586 static Rawbyte * |
2720 | 587 alloc_bit_array(size_t bits) |
588 { | |
3092 | 589 Rawbyte *bit_array; |
590 #ifdef USE_MARK_BITS_FREE_LIST | |
591 size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte); | |
592 #else /* not USE_MARK_BITS_FREE_LIST */ | |
593 size_t size = | |
594 ALIGN_FOR_TYPE (((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte), | |
595 Rawbyte *); | |
596 #endif /* not USE_MARK_BITS_FREE_LIST */ | |
2720 | 597 if (size < sizeof (free_link)) size = sizeof (free_link); |
598 #ifdef MEMORY_USAGE_STATS | |
599 MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0); | |
600 #endif | |
3092 | 601 bit_array = (Rawbyte *) xmalloc_and_zero (size); |
2720 | 602 return bit_array; |
603 } | |
604 | |
605 | |
606 /* Returns the bit value at pos. */ | |
607 static EMACS_INT | |
3092 | 608 get_bit (Rawbyte *bit_array, EMACS_INT pos) |
2720 | 609 { |
610 #if N_MARK_BITS > 1 | |
611 EMACS_INT result = 0; | |
612 EMACS_INT i; | |
613 #endif | |
614 bit_array += pos / CHAR_BIT; | |
615 #if N_MARK_BITS > 1 | |
616 for (i = 0; i < N_MARK_BITS; i++) | |
3092 | 617 result |= ((*bit_array & (1 << ((pos + i) % CHAR_BIT))) != 0) << i; |
618 return result; | |
2720 | 619 #else |
620 return (*bit_array & (1 << (pos % CHAR_BIT))) != 0; | |
621 #endif | |
622 } | |
623 | |
624 | |
625 /* Bit_Arrays bit at pos to val. */ | |
626 static void | |
4125 | 627 set_bit (Rawbyte *bit_array, EMACS_INT pos, EMACS_UINT val) |
2720 | 628 { |
629 #if N_MARK_BITS > 1 | |
630 EMACS_INT i; | |
631 #endif | |
632 bit_array += pos / CHAR_BIT; | |
633 #if N_MARK_BITS > 1 | |
634 for (i = 0; i < N_MARK_BITS; i++) | |
635 if ((val >> i) & 1) | |
636 *bit_array |= 1 << ((pos + i) % CHAR_BIT); | |
637 else | |
638 *bit_array &= ~(1 << ((pos + i) % CHAR_BIT)); | |
639 #else | |
640 if (val) | |
641 *bit_array |= 1 << (pos % CHAR_BIT); | |
642 else | |
643 *bit_array &= ~(1 << (pos % CHAR_BIT)); | |
644 #endif | |
645 } | |
646 | |
647 | |
/*--- mark bit functions ---*/

/* A page uses an out-of-line (pointer) bit array when its mark bits
   do not fit into one EMACS_INT word, otherwise the word itself. */
#define USE_PNTR_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) > BITS_PER_EMACS_INT)
#define USE_WORD_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) <= BITS_PER_EMACS_INT)

#define GET_BIT_WORD(b, p) get_bit ((Rawbyte *) &b, p)
#define GET_BIT_PNTR(b, p) get_bit (b, p)

#define SET_BIT_WORD(b, p, v) set_bit ((Rawbyte *) &b, p, v)
#define SET_BIT_PNTR(b, p, v) set_bit (b, p, v)

#define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0
#define ZERO_MARK_BITS_PNTR(ph)					\
do {								\
  memset (PH_MARK_BITS (ph), '\0',				\
	  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS)		\
	   + CHAR_BIT - 1) / CHAR_BIT * sizeof (Rawbyte));	\
} while (0)

/* Dispatchers: pick the word or pointer variant per page. */
#define GET_BIT(bit, ph, p)				\
do {							\
  if (USE_PNTR_MARK_BITS (ph))				\
    bit = GET_BIT_PNTR (PH_MARK_BITS (ph), p);		\
  else							\
    bit = GET_BIT_WORD (PH_MARK_BITS (ph), p);		\
} while (0)

#define SET_BIT(ph, p, v)				\
do {							\
  if (USE_PNTR_MARK_BITS (ph))				\
    SET_BIT_PNTR (PH_MARK_BITS (ph), p, v);		\
  else							\
    SET_BIT_WORD (PH_MARK_BITS (ph), p, v);		\
} while (0)

#define ZERO_MARK_BITS(ph)				\
do {							\
  if (USE_PNTR_MARK_BITS (ph))				\
    ZERO_MARK_BITS_PNTR (ph);				\
  else							\
    ZERO_MARK_BITS_WORD (ph);				\
} while (0)
691 | |
692 | |
693 /* Allocates mark-bit space either from a free list or from the OS | |
694 for the given page header. */ | |
3092 | 695 static Rawbyte * |
2720 | 696 alloc_mark_bits (page_header *ph) |
697 { | |
3092 | 698 Rawbyte *result; |
699 #ifdef USE_MARK_BITS_FREE_LIST | |
2720 | 700 if (PH_MARK_BIT_FREE_LIST (ph) == 0) |
3092 | 701 result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS); |
2720 | 702 else |
703 { | |
3092 | 704 result = (Rawbyte *) PH_MARK_BIT_FREE_LIST (ph); |
2720 | 705 PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result); |
706 } | |
3092 | 707 #else /* not USE_MARK_BITS_FREE_LIST */ |
708 result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS); | |
709 #endif /* not USE_MARK_BITS_FREE_LIST */ | |
2720 | 710 return result; |
711 } | |
712 | |
713 | |
714 /* Frees by maintaining a free list. */ | |
715 static void | |
716 free_mark_bits (page_header *ph) | |
717 { | |
3092 | 718 #ifdef USE_MARK_BITS_FREE_LIST |
719 NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph); | |
720 PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph)); | |
721 #else /* not USE_MARK_BITS_FREE_LIST */ | |
2720 | 722 if (PH_MARK_BITS (ph)) |
3092 | 723 free (PH_MARK_BITS (ph)); |
724 #endif /* not USE_MARK_BITS_FREE_LIST */ | |
2720 | 725 } |
726 | |
727 | |
728 /* Installs mark bits and zeros bits. */ | |
729 static void | |
730 install_mark_bits (page_header *ph) | |
731 { | |
732 if (USE_PNTR_MARK_BITS (ph)) | |
733 { | |
734 PH_MARK_BITS (ph) = alloc_mark_bits (ph); | |
735 ZERO_MARK_BITS_PNTR (ph); | |
736 } | |
737 else | |
738 ZERO_MARK_BITS_WORD (ph); | |
739 } | |
740 | |
741 | |
742 /* Cleans and frees the mark bits of the given page_header. */ | |
743 static void | |
744 remove_mark_bits (page_header *ph) | |
745 { | |
746 if (USE_PNTR_MARK_BITS (ph)) | |
747 free_mark_bits (ph); | |
748 } | |
749 | |
750 | |
751 /* Zeros all mark bits in given header. */ | |
752 static void | |
753 zero_mark_bits (page_header *ph) | |
754 { | |
755 ZERO_MARK_BITS (ph); | |
756 } | |
757 | |
758 | |
759 /* Returns mark bit for given heap pointer. */ | |
760 EMACS_INT | |
761 get_mark_bit (void *ptr) | |
762 { | |
763 EMACS_INT bit = 0; | |
764 page_header *ph = get_page_header (ptr); | |
765 gc_checking_assert (ph && PH_ON_USED_LIST_P (ph)); | |
766 if (ph) | |
767 { | |
768 GET_BIT (bit, ph, get_mark_bit_index (ptr, ph)); | |
769 } | |
770 return bit; | |
771 } | |
772 | |
773 | |
774 /* Sets mark bit for given heap pointer. */ | |
775 void | |
776 set_mark_bit (void *ptr, EMACS_INT value) | |
777 { | |
778 page_header *ph = get_page_header (ptr); | |
779 assert (ph && PH_ON_USED_LIST_P (ph)); | |
780 if (ph) | |
781 { | |
3092 | 782 if (value == BLACK) |
783 if (!PH_BLACK_BIT (ph)) | |
784 PH_BLACK_BIT (ph) = 1; | |
2720 | 785 SET_BIT (ph, get_mark_bit_index (ptr, ph), value); |
786 } | |
787 } | |
788 | |
789 | |
790 | |
791 | |
/*--- page header functions --------------------------------------------*/

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
#include "blocktype.h"

/* Page headers are carved out of blocktype-managed storage; this
   describes the blocktype pool they come from. */
struct page_header_blocktype
{
  Blocktype_declare (page_header);
} *the_page_header_blocktype;
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */
802 | |
/* Allocates a page header either from a free list or from the OS.
   With BLOCKTYPE_ALLOC_PAGE_HEADER the blocktype allocator supplies
   the storage; otherwise a simple intrusive free list of recycled
   headers is consulted before falling back to xmalloc. */
static page_header *
alloc_page_header (void)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  page_header *result;
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
  result = Blocktype_alloc (the_page_header_blocktype);
  /* blocktype storage may be recycled, so clear it explicitly */
  ZERO_PAGE_HEADER (result);
  return result;
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
  page_header *result;
  if (PAGE_HEADER_FREE_LIST == 0)
    {
      /* free list exhausted: get fresh, zeroed storage */
      result =
        (page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
    }
  else
    {
      /* pop a recycled header off the free list */
      result = (page_header*) PAGE_HEADER_FREE_LIST;
      PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
    }
  return result;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}
833 | |
834 | |
/* Frees the given page header by maintaining a free list (or by
   returning it to the blocktype pool). */
static void
free_page_header (page_header *ph)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  Blocktype_free (the_page_header_blocktype, ph);
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
#if ZERO_MEM
  ZERO_PAGE_HEADER (ph);
#endif
  /* push onto the page header free list for later reuse */
  NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
  PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}
849 | |
850 | |
851 /* Adds given page header to given page list header's list. */ | |
852 static void | |
853 add_page_header_to_plh (page_header *ph, page_list_header *plh) | |
854 { | |
855 /* insert at the front of the list */ | |
856 PH_PREV (ph) = 0; | |
857 PH_NEXT (ph) = PLH_FIRST (plh); | |
858 PH_PLH (ph) = plh; | |
859 /* if list is not empty, set prev in the first element */ | |
860 if (PLH_FIRST (plh)) | |
861 PH_PREV (PLH_FIRST (plh)) = ph; | |
862 /* one element in list is first and last at the same time */ | |
863 PLH_FIRST (plh) = ph; | |
864 if (!PLH_LAST (plh)) | |
865 PLH_LAST (plh) = ph; | |
866 | |
867 #ifdef MEMORY_USAGE_STATS | |
868 /* bump page count */ | |
869 PLH_PAGE_COUNT (plh)++; | |
870 #endif | |
871 | |
872 } | |
873 | |
874 | |
875 /* Removes given page header from given page list header's list. */ | |
876 static void | |
877 remove_page_header_from_plh (page_header *ph, page_list_header *plh) | |
878 { | |
879 if (PLH_FIRST (plh) == ph) | |
880 PLH_FIRST (plh) = PH_NEXT (ph); | |
881 if (PLH_LAST (plh) == ph) | |
882 PLH_LAST (plh) = PH_PREV (ph); | |
883 if (PH_NEXT (ph)) | |
884 PH_PREV (PH_NEXT (ph)) = PH_PREV (ph); | |
885 if (PH_PREV (ph)) | |
886 PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph); | |
887 | |
888 #ifdef MEMORY_USAGE_STATS | |
889 /* decrease page count */ | |
890 PLH_PAGE_COUNT (plh)--; | |
891 #endif | |
892 } | |
893 | |
894 | |
/* Moves a page header to the front of its page header list.
   This is used during sweep: Pages with some alive objects are moved to
   the front.  This makes allocation faster, all pages with free slots
   can be found at the front of the list. */
static void
move_page_header_to_front (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);
  /* is page already first? */
  if (ph == PLH_FIRST (plh)) return;
  /* remove from list */
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
  /* insert at the front; ph was not first, so the list still has a
     valid first element to link behind ph */
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PREV (ph) = 0;
  PH_PREV (PH_NEXT (ph)) = ph;
  PLH_FIRST (plh) = ph;
}
918 | |
919 | |
920 | |
921 | |
922 /*--- page list functions ----------------------------------------------*/ | |
923 | |
924 /* Returns the index of the used heap list according to given size. */ | |
925 static int | |
926 get_used_list_index (size_t size) | |
927 { | |
928 if (size <= USED_LIST_MIN_OBJECT_SIZE) | |
3092 | 929 { |
930 // printf ("size %d -> index %d\n", size, 0); | |
931 return 0; | |
932 } | |
933 if (size <= (size_t) USED_LIST_UPPER_THRESHOLD) | |
934 { | |
935 // printf ("size %d -> index %d\n", size, | |
936 // ((size - USED_LIST_MIN_OBJECT_SIZE - 1) | |
937 // / USED_LIST_LIN_STEP) + 1); | |
938 return ((size - USED_LIST_MIN_OBJECT_SIZE - 1) | |
939 / USED_LIST_LIN_STEP) + 1; | |
940 } | |
941 // printf ("size %d -> index %d\n", size, N_USED_PAGE_LISTS - 1); | |
2720 | 942 return N_USED_PAGE_LISTS - 1; |
943 } | |
944 | |
945 /* Returns the size of the used heap list according to given index. */ | |
946 static size_t | |
947 get_used_list_size_value (int used_index) | |
948 { | |
949 if (used_index < N_USED_PAGE_LISTS - 1) | |
950 return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE; | |
951 return 0; | |
952 } | |
953 | |
954 | |
/* Returns the index of the free heap list according to given size.
   Small page counts (up to FREE_LIST_LOWER_THRESHOLD) get one list
   each, medium counts share linearly stepped lists, and anything at
   or above FREE_LIST_UPPER_THRESHOLD - 1 lands on the last list. */
static EMACS_INT
get_free_list_index (EMACS_INT n_pages)
{
  if (n_pages == 0)
    return 0;
  if (n_pages <= FREE_LIST_LOWER_THRESHOLD)
    return n_pages - 1;
  if (n_pages >= FREE_LIST_UPPER_THRESHOLD - 1)
    return N_FREE_PAGE_LISTS - 1;
  return ((n_pages - FREE_LIST_LOWER_THRESHOLD - 1)
          / FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;

}
969 | |
970 | |
/* Returns the size in number of pages of the given free list at index.
   NOTE(review): get_free_list_index never returns an index >=
   N_FREE_PAGE_LISTS, so the second branch looks unreachable; the
   catch-all list (index N_FREE_PAGE_LISTS - 1) falls through to the
   linear formula instead -- confirm this matches the intended
   index/size mapping. */
static size_t
get_free_list_size_value (EMACS_INT free_index)
{
  if (free_index < FREE_LIST_LOWER_THRESHOLD)
    return free_index + 1;
  if (free_index >= N_FREE_PAGE_LISTS)
    return FREE_LIST_UPPER_THRESHOLD;
  return ((free_index + 1 - FREE_LIST_LOWER_THRESHOLD)
          * FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}
982 | |
983 | |
/* Returns the storage actually consumed by an object of CLAIMED_SIZE
   bytes (its size class's cell size, or whole pages for objects on
   the catch-all list) and, if STATS is non-null, accumulates the
   requested size and the rounding overhead into it. */
Bytecount
mc_alloced_storage_size (Bytecount claimed_size, struct usage_stats *stats)
{
  size_t used_size =
    get_used_list_size_value (get_used_list_index (claimed_size));
  /* catch-all list: objects occupy whole pages */
  if (used_size == 0)
    used_size = mult_PAGE_SIZE (BYTES_TO_PAGES (claimed_size));

  if (stats)
    {
      stats->was_requested += claimed_size;
      stats->malloc_overhead += used_size - claimed_size;
    }

  return used_size;
}
1000 | |
1001 | |
1002 | |
/*--- free heap functions ----------------------------------------------*/

/* Frees the heap section exactly backed by the given page header, if
   such a section exists.  The heap section table is kept dense by
   shifting all following entries down one slot.  Returns 1 if a
   section was removed, 0 otherwise. */
static EMACS_INT
free_heap_section (page_header *ph)
{
  EMACS_INT i;
  EMACS_INT removed = 0;
  for (i = 0; i < N_HEAP_SECTIONS; i++)
    if (!removed)
      {
        /* the page header must span the section exactly */
        if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
            && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
          {
            xfree_1 (HEAP_SECTION(i).real_start);
#ifdef MEMORY_USAGE_STATS
            MC_MALLOCED_BYTES
              -= malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
#endif

            HEAP_SIZE -= PH_N_PAGES (ph) * PAGE_SIZE;

            removed = 1;
          }
      }
    else
      {
        /* close the gap left by the removed section */
        HEAP_SECTION(i-1).real_start = HEAP_SECTION(i).real_start;
        HEAP_SECTION(i-1).real_size = HEAP_SECTION(i).real_size;
        HEAP_SECTION(i-1).start = HEAP_SECTION(i).start;
        HEAP_SECTION(i-1).n_pages = HEAP_SECTION(i).n_pages;
      }

  N_HEAP_SECTIONS = N_HEAP_SECTIONS - removed;

  return removed;
}
1040 | |
/* Removes the page from its free list and clears its back-pointer to
   the list header (a zero PH_PLH marks the page as unhooked). */
static void
remove_page_from_free_list (page_header *ph)
{
  remove_page_header_from_plh (ph, PH_PLH (ph));
  PH_PLH (ph) = 0;
}
1048 | |
1049 | |
/* Adds the page to the free list that matches its page count. */
static void
add_page_to_free_list (page_header *ph)
{
  PH_PLH (ph) = FREE_HEAP_PAGES (get_free_list_index (PH_N_PAGES (ph)));
  add_page_header_to_plh (ph, PH_PLH (ph));
}
1057 | |
1058 | |
/* Merges two adjacent pages into the first one; the second page's
   header is recycled and the lookup table is rewritten to point the
   combined range at the surviving header. */
static page_header *
merge_pages (page_header *first_ph, page_header *second_ph)
{
  /* merge */
  PH_N_PAGES (first_ph) += PH_N_PAGES (second_ph);
  /* clean up left over page header */
  free_page_header (second_ph);
  /* update lookup table */
  add_pages_to_lookup_table (first_ph, PH_N_PAGES (first_ph));

  return first_ph;
}
1072 | |
1073 | |
/* Coalesces the page with free neighbors on either side, then either
   returns the whole heap section to the OS (if the merged page covers
   a complete section) or adds the merged page to the free list. */
static void
merge_into_free_list (page_header *ph)
{
  /* check if you can coalesce adjacent pages: look up the headers of
     the page just below and just above this page's range */
  page_header *prev_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
                                       - PAGE_SIZE));
  page_header *succ_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
                                       + (PH_N_PAGES (ph) * PAGE_SIZE)));
  if (PH_ON_FREE_LIST_P (prev_ph))
    {
      remove_page_from_free_list (prev_ph);
      ph = merge_pages (prev_ph, ph);
    }
  if (PH_ON_FREE_LIST_P (succ_ph))
    {
      remove_page_from_free_list (succ_ph);
      ph = merge_pages (ph, succ_ph);
    }
  /* try to free heap_section, if the section is complete */
  if (!free_heap_section (ph))
    /* else add merged page to free list */
    add_page_to_free_list (ph);
}
1101 | |
1102 | |
/* Cuts given page header after n_pages, returns the first (cut) part,
   and puts the rest on the free list. */
static page_header *
split_page (page_header *ph, EMACS_INT n_pages)
{
  page_header *new_ph;
  EMACS_INT rem_pages = PH_N_PAGES (ph) - n_pages;

  /* remove the page from the free list if already hooked in */
  if (PH_PLH (ph))
    remove_page_from_free_list (ph);
  /* set new number of pages */
  PH_N_PAGES (ph) = n_pages;
  /* add new page to lookup table */
  add_pages_to_lookup_table (ph, n_pages);

  if (rem_pages)
    {
      /* build new page with the remainder */
      new_ph = alloc_page_header ();
      PH_N_PAGES (new_ph) = rem_pages;
      PH_HEAP_SPACE (new_ph) =
        (void*) ((EMACS_INT) (PH_HEAP_SPACE (ph)) + (n_pages * PAGE_SIZE));
      /* add new page to lookup table */
      add_pages_to_lookup_table (new_ph, rem_pages);
      /* hook the rest into free list */
      add_page_to_free_list (new_ph);
    }
  return ph;
}
1133 | |
1134 | |
/* Expands the heap by given number of pages.  Normally the heap grows
   by at least MIN_HEAP_INCREASE plus a fraction of its current size;
   under memory shortage only the needed pages are requested.  One
   extra page is allocated so the usable range can be aligned to a
   page boundary.  Returns a page header covering exactly
   NEEDED_PAGES; the surplus goes onto the free list. */
static page_header *
expand_heap (EMACS_INT needed_pages)
{
  page_header *ph;
  EMACS_INT n_pages;
  size_t real_size;
  void *real_start;

  /* determine number of pages the heap should grow */
  if (memory_shortage)
    n_pages = needed_pages;
  else
    n_pages = max (MIN_HEAP_INCREASE,
                   needed_pages
                   + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR)));

  /* get the real values; one extra page for alignment slack */
  real_size = (n_pages * PAGE_SIZE) + PAGE_SIZE;
  real_start = xmalloc_and_zero (real_size);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, real_size, 0);
#endif

  /* maintain heap section count */
  if (N_HEAP_SECTIONS >= MAX_HEAP_SECTS)
    {
      stderr_out ("Increase number of MAX_HEAP_SECTS");
      ABORT ();
    }
  HEAP_SECTION(N_HEAP_SECTIONS).real_start = real_start;
  HEAP_SECTION(N_HEAP_SECTIONS).real_size = real_size;
  HEAP_SECTION(N_HEAP_SECTIONS).start = PAGE_SIZE_ALIGNMENT (real_start);
  HEAP_SECTION(N_HEAP_SECTIONS).n_pages = n_pages;
  N_HEAP_SECTIONS ++;

  /* get page header */
  ph = alloc_page_header ();

  /* setup page header */
  PH_N_PAGES (ph) = n_pages;
  PH_HEAP_SPACE (ph) = PAGE_SIZE_ALIGNMENT (real_start);
  assert (((EMACS_INT) (PH_HEAP_SPACE (ph)) % PAGE_SIZE) == 0);
  HEAP_SIZE += n_pages * PAGE_SIZE;

  /* this was also done by allocate_lisp_storage */
  if (need_to_check_c_alloca)
    xemacs_c_alloca (0);

  /* return needed size, put rest on free list */
  return split_page (ph, needed_pages);
}
1187 | |
1188 | |
1189 | |
1190 | |
/*--- used heap functions ----------------------------------------------*/

/* Installs the initial cell free list on a freshly installed page:
   every cell is marked free and entered into the lookup table; for
   non-array pages the cells are additionally threaded into a singly
   linked free list.  Array pages get no chain between cells — the
   whole page backs one array allocation. */
static void
install_cell_free_list (page_header *ph)
{
  Rawbyte *p;
  EMACS_INT i;
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  /* write initial free list if cell_size is < PAGE_SIZE */
  p = (Rawbyte *) PH_HEAP_SPACE (ph);
  for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
    {
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (p));
      MARK_LRECORD_AS_FREE (p);
#endif
      if (!PH_ARRAY_BIT (ph))
        NEXT_FREE (p) = FREE_LIST (p + cell_size);
      set_lookup_table (p, ph);
      p += cell_size;
    }
#ifdef ERROR_CHECK_GC
  assert (!LRECORD_FREE_P (p));
  MARK_LRECORD_AS_FREE (p);
#endif
  /* the last cell terminates the chain */
  NEXT_FREE (p) = 0;
  set_lookup_table (p, ph);

  /* hook free list into header */
  PH_FREE_LIST (ph) = FREE_LIST (PH_HEAP_SPACE (ph));
}
1222 | |
1223 | |
/* Cleans the object space of the given page_header: optionally zeros
   the heap space and drops the cell free list. */
static void
remove_cell_free_list (page_header *ph)
{
#if ZERO_MEM
  ZERO_HEAP_SPACE (ph);
#endif
  PH_FREE_LIST (ph) = 0;
}
1233 | |
1234 | |
/* Installs a new page and hooks it into given page_list_header.
   SIZE is the requested object size; ELEMCOUNT == 1 means an ordinary
   size-class page, while ELEMCOUNT != 1 marks the page as backing one
   array of ELEMCOUNT cells. */
static page_header *
install_page_in_used_list (page_header *ph, page_list_header *plh,
                           size_t size, EMACS_INT elemcount)
{
  /* add to list */
  add_page_header_to_plh (ph, plh);

  /* determine cell size: fixed size classes carry it in the plh; the
     catch-all and array lists use the requested size directly */
  if (PLH_SIZE (plh))
    PH_CELL_SIZE (ph) = PLH_SIZE (plh);
  else
    PH_CELL_SIZE (ph) = size;
  if (elemcount == 1)
    {
      PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph);
      PH_ARRAY_BIT (ph) = 0;
    }
  else
    {
      PH_CELLS_ON_PAGE (ph) = elemcount;
      PH_ARRAY_BIT (ph) = 1;
    }

  /* init cell count */
  PH_CELLS_USED (ph) = 0;

  /* install mark bits and initialize cell free list */
  install_mark_bits (ph);

  install_cell_free_list (ph);

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph);
#endif

  return ph;
}
1274 | |
1275 | |
/* Cleans and frees a page, identified by the given page_header: the
   free list and mark bits are torn down, the header is reset and
   unhooked from its used list, and the page is coalesced into the
   free heap. */
static void
remove_page_from_used_list (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);

  /* a page must not vanish while the incremental GC has it
     write-protected */
  assert (!(gc_in_progress && PH_PROTECTION_BIT (ph)));
  /* cleanup: remove memory protection, zero page_header bits. */

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
#endif

  /* clean up mark bits and cell free list */
  remove_cell_free_list (ph);
  if (PH_ON_USED_LIST_P (ph))
    remove_mark_bits (ph);

  /* clean up page header */
  PH_CELL_SIZE (ph) = 0;
  PH_CELLS_ON_PAGE (ph) = 0;
  PH_CELLS_USED (ph) = 0;

  /* remove from used list */
  remove_page_header_from_plh (ph, plh);

  /* move to free list */
  merge_into_free_list (ph);
}
1306 | |
1307 | |
1308 | |
1309 | |
/*--- allocation -------------------------------------------------------*/

/* Allocates from cell free list on already allocated pages.  Only the
   first two pages are examined — sweeping keeps pages with free slots
   at the front of the list.  A full first page is rotated to the end
   so it is not rechecked on every allocation.  Returns the page with
   a free cell, or 0 if none is at hand. */
static page_header *
allocate_cell (page_list_header *plh)
{
  page_header *ph = PLH_FIRST (plh);
  if (ph)
    {
      if (PH_FREE_LIST (ph))
        /* elements free on first page */
        return ph;
      else if ((PH_NEXT (ph))
               && (PH_FREE_LIST (PH_NEXT (ph))))
        /* elements free on second page */
        {
          page_header *temp = PH_NEXT (ph);
          /* move full page (first page) to end of list */
          PH_NEXT (PLH_LAST (plh)) = ph;
          PH_PREV (ph) = PLH_LAST (plh);
          PLH_LAST (plh) = ph;
          PH_NEXT (ph) = 0;
          /* install second page as first page */
          ph = temp;
          PH_PREV (ph) = 0;
          PLH_FIRST (plh) = ph;
          return ph;
        }
    }
  return 0;
}
1341 | |
1342 | |
1343 /* Finds a page which has at least the needed number of pages. | |
1344 Algorithm: FIRST FIT. */ | |
1345 static page_header * | |
1346 find_free_page_first_fit (EMACS_INT needed_pages, page_header *ph) | |
1347 { | |
1348 while (ph) | |
1349 { | |
1350 if (PH_N_PAGES (ph) >= needed_pages) | |
1351 return ph; | |
1352 ph = PH_NEXT (ph); | |
1353 } | |
1354 return 0; | |
1355 } | |
1356 | |
1357 | |
/* Allocates a page from the free list: scans the free lists from the
   matching size class upward, takes the first fitting page, and splits
   off any surplus back onto the free list.  Returns 0 if no free page
   is large enough. */
static page_header *
allocate_page_from_free_list (EMACS_INT needed_pages)
{
  page_header *ph = 0;
  EMACS_INT i;
  for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
    if ((ph = find_free_page_first_fit (needed_pages,
                                        PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
      {
        if (PH_N_PAGES (ph) > needed_pages)
          return split_page (ph, needed_pages);
        else
          {
            remove_page_from_free_list (ph);
            return ph;
          }
      }
  return 0;
}
1378 | |
1379 | |
/* Allocates a new page, either from free list or by expanding the
   heap, and installs it on the given used page list. */
static page_header *
allocate_new_page (page_list_header *plh, size_t size, EMACS_INT elemcount)
{
  EMACS_INT needed_pages = BYTES_TO_PAGES (size * elemcount);
  /* first check free list */
  page_header *result = allocate_page_from_free_list (needed_pages);
  if (!result)
    /* expand heap */
    result = expand_heap (needed_pages);
  install_page_in_used_list (result, plh, size, elemcount);
  return result;
}
1393 | |
1394 | |
/* Selects the correct size class, tries to allocate a cell of this size
   from the free list; if this fails, a new page is allocated.  A SIZE
   of 0 yields a null pointer.  Arrays (ELEMCOUNT != 1) always get
   their own page from ARRAY_HEAP_PAGES.  The returned storage is
   zeroed and marked as a free lrecord until the caller initializes
   it. */
static void *
mc_alloc_1 (size_t size, EMACS_INT elemcount)
{
  page_list_header *plh = 0;
  page_header *ph = 0;
  void *result = 0;

  if (size == 0)
    return 0;

  if (elemcount == 1)
    {
      plh = USED_HEAP_PAGES (get_used_list_index (size));
      if (size < (size_t) USED_LIST_UPPER_THRESHOLD)
        /* first check any free cells */
        ph = allocate_cell (plh);
    }
  else
    {
      plh = ARRAY_HEAP_PAGES;
    }

  if (!ph)
    /* allocate a new page */
    ph = allocate_new_page (plh, size, elemcount);

  /* return first element of free list and remove it from the list */
  result = (void*) PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) =
    NEXT_FREE (PH_FREE_LIST (ph));

  memset (result, '\0', (size * elemcount));
  MARK_LRECORD_AS_FREE (result);

  /* bump used cells counter */
  PH_CELLS_USED (ph) += elemcount;

#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (plh) += elemcount;
  PLH_USED_SPACE (plh) += size * elemcount;
#endif

  return result;
}
1441 | |
/* Array allocation: ELEMCOUNT cells of SIZE bytes on a page of their
   own. */
void *
mc_alloc_array (size_t size, EMACS_INT elemcount)
{
  return mc_alloc_1 (size, elemcount);
}
1448 | |
/* Single-object allocation of SIZE bytes from the matching size
   class. */
void *
mc_alloc (size_t size)
{
  return mc_alloc_1 (size, 1);
}
1454 | |
1455 | |
1456 | |
/*--- sweep & free & finalize-------------------------------------------*/

/* Frees a heap pointer: hooks the cell back into its page's free list
   and updates the bookkeeping.  For cells on used pages, the per-type
   statistics are decremented and (with GC error checking) the storage
   is poisoned to catch use-after-free. */
static void
remove_cell (void *ptr, page_header *ph)
{
#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (PH_PLH (ph))--;
  if (PH_ON_USED_LIST_P (ph))
    PLH_USED_SPACE (PH_PLH (ph)) -=
      detagged_lisp_object_size ((const struct lrecord_header *) ptr);
  else
    PLH_USED_SPACE (PH_PLH (ph)) -= PH_CELL_SIZE (ph);
#endif
  if (PH_ON_USED_LIST_P (ph))
    {
#ifdef ALLOC_TYPE_STATS
      dec_lrecord_stats (PH_CELL_SIZE (ph),
                         (const struct lrecord_header *) ptr);
#endif /* ALLOC_TYPE_STATS */
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (ptr));
      deadbeef_memory (ptr, PH_CELL_SIZE (ph));
      MARK_LRECORD_AS_FREE (ptr);
#endif
    }

  /* hooks cell into free list */
  NEXT_FREE (ptr) = PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) = FREE_LIST (ptr);
  /* decrease cells used */
  PH_CELLS_USED (ph)--;
}
1490 | |
1491 | |
1492 /* Mark free list marks all free list entries. */ | |
1493 static void | |
1494 mark_free_list (page_header *ph) | |
1495 { | |
1496 free_link *fl = PH_FREE_LIST (ph); | |
1497 while (fl) | |
1498 { | |
3092 | 1499 SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK); |
2720 | 1500 fl = NEXT_FREE (fl); |
1501 } | |
1502 } | |
1503 | |
1504 | |
/* Finalize a page for disksave.  XEmacs calls this routine before it
   dumps the heap image.  You have to tell mc-alloc how to call your
   object's finalizer for disksave.  Therefore, you have to define the
   macro MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr).  This macro should
   do nothing else then test if there is a finalizer and call it on
   the given argument, which is the heap address of the object.
   Returns number of processed pages. */
static EMACS_INT
finalize_page_for_disksave (page_header *ph)
{
  EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);

  /* NOTE(review): every cell on the page is visited, including cells
     currently on the free list — the disksave finalizer macro is
     expected to cope with free cells. */
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
      MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
    }
  return 1;
}
1527 | |
1528 | |
/* Finalizes the heap for disksave.  Returns number of processed
   pages. */
EMACS_INT
mc_finalize_for_disksave (void)
{
  return visit_all_used_page_headers (finalize_page_for_disksave);
}
1536 | |
1537 | |
/* Sweeps a page: all the non-marked cells are freed.  If the page is
   empty in the end, it is removed.  If some cells are free, it is
   moved to the front of its page header list.  Full pages stay where
   they are.  Returns number of processed pages. */
static EMACS_INT
sweep_page (page_header *ph)
{
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;

  /* the free list must survive the sweep: mark its entries */
  mark_free_list (ph);

  /* ARRAY_BIT_HACK: an array is alive as a whole if any of its cells
     is marked; in that case just clear the marks and keep the page */
  if (PH_ARRAY_BIT (ph))
    for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
      {
        GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
        if (bit)
          {
            zero_mark_bits (ph);
            PH_BLACK_BIT (ph) = 0;
            return 1;
          }
      }

  /* free every cell left WHITE (unreachable) by the mark phase */
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == WHITE)
        {
          GC_STAT_FREED;
          remove_cell (heap_space + (heap_space_step * mark_bit), ph);
        }
    }
  zero_mark_bits (ph);
  PH_BLACK_BIT (ph) = 0;
  if (PH_CELLS_USED (ph) == 0)
    remove_page_from_used_list (ph);
  else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
    move_page_header_to_front (ph);

  return 1;
}
1584 | |
1585 | |
/* Sweeps the heap.  Returns number of processed pages. */
EMACS_INT
mc_sweep (void)
{
  return visit_all_used_page_headers (sweep_page);
}
1592 | |
1593 | |
/* Changes the size of the cell pointed to by ptr.
   Returns the new address of the new cell with new size.  A null PTR
   degenerates to a plain allocation; a zero total size returns 0.
   NOTE(review): the old cell is not freed here — reclaiming it is
   left to the garbage collector.  inc_lrecord_stats is called with
   SIZE, not SIZE * ELEMCOUNT — confirm that is intended for array
   reallocations. */
static void *
mc_realloc_1 (void *ptr, size_t size, int elemcount)
{
  if (ptr)
    {
      if (size * elemcount)
        {
          void *result = mc_alloc_1 (size, elemcount);
          /* copy at most the old cell size */
          size_t from_size = PH_CELL_SIZE (get_page_header (ptr));
          size_t cpy_size = size * elemcount;
          if (cpy_size > from_size)
            cpy_size = from_size;
          memcpy (result, ptr, cpy_size);
#ifdef ALLOC_TYPE_STATS
          inc_lrecord_stats (size, (struct lrecord_header *) result);
#endif /* not ALLOC_TYPE_STATS */
          return result;
        }
      else
        {
          return 0;
        }
    }
  else
    return mc_alloc_1 (size, elemcount);
}
1622 | |
/* Resizes a single mc-alloc'ed object; see mc_realloc_1. */
void *
mc_realloc (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 1);
}
1628 | |
/* Resizes an mc-alloc'ed array to ELEMCOUNT cells of SIZE bytes; see
   mc_realloc_1. */
void *
mc_realloc_array (void *ptr, size_t size, EMACS_INT elemcount)
{
  return mc_realloc_1 (ptr, size, elemcount);
}
1634 | |
1635 | |
1636 | |
1637 /*--- initialization ---------------------------------------------------*/ | |
1638 | |
1639 /* Call once at the very beginning. */ | |
1640 void | |
1641 init_mc_allocator (void) | |
1642 { | |
3092 | 1643 EMACS_INT i; |
1644 | |
1645 #ifdef MEMORY_USAGE_STATS | |
1646 MC_MALLOCED_BYTES = 0; | |
1647 #endif | |
2720 | 1648 |
3092 | 1649 /* init of pagesize dependent values */ |
1650 switch (SYS_PAGE_SIZE) | |
1651 { | |
1652 case 512: log_page_size = 9; break; | |
1653 case 1024: log_page_size = 10; break; | |
1654 case 2048: log_page_size = 11; break; | |
1655 case 4096: log_page_size = 12; break; | |
1656 case 8192: log_page_size = 13; break; | |
1657 case 16384: log_page_size = 14; break; | |
3212 | 1658 case 32768: log_page_size = 15; break; |
1659 case 65536: log_page_size = 16; break; | |
1660 default: | |
1661 fprintf(stderr, "##### SYS_PAGE_SIZE=%d not supported #####\n", | |
1662 SYS_PAGE_SIZE); | |
1663 ABORT (); | |
3092 | 1664 } |
1665 | |
4125 | 1666 page_size_div_2 = (EMACS_UINT) SYS_PAGE_SIZE >> 1; |
3092 | 1667 |
1668 mc_allocator_globals.used_heap_pages = | |
1669 (page_list_header *) xmalloc_and_zero ((N_USED_PAGE_LISTS + 1) | |
1670 * sizeof (page_list_header)); | |
1671 #ifdef MEMORY_USAGE_STATS | |
1672 MC_MALLOCED_BYTES += (N_USED_PAGE_LISTS + 1) * sizeof (page_list_header); | |
1673 #endif | |
1674 | |
1675 mc_allocator_globals.ptr_lookup_table = | |
1676 (level_2_lookup_tree **) | |
1677 xmalloc_and_zero ((LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *)); | |
1678 #ifdef MEMORY_USAGE_STATS | |
1679 MC_MALLOCED_BYTES += (LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *); | |
1680 #endif | |
1681 | |
1682 #ifdef BLOCKTYPE_ALLOC_PAGE_HEADER | |
1683 the_page_header_blocktype = Blocktype_new (struct page_header_blocktype); | |
1684 #endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */ | |
2932 | 1685 |
2720 | 1686 for (i = 0; i < N_USED_PAGE_LISTS; i++) |
1687 { | |
1688 page_list_header *plh = USED_HEAP_PAGES (i); | |
1689 PLH_LIST_TYPE (plh) = USED_LIST; | |
1690 PLH_SIZE (plh) = get_used_list_size_value (i); | |
1691 PLH_FIRST (plh) = 0; | |
1692 PLH_LAST (plh) = 0; | |
1693 PLH_MARK_BIT_FREE_LIST (plh) = 0; | |
1694 #ifdef MEMORY_USAGE_STATS | |
1695 PLH_PAGE_COUNT (plh) = 0; | |
1696 PLH_USED_CELLS (plh) = 0; | |
1697 PLH_USED_SPACE (plh) = 0; | |
1698 PLH_TOTAL_CELLS (plh) = 0; | |
1699 PLH_TOTAL_SPACE (plh) = 0; | |
1700 #endif | |
1701 } | |
1702 | |
5216
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1703 { |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1704 page_list_header *plh = ARRAY_HEAP_PAGES; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1705 PLH_LIST_TYPE (plh) = USED_LIST; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1706 PLH_SIZE (plh) = 0; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1707 PLH_FIRST (plh) = 0; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1708 PLH_LAST (plh) = 0; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1709 PLH_MARK_BIT_FREE_LIST (plh) = 0; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1710 #ifdef MEMORY_USAGE_STATS |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1711 PLH_PAGE_COUNT (plh) = 0; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1712 PLH_USED_CELLS (plh) = 0; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1713 PLH_USED_SPACE (plh) = 0; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1714 PLH_TOTAL_CELLS (plh) = 0; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1715 PLH_TOTAL_SPACE (plh) = 0; |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1716 #endif |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1717 } |
9b8c2168d231
Allocate lrecord arrays in own size class.
Marcus Crestani <crestani@informatik.uni-tuebingen.de>
parents:
5167
diff
changeset
|
1718 |
2720 | 1719 for (i = 0; i < N_FREE_PAGE_LISTS; i++) |
1720 { | |
1721 page_list_header *plh = FREE_HEAP_PAGES (i); | |
1722 PLH_LIST_TYPE (plh) = FREE_LIST; | |
1723 PLH_SIZE (plh) = get_free_list_size_value (i); | |
1724 PLH_FIRST (plh) = 0; | |
1725 PLH_LAST (plh) = 0; | |
1726 PLH_MARK_BIT_FREE_LIST (plh) = 0; | |
1727 #ifdef MEMORY_USAGE_STATS | |
1728 PLH_PAGE_COUNT (plh) = 0; | |
1729 PLH_USED_CELLS (plh) = 0; | |
1730 PLH_USED_SPACE (plh) = 0; | |
1731 PLH_TOTAL_CELLS (plh) = 0; | |
1732 PLH_TOTAL_SPACE (plh) = 0; | |
1733 #endif | |
1734 } | |
1735 | |
3092 | 1736 #ifndef BLOCKTYPE_ALLOC_PAGE_HEADER |
2720 | 1737 PAGE_HEADER_FREE_LIST = 0; |
3092 | 1738 #endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */ |
2720 | 1739 |
1740 #ifdef MEMORY_USAGE_STATS | |
3092 | 1741 MC_MALLOCED_BYTES += sizeof (mc_allocator_globals); |
2720 | 1742 #endif |
1743 | |
1744 init_lookup_table (); | |
1745 } | |
1746 | |
1747 | |
1748 | |
1749 | |
1750 /*--- lisp function for statistics -------------------------------------*/ | |
1751 | |
1752 #ifdef MEMORY_USAGE_STATS | |
DEFUN ("mc-alloc-memory-usage", Fmc_alloc_memory_usage, 0, 0, 0, /*
Returns stats about the mc-alloc memory usage. See diagnose.el.
*/
       ())
{
  Lisp_Object free_plhs = Qnil;
  Lisp_Object used_plhs = Qnil;
  Lisp_Object heap_sects = Qnil;
  EMACS_INT used_size = 0;
  EMACS_INT real_size = 0;

  EMACS_INT i;

  /* Alist (size . (page-count)) for each non-empty free list. */
  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)) > 0)
      free_plhs =
	acons (make_int (PLH_SIZE (FREE_HEAP_PAGES(i))),
	       list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)))),
	       free_plhs);

  /* Alist (size . (pages used-cells used-space total-cells
     total-space)) for each non-empty used list. */
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (USED_HEAP_PAGES(i)) > 0)
      used_plhs =
	acons (make_int (PLH_SIZE (USED_HEAP_PAGES(i))),
	       list5 (make_int (PLH_PAGE_COUNT (USED_HEAP_PAGES(i))),
		      make_int (PLH_USED_CELLS (USED_HEAP_PAGES(i))),
		      make_int (PLH_USED_SPACE (USED_HEAP_PAGES(i))),
		      make_int (PLH_TOTAL_CELLS (USED_HEAP_PAGES(i))),
		      make_int (PLH_TOTAL_SPACE (USED_HEAP_PAGES(i)))),
	       used_plhs);

  /* The lrecord-array page list is reported under key 0. */
  used_plhs =
    acons (make_int (0),
	   list5 (make_int (PLH_PAGE_COUNT(ARRAY_HEAP_PAGES)),
		  make_int (PLH_USED_CELLS (ARRAY_HEAP_PAGES)),
		  make_int (PLH_USED_SPACE (ARRAY_HEAP_PAGES)),
		  make_int (PLH_TOTAL_CELLS (ARRAY_HEAP_PAGES)),
		  make_int (PLH_TOTAL_SPACE (ARRAY_HEAP_PAGES))),
	   used_plhs);

  /* Sum page space and malloc'ed size over all heap sections. */
  for (i = 0; i < N_HEAP_SECTIONS; i++) {
    used_size += HEAP_SECTION(i).n_pages * PAGE_SIZE;
    real_size +=
      malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
  }

  heap_sects =
    list3 (make_int (N_HEAP_SECTIONS),
	   make_int (used_size),
	   make_int (real_size));

  /* (PAGE_SIZE heap-sects used-plhs free-plhs globals-size
     malloced-bytes) -- consumed by diagnose.el. */
  return Fcons (make_int (PAGE_SIZE),
		list5 (heap_sects,
		       Fnreverse (used_plhs),
		       Fnreverse (free_plhs),
		       make_int (sizeof (mc_allocator_globals)),
		       make_int (MC_MALLOCED_BYTES)));
}
1811 #endif /* MEMORY_USAGE_STATS */ | |
1812 | |
/* Register this module's Lisp primitives. */
void
syms_of_mc_alloc (void)
{
#ifdef MEMORY_USAGE_STATS
  DEFSUBR (Fmc_alloc_memory_usage);
#endif /* MEMORY_USAGE_STATS */
}
3092 | 1820 |
1821 | |
1822 /*--- incremental garbage collector ----------------------------------*/ | |
1823 | |
5054 | 1824 #if 0 /* currently unused */ |
1825 | |
/* access dirty bit of page header */
/* Set the dirty bit of page header ph to the given value. */
static void
set_dirty_bit (page_header *ph, unsigned int value)
{
  PH_DIRTY_BIT (ph) = value;
}
1832 | |
5042
f395ee7ad844
Fix some compile warnings, make vdb test code conditional on DEBUG_XEMACS
Ben Wing <ben@xemacs.org>
parents:
4125
diff
changeset
|
1833 static void |
3092 | 1834 set_dirty_bit_for_address (void *ptr, unsigned int value) |
1835 { | |
1836 set_dirty_bit (get_page_header (ptr), value); | |
1837 } | |
1838 | |
/* Return the dirty bit of page header ph. */
static unsigned int
get_dirty_bit (page_header *ph)
{
  return PH_DIRTY_BIT (ph);
}
1844 | |
5042
f395ee7ad844
Fix some compile warnings, make vdb test code conditional on DEBUG_XEMACS
Ben Wing <ben@xemacs.org>
parents:
4125
diff
changeset
|
1845 static unsigned int |
3092 | 1846 get_dirty_bit_for_address (void *ptr) |
1847 { | |
1848 return get_dirty_bit (get_page_header (ptr)); | |
1849 } | |
1850 | |
1851 | |
/* access protection bit of page header */
/* Set the protection bit of page header ph to the given value. */
static void
set_protection_bit (page_header *ph, unsigned int value)
{
  PH_PROTECTION_BIT (ph) = value;
}
1858 | |
5042
f395ee7ad844
Fix some compile warnings, make vdb test code conditional on DEBUG_XEMACS
Ben Wing <ben@xemacs.org>
parents:
4125
diff
changeset
|
1859 static void |
3092 | 1860 set_protection_bit_for_address (void *ptr, unsigned int value) |
1861 { | |
1862 set_protection_bit (get_page_header (ptr), value); | |
1863 } | |
1864 | |
/* Return the protection bit of page header ph. */
static unsigned int
get_protection_bit (page_header *ph)
{
  return PH_PROTECTION_BIT (ph);
}
1870 | |
5042
f395ee7ad844
Fix some compile warnings, make vdb test code conditional on DEBUG_XEMACS
Ben Wing <ben@xemacs.org>
parents:
4125
diff
changeset
|
1871 static unsigned int |
3092 | 1872 get_protection_bit_for_address (void *ptr) |
1873 { | |
1874 return get_protection_bit (get_page_header (ptr)); | |
1875 } | |
1876 | |
1877 | |
1878 /* Returns the start of the page of the object pointed to by ptr. */ | |
5042
f395ee7ad844
Fix some compile warnings, make vdb test code conditional on DEBUG_XEMACS
Ben Wing <ben@xemacs.org>
parents:
4125
diff
changeset
|
1879 static void * |
3092 | 1880 get_page_start (void *ptr) |
1881 { | |
1882 return PH_HEAP_SPACE (get_page_header (ptr)); | |
1883 } | |
1884 | |
5054 | 1885 #endif /* 0 */ |
1886 | |
3092 | 1887 /* Make PAGE_SIZE globally available. */ |
1888 EMACS_INT | |
1889 mc_get_page_size () | |
1890 { | |
1891 return PAGE_SIZE; | |
1892 } | |
1893 | |
/* Is the fault at ptr on a protected page?  Returns non-zero iff ptr
   points into the heap space of a known page whose protection bit is
   set. */
EMACS_INT
fault_on_protected_page (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  /* ptr must lie within [heap_space, heap_space + n_pages * PAGE_SIZE)
     and the page must currently be protected. */
  return (ph
	  && PH_HEAP_SPACE (ph)
	  && (PH_HEAP_SPACE (ph) <= ptr)
	  && ((void *) ((EMACS_INT) PH_HEAP_SPACE (ph)
			+ PH_N_PAGES (ph) * PAGE_SIZE) > ptr)
	  && (PH_PROTECTION_BIT (ph) == 1));
}
1906 | |
1907 | |
1908 /* Protect the heap page of given page header ph if black objects are | |
3303 | 1909 on the page. Returns number of processed pages. */ |
1910 static EMACS_INT | |
3092 | 1911 protect_heap_page (page_header *ph) |
1912 { | |
1913 if (PH_BLACK_BIT (ph)) | |
1914 { | |
1915 void *heap_space = PH_HEAP_SPACE (ph); | |
1916 EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE; | |
1917 vdb_protect ((void *) heap_space, heap_space_size); | |
1918 PH_PROTECTION_BIT (ph) = 1; | |
3303 | 1919 return 1; |
3092 | 1920 } |
3303 | 1921 return 0; |
3092 | 1922 } |
1923 | |
/* Protect all heap pages with black objects.  Returns number of
   processed pages. */
EMACS_INT
protect_heap_pages (void)
{
  return visit_all_used_page_headers (protect_heap_page);
}
1931 | |
1932 | |
1933 /* Remove protection (if there) of heap page of given page header | |
3303 | 1934 ph. Returns number of processed pages. */ |
1935 static EMACS_INT | |
3092 | 1936 unprotect_heap_page (page_header *ph) |
1937 { | |
1938 if (PH_PROTECTION_BIT (ph)) | |
1939 { | |
1940 void *heap_space = PH_HEAP_SPACE (ph); | |
1941 EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE; | |
1942 vdb_unprotect (heap_space, heap_space_size); | |
1943 PH_PROTECTION_BIT (ph) = 0; | |
3303 | 1944 return 1; |
3092 | 1945 } |
3303 | 1946 return 0; |
3092 | 1947 } |
1948 | |
/* Remove protection for all heap pages which are protected.  Returns
   number of processed pages. */
EMACS_INT
unprotect_heap_pages (void)
{
  return visit_all_used_page_headers (unprotect_heap_page);
}
1956 | |
/* Remove protection and mark page dirty.  Called for the page
   containing ptr. */
void
unprotect_page_and_mark_dirty (void *ptr)
{
  page_header *ph = get_page_header (ptr);
  unprotect_heap_page (ph);
  PH_DIRTY_BIT (ph) = 1;
}
1965 | |
/* Repush all objects on dirty pages onto the mark stack.  Walks every
   cell on the page containing ptr, calls the write barrier for each
   cell whose mark bits are BLACK, then clears the page's black and
   dirty bits.  Returns the number of repushed objects. */
int
repush_all_objects_on_page (void *ptr)
{
  int repushed_objects = 0;
  page_header *ph = get_page_header (ptr);
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph); /* stride between cells */
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == BLACK)
	{
	  repushed_objects++;
	  /* Hand the object back to the collector as a tagged
	     pointer. */
	  gc_write_barrier
	    (wrap_pointer_1 ((heap_space + (heap_space_step * mark_bit))));
	}
    }
  PH_BLACK_BIT (ph) = 0;
  PH_DIRTY_BIT (ph) = 0;
  return repushed_objects;
}
1991 | |
/* Mark black if object is currently grey.  This first checks, if the
   object is really allocated on the mc-heap.  If it is, it can be
   marked black; if it is not, it cannot be marked.  Returns 1 if ptr
   is on the mc-heap, 0 otherwise. */
EMACS_INT
maybe_mark_black (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  unsigned int bit = 0;

  if (ph && PH_PLH (ph) && PH_ON_USED_LIST_P (ph))
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
      if (bit == GREY)
	{
	  /* Record that this page now carries a black object.  (The
	     guard merely avoids a redundant store; the assignment is
	     idempotent.) */
	  if (!PH_BLACK_BIT (ph))
	    PH_BLACK_BIT (ph) = 1;
	  SET_BIT (ph, get_mark_bit_index (ptr, ph), BLACK);
	}
      return 1;
    }
  return 0;
}
2014 | |
/* Only for debugging --- not used anywhere in the sources.  Returns
   non-zero iff ptr lies on a page that is on a used list. */
EMACS_INT
object_on_heap_p (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph && PH_ON_USED_LIST_P (ph));
}