/* New size-based allocator for XEmacs.
   Copyright (C) 2005 Marcus Crestani.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Synched up with: Not in FSF. */

#include <config.h>

#include "lisp.h"
#include "mc-alloc.h"
#include "getpagesize.h"


#if 0
# define USE_MARK_BITS_FREE_LIST 1
#endif
#if 1
# define BLOCKTYPE_ALLOC_PAGE_HEADER 1
#endif

/* Memory protection needs the real system-dependent pagesize. */
#ifndef WIN32_NATIVE
#include <unistd.h>		/* for getpagesize () */
#endif
#if defined (HAVE_GETPAGESIZE)
# define SYS_PAGE_SIZE getpagesize ()
#elif defined (_SC_PAGESIZE)
# define SYS_PAGE_SIZE sysconf (_SC_PAGESIZE)
#elif defined (_SC_PAGE_SIZE)
# define SYS_PAGE_SIZE sysconf (_SC_PAGE_SIZE)
#elif defined (get_page_size)
# define SYS_PAGE_SIZE get_page_size ()
#elif defined (PAGESIZE)
# define SYS_PAGE_SIZE PAGESIZE
#elif defined (PAGE_SIZE)
# define SYS_PAGE_SIZE PAGE_SIZE
#else
/* Valid page sizes are powers of 2. */
# define SYS_PAGE_SIZE 4096
#endif


/*--- configurable values ----------------------------------------------*/

/* Definition of size classes */

/* Heap used list constants: In the used heap, it is important to
   quickly find a free spot for a new object.  Therefore the size
   classes of the used heap are defined by the size of the cells on
   the pages.  The size classes should match common object sizes, to
   avoid wasting memory. */

/* Minimum object size in bytes. */
#if BITS_PER_EMACS_INT > 32
# define USED_LIST_MIN_OBJECT_SIZE 16
#else
# define USED_LIST_MIN_OBJECT_SIZE 8
#endif

/* The step size by which the size classes increase (up to the upper
   threshold).  This many bytes are mapped to a single used list: */
#if BITS_PER_EMACS_INT > 32
# define USED_LIST_LIN_STEP 8
#else
# define USED_LIST_LIN_STEP 4
#endif

/* The upper threshold should always be set to PAGE_SIZE/2, because if
   an object is larger than PAGE_SIZE/2 there is no room for any other
   object on this page.  Objects this big are kept in the page list of
   the multiple pages, since a quick search for free spots is not
   needed for such pages (because there are no free spots).
   PAGE_SIZE_DIV_2 defines the maximum size of a used space list. */
#define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2


/* Heap free list constants: In the unused heap, the size of
   consecutive memory tips the scales.  A page is the smallest entity
   that is ever requested.  Therefore, the size classes of the unused
   heap are defined by the number of consecutive pages. */
/* Sizes up to this many pages each have their own free list. */
#define FREE_LIST_LOWER_THRESHOLD 32
/* The step size by which the size classes increase (up to the upper
   threshold).  FREE_LIST_LIN_STEP number of sizes are mapped to a
   single free list for sizes between FREE_LIST_LOWER_THRESHOLD and
   FREE_LIST_UPPER_THRESHOLD. */
#define FREE_LIST_LIN_STEP 8
/* Sizes of at least this many pages are mapped to a single free
   list.  Blocks of memory larger than this number are all kept in a
   single list, which makes searching this list slow.  But objects
   that big are rarely allocated. */
#define FREE_LIST_UPPER_THRESHOLD 256


/* used heap list count */
#define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \
                             - USED_LIST_MIN_OBJECT_SIZE) \
                            / USED_LIST_LIN_STEP) + 1 ) + 1

/* free heap list count */
#define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \
                             - FREE_LIST_LOWER_THRESHOLD) \
                            / FREE_LIST_LIN_STEP) \
                           + FREE_LIST_LOWER_THRESHOLD)
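
/* A worked example (assuming a 32-bit build with 4096-byte pages, so
   USED_LIST_MIN_OBJECT_SIZE 8, USED_LIST_LIN_STEP 4, and
   USED_LIST_UPPER_THRESHOLD 2048):
     N_USED_PAGE_LISTS = (((2048 - 8) / 4) + 1) + 1 = 512
     N_FREE_PAGE_LISTS = ((256 - 32) / 8) + 32      = 60
   That is, 511 size-classed used lists plus one list for oversized
   objects, and 60 free lists keyed by run length in pages. */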


/* Maximum number of separately added heap sections. */
#if BITS_PER_EMACS_INT > 32
# define MAX_HEAP_SECTS 2048
#else
# define MAX_HEAP_SECTS 768
#endif


/* Heap growth constants.  The heap increases by some number of pages
   between these boundaries (unit is PAGE_SIZE). */
#define MIN_HEAP_INCREASE 256
#define MAX_HEAP_INCREASE 256 /* not used */

/* Every heap growth is calculated like this:
   needed_pages + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR)).
   So the growth of the heap is influenced by the current size of the
   heap, but kept between the MIN_HEAP_INCREASE and MAX_HEAP_INCREASE
   boundaries.
   This reduces the number of heap sections: the larger the heap
   grows, the larger the newly allocated chunks become. */
#define HEAP_GROWTH_DIVISOR 3
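
/* For example (assuming 4096-byte pages): with a 12 MB heap, a request
   for 2 more pages grows the heap by
     2 + (12582912 / (4096 * 3)) = 2 + 1024 = 1026 pages (about 4 MB);
   with a small or empty heap the same request is rounded up to
   MIN_HEAP_INCREASE = 256 pages (1 MB). */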


/* Zero memory before putting on free lists. */
#define ZERO_MEM 1


#ifndef CHAR_BIT		/* should be included by limits.h */
# define CHAR_BIT BITS_PER_CHAR
#endif



/*--- values depending on PAGE_SIZE ------------------------------------*/

/* initialized in init_mc_allocator () */
static EMACS_INT log_page_size;
static EMACS_INT page_size_div_2;

#undef PAGE_SIZE
#define PAGE_SIZE SYS_PAGE_SIZE
#define LOG_PAGE_SIZE log_page_size
#define PAGE_SIZE_DIV_2 page_size_div_2


/* Constants for heap address to page header mapping. */
#define LOG_LEVEL2_SIZE 10
#define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE)
#if BITS_PER_EMACS_INT > 32
# define USE_HASH_TABLE 1
# define LOG_LEVEL1_SIZE 11
#else
# define LOG_LEVEL1_SIZE \
  (BITS_PER_EMACS_INT - LOG_LEVEL2_SIZE - LOG_PAGE_SIZE)
#endif
#define LEVEL1_SIZE (1 << LOG_LEVEL1_SIZE)

#ifdef USE_HASH_TABLE
# define HASH(hi) ((hi) & (LEVEL1_SIZE - 1))
# define L1_INDEX(p) HASH ((EMACS_INT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#else
# define L1_INDEX(p) ((EMACS_INT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#endif
#define L2_INDEX(p) (((EMACS_INT) p >> LOG_PAGE_SIZE) & (LEVEL2_SIZE - 1))
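
/* For example, on a 32-bit build with 4096-byte pages (LOG_PAGE_SIZE
   12, LOG_LEVEL2_SIZE 10, hence LOG_LEVEL1_SIZE 10), a heap address
   decomposes as
     L1_INDEX (p) = p >> 22            -- top 10 bits, first-level slot
     L2_INDEX (p) = (p >> 12) & 1023   -- middle 10 bits, page slot
   with the low 12 bits being the offset within the page.  On 64-bit
   builds the first level is instead a 2048-entry hash table, since a
   direct-mapped array over the full address space would be too big. */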




/*--- structs and typedefs ---------------------------------------------*/

/* Links the free lists (mark_bit_free_list and cell free list). */
typedef struct free_link
{
  struct lrecord_header lheader;
  struct free_link *next_free;
} free_link;


/* Header for pages.  They are held in a doubly linked list. */
typedef struct page_header
{
  struct page_header *next;	/* next page_header */
  struct page_header *prev;	/* previous page_header */
  /* Field plh holds a pointer to the corresponding page list header. */
  struct page_list_header *plh;	/* page list header */
  free_link *free_list;		/* links free cells on page */
  EMACS_INT n_pages;		/* number of pages */
  EMACS_INT cell_size;		/* size of cells on page */
  EMACS_INT cells_on_page;	/* total number of cells on page */
  EMACS_INT cells_used;		/* number of used cells on page */
  /* If the number of objects on the page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, and the field mark_bits holds the pointer to this area.
     If the number of objects is smaller than BITS_PER_EMACS_INT, the
     mark bits are held in the mark_bits word directly, without an
     additional indirection. */
  unsigned int black_bit:1;	/* objects on page are black */
  unsigned int dirty_bit:1;	/* page is dirty */
  unsigned int protection_bit:1; /* page is write protected */
  unsigned int array_bit:1;	/* page holds arrays */
  Rawbyte *mark_bits;		/* pointer to mark bits */
  void *heap_space;		/* pointer to heap, where objects
				   are stored */
} page_header;
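
/* As an illustration (assuming 4096-byte pages): a page holding
   16-byte cells has cells_on_page = 256, so its mark bits live in a
   separately allocated bit array, while a page holding a single large
   object has cells_on_page = 1 and stores its mark bit in the
   mark_bits word itself (see USE_PNTR_MARK_BITS / USE_WORD_MARK_BITS
   below). */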


/* Different list types. */
enum list_type_enum {
  USED_LIST,
  FREE_LIST
};


/* Header for page lists.  Field list_type holds the type of the list. */
typedef struct page_list_header
{
  enum list_type_enum list_type; /* the type of the list */
  /* Size holds the size of one cell (in bytes) in a used heap list,
     or the size of the heap sector (in number of pages). */
  size_t size;			/* size of one cell / heap sector */
  page_header *first;		/* first of page_header list */
  page_header *last;		/* last of page_header list */
  /* If the number of objects on a page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, which is linked into this free list when not in use.  If
     the number of objects is smaller than BITS_PER_EMACS_INT, the
     mark bits are held in the mark bits word directly, without an
     additional indirection. */
  free_link *mark_bit_free_list;

#ifdef MEMORY_USAGE_STATS
  EMACS_INT page_count;		/* number of pages in list */
  EMACS_INT used_cells;		/* number of objects in list */
  EMACS_INT used_space;		/* used space */
  EMACS_INT total_cells;	/* number of available cells */
  EMACS_INT total_space;	/* available space */
#endif
} page_list_header;


/* The heap sectors are stored with their real start pointer and their
   real size.  Not aligned to PAGE_SIZE.  Needed for freeing heap sectors. */
typedef struct heap_sect {
  void *real_start;		/* real start pointer (NOT aligned) */
  size_t real_size;		/* NOT multiple of PAGE_SIZE */
  void *start;			/* aligned start pointer */
  EMACS_INT n_pages;		/* multiple of PAGE_SIZE */
} heap_sect;


/* 2nd tree level for mapping of heap addresses to page headers. */
typedef struct level_2_lookup_tree {
  page_header *index[LEVEL2_SIZE]; /* link to page header */
  EMACS_INT key;		/* high order address bits */
#ifdef USE_HASH_TABLE
  struct level_2_lookup_tree *hash_link; /* hash chain link */
#endif
} level_2_lookup_tree;



/*--- global variable definitions --------------------------------------*/

/* All global allocator variables are kept in this struct. */
typedef struct mc_allocator_globals_type {

  /* heap size */
  EMACS_INT heap_size;

  /* list of all separately allocated chunks of heap */
  heap_sect heap_sections[MAX_HEAP_SECTS];
  EMACS_INT n_heap_sections;

  /* Holds all allocated pages, each object size class in its separate list,
     to guarantee fast allocation on partially filled pages. */
  page_list_header *used_heap_pages;

  /* Holds all free pages in the heap.  N multiples of PAGE_SIZE are
     kept on the Nth free list.  Contiguous pages are coalesced. */
  page_list_header free_heap_pages[N_FREE_PAGE_LISTS];

  /* ptr lookup table */
  level_2_lookup_tree **ptr_lookup_table;

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  /* page header free list */
  free_link *page_header_free_list;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  EMACS_INT malloced_bytes;
#endif
} mc_allocator_globals_type;

mc_allocator_globals_type mc_allocator_globals;




/*--- macro accessors --------------------------------------------------*/

#define USED_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.used_heap_pages[i])

#define FREE_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.free_heap_pages[i])

#define PLH(plh) plh
# define PLH_LIST_TYPE(plh) PLH (plh)->list_type
# define PLH_SIZE(plh) PLH (plh)->size
# define PLH_FIRST(plh) PLH (plh)->first
# define PLH_LAST(plh) PLH (plh)->last
# define PLH_MARK_BIT_FREE_LIST(plh) PLH (plh)->mark_bit_free_list
#ifdef MEMORY_USAGE_STATS
# define PLH_PAGE_COUNT(plh) PLH (plh)->page_count
# define PLH_USED_CELLS(plh) PLH (plh)->used_cells
# define PLH_USED_SPACE(plh) PLH (plh)->used_space
# define PLH_TOTAL_CELLS(plh) PLH (plh)->total_cells
# define PLH_TOTAL_SPACE(plh) PLH (plh)->total_space
#endif

#define PH(ph) ph
# define PH_NEXT(ph) PH (ph)->next
# define PH_PREV(ph) PH (ph)->prev
# define PH_PLH(ph) PH (ph)->plh
# define PH_FREE_LIST(ph) PH (ph)->free_list
# define PH_N_PAGES(ph) PH (ph)->n_pages
# define PH_CELL_SIZE(ph) PH (ph)->cell_size
# define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page
# define PH_CELLS_USED(ph) PH (ph)->cells_used
# define PH_BLACK_BIT(ph) PH (ph)->black_bit
# define PH_DIRTY_BIT(ph) PH (ph)->dirty_bit
# define PH_PROTECTION_BIT(ph) PH (ph)->protection_bit
# define PH_ARRAY_BIT(ph) PH (ph)->array_bit
# define PH_MARK_BITS(ph) PH (ph)->mark_bits
# define PH_HEAP_SPACE(ph) PH (ph)->heap_space
#define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph))
#define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph))

#define HEAP_SIZE mc_allocator_globals.heap_size

#ifdef MEMORY_USAGE_STATS
# define MC_MALLOCED_BYTES mc_allocator_globals.malloced_bytes
#endif

#define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index]
#define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
#define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#define NEXT_FREE(free_list) ((free_link*) free_list)->next_free
#define FREE_LIST(free_list) (free_link*) (free_list)

#define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i]
#define LEVEL2(l2, i) l2->index[i]
# define LEVEL2_KEY(l2) l2->key
#ifdef USE_HASH_TABLE
# define LEVEL2_HASH_LINK(l2) l2->hash_link
#endif

#if ZERO_MEM
# define ZERO_HEAP_SPACE(ph) \
  memset (PH_HEAP_SPACE (ph), '\0', PH_N_PAGES (ph) * PAGE_SIZE)
# define ZERO_PAGE_HEADER(ph) memset (ph, '\0', sizeof (page_header))
#endif

#define div_PAGE_SIZE(x) (x >> LOG_PAGE_SIZE)
#define mult_PAGE_SIZE(x) (x << LOG_PAGE_SIZE)

#define BYTES_TO_PAGES(bytes) (div_PAGE_SIZE ((bytes + (PAGE_SIZE - 1))))

#define PAGE_SIZE_ALIGNMENT(address) \
  (void *) ((((EMACS_INT) (address)) + PAGE_SIZE) & ~(PAGE_SIZE - 1))
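
/* Example (4096-byte pages): BYTES_TO_PAGES (5000) yields 2, and
   PAGE_SIZE_ALIGNMENT (0x1234) yields 0x2000.  Note that
   PAGE_SIZE_ALIGNMENT always advances to the *next* page boundary,
   even for already-aligned addresses (0x1000 becomes 0x2000); this is
   why expand_heap () below allocates one extra page per section. */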

#define PH_ON_FREE_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST))

#define PH_ON_USED_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST))


/* Number of mark bits: minimum 1, maximum 8. */
#ifdef NEW_GC
#define N_MARK_BITS 2
#else /* not NEW_GC */
#define N_MARK_BITS 1
#endif /* not NEW_GC */



/************************************************************************/
/*                           MC Allocator                               */
/************************************************************************/


/*--- misc functions ---------------------------------------------------*/

/* moved here from alloc.c */
#ifdef ERROR_CHECK_GC
static void
deadbeef_memory (void *ptr, Bytecount size)
{
  UINT_32_BIT *ptr4 = (UINT_32_BIT *) ptr;
  Bytecount beefs = size >> 2;

  /* In practice, size will always be a multiple of four. */
  while (beefs--)
    (*ptr4++) = 0xDEADBEEF; /* -559038737 base 10 */
}
#endif /* ERROR_CHECK_GC */

/* Visits all pages (page_headers) hooked into the used heap pages
   list and executes f with the current page header as
   argument.  Needed for sweep. */
static void
visit_all_used_page_headers (void (*f) (page_header *ph))
{
  EMACS_INT i;
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_FIRST (USED_HEAP_PAGES (i)))
      {
	page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i));
	while (PH_NEXT (ph))
	  {
	    page_header *next = PH_NEXT (ph); /* in case f removes the page */
	    f (ph);
	    ph = next;
	  }
	f (ph);
      }
}




/*--- mapping of heap addresses to page headers and mark bits ----------*/

/* Sets a heap pointer and page header pair into the lookup table. */
static void
set_lookup_table (void *ptr, page_header *ph)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    {
      l2 = (level_2_lookup_tree*)
	xmalloc_and_zero (sizeof (level_2_lookup_tree));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES +=
	malloced_storage_size (0, sizeof (level_2_lookup_tree), 0);
#endif
      memset (l2, '\0', sizeof (level_2_lookup_tree));
#ifdef USE_HASH_TABLE
      LEVEL2_HASH_LINK (l2) = PTR_LOOKUP_TABLE (l1_index);
#endif
      PTR_LOOKUP_TABLE (l1_index) = l2;
      LEVEL2_KEY (l2) = l1_index;
    }
  LEVEL2 (l2, L2_INDEX (ptr)) = ph;
}


#ifdef UNSET_LOOKUP_TABLE
/* Set the lookup table to 0 for given heap address. */
static void
unset_lookup_table (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (l2) {
    LEVEL2 (l2, L2_INDEX (ptr)) = 0;
  }
}
#endif

/* Returns the page header of a given heap address, or 0 if not in table.
   For internal use, no error checking. */
static page_header *
get_page_header_internal (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    return 0;
  return LEVEL2 (l2, L2_INDEX (ptr));
}

/* Returns the page header of a given heap address, or 0 if not in table. */
static page_header *
get_page_header (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
  assert (l2);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  assert (LEVEL2 (l2, L2_INDEX (ptr)));
  return LEVEL2 (l2, L2_INDEX (ptr));
}

/* Returns the mark bit index of a given heap address. */
static EMACS_INT
get_mark_bit_index (void *ptr, page_header *ph)
{
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  if (cell_size)
    return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size)
      * N_MARK_BITS;
  else /* only one object on page */
    return 0;
}


/* Adds addresses of pages to lookup table. */
static void
add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages)
{
  Rawbyte *p = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages);
  for (p = (Rawbyte *) PH_HEAP_SPACE (ph);
       (EMACS_INT) p < end_of_section; p += PAGE_SIZE)
    set_lookup_table (p, ph);
}


/* Initializes lookup table. */
static void
init_lookup_table (void)
{
  EMACS_INT i;
  for (i = 0; i < LEVEL1_SIZE; i++)
    PTR_LOOKUP_TABLE (i) = 0;
}




/*--- mark bits --------------------------------------------------------*/

/*--- bit operations --- */

/* Allocates a bit array of length bits. */
static Rawbyte *
alloc_bit_array (size_t bits)
{
  Rawbyte *bit_array;
#ifdef USE_MARK_BITS_FREE_LIST
  size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte);
#else /* not USE_MARK_BITS_FREE_LIST */
  size_t size =
    ALIGN_FOR_TYPE (((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte),
		    Rawbyte *);
#endif /* not USE_MARK_BITS_FREE_LIST */
  if (size < sizeof (free_link)) size = sizeof (free_link);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0);
#endif
  bit_array = (Rawbyte *) xmalloc_and_zero (size);
  return bit_array;
}


/* Returns the bit value at pos. */
static EMACS_INT
get_bit (Rawbyte *bit_array, EMACS_INT pos)
{
#if N_MARK_BITS > 1
  EMACS_INT result = 0;
  EMACS_INT i;
#endif
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    result |= ((*bit_array & (1 << ((pos + i) % CHAR_BIT))) != 0) << i;
  return result;
#else
  return (*bit_array & (1 << (pos % CHAR_BIT))) != 0;
#endif
}


/* Sets the bits in bit_array at pos to val. */
static void
set_bit (Rawbyte *bit_array, EMACS_INT pos, EMACS_INT val)
{
#if N_MARK_BITS > 1
  EMACS_INT i;
#endif
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    if ((val >> i) & 1)
      *bit_array |= 1 << ((pos + i) % CHAR_BIT);
    else
      *bit_array &= ~(1 << ((pos + i) % CHAR_BIT));
#else
  if (val)
    *bit_array |= 1 << (pos % CHAR_BIT);
  else
    *bit_array &= ~(1 << (pos % CHAR_BIT));
#endif
}


/*--- mark bit functions ---*/
#define USE_PNTR_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) > BITS_PER_EMACS_INT)
#define USE_WORD_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) <= BITS_PER_EMACS_INT)
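
/* For instance, with 32-bit EMACS_INTs and the pre-NEW_GC single mark
   bit per cell, a page with up to 32 cells keeps its marks in the
   mark_bits word itself; with NEW_GC's two bits per cell the cutover
   drops to 16 cells.  Anything larger gets a heap-allocated bit
   array. */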

#define GET_BIT_WORD(b, p) get_bit ((Rawbyte *) &b, p)
#define GET_BIT_PNTR(b, p) get_bit (b, p)

#define SET_BIT_WORD(b, p, v) set_bit ((Rawbyte *) &b, p, v)
#define SET_BIT_PNTR(b, p, v) set_bit (b, p, v)

#define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0
#define ZERO_MARK_BITS_PNTR(ph)				\
do {							\
  memset (PH_MARK_BITS (ph), '\0',			\
	  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS)	\
	   + CHAR_BIT - 1) / CHAR_BIT * sizeof (Rawbyte)); \
} while (0)

#define GET_BIT(bit, ph, p)			\
do {						\
  if (USE_PNTR_MARK_BITS (ph))			\
    bit = GET_BIT_PNTR (PH_MARK_BITS (ph), p);	\
  else						\
    bit = GET_BIT_WORD (PH_MARK_BITS (ph), p);	\
} while (0)

#define SET_BIT(ph, p, v)			\
do {						\
  if (USE_PNTR_MARK_BITS (ph))			\
    SET_BIT_PNTR (PH_MARK_BITS (ph), p, v);	\
  else						\
    SET_BIT_WORD (PH_MARK_BITS (ph), p, v);	\
} while (0)

#define ZERO_MARK_BITS(ph)			\
do {						\
  if (USE_PNTR_MARK_BITS (ph))			\
    ZERO_MARK_BITS_PNTR (ph);			\
  else						\
    ZERO_MARK_BITS_WORD (ph);			\
} while (0)


/* Allocates mark-bit space either from a free list or from the OS
   for the given page header. */
static Rawbyte *
alloc_mark_bits (page_header *ph)
{
  Rawbyte *result;
#ifdef USE_MARK_BITS_FREE_LIST
  if (PH_MARK_BIT_FREE_LIST (ph) == 0)
    result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
  else
    {
      result = (Rawbyte *) PH_MARK_BIT_FREE_LIST (ph);
      PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result);
    }
#else /* not USE_MARK_BITS_FREE_LIST */
  result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
#endif /* not USE_MARK_BITS_FREE_LIST */
  return result;
}


/* Frees by maintaining a free list. */
static void
free_mark_bits (page_header *ph)
{
#ifdef USE_MARK_BITS_FREE_LIST
  NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph);
  PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph));
#else /* not USE_MARK_BITS_FREE_LIST */
  if (PH_MARK_BITS (ph))
    free (PH_MARK_BITS (ph));
#endif /* not USE_MARK_BITS_FREE_LIST */
}


/* Installs mark bits and zeros bits. */
static void
install_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    {
      PH_MARK_BITS (ph) = alloc_mark_bits (ph);
      ZERO_MARK_BITS_PNTR (ph);
    }
  else
    ZERO_MARK_BITS_WORD (ph);
}


/* Cleans and frees the mark bits of the given page_header. */
static void
remove_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    free_mark_bits (ph);
}


/* Zeros all mark bits in given header. */
static void
zero_mark_bits (page_header *ph)
{
  ZERO_MARK_BITS (ph);
}


/* Returns mark bit for given heap pointer. */
EMACS_INT
get_mark_bit (void *ptr)
{
  EMACS_INT bit = 0;
  page_header *ph = get_page_header (ptr);
  gc_checking_assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
    }
  return bit;
}


/* Sets mark bit for given heap pointer. */
void
set_mark_bit (void *ptr, EMACS_INT value)
{
  page_header *ph = get_page_header (ptr);
  assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
#ifdef NEW_GC
      if (value == BLACK)
	if (!PH_BLACK_BIT (ph))
	  PH_BLACK_BIT (ph) = 1;
#endif /* NEW_GC */
      SET_BIT (ph, get_mark_bit_index (ptr, ph), value);
    }
}




/*--- page header functions --------------------------------------------*/

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
#include "blocktype.h"

struct page_header_blocktype
{
  Blocktype_declare (page_header);
} *the_page_header_blocktype;
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

/* Allocates a page header either from a free list or from the OS. */
static page_header *
alloc_page_header (void)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  page_header *result;
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
  result = Blocktype_alloc (the_page_header_blocktype);
  ZERO_PAGE_HEADER (result);
  return result;
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
  page_header *result;
  if (PAGE_HEADER_FREE_LIST == 0)
    {
      result =
	(page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
    }
  else
    {
      result = (page_header*) PAGE_HEADER_FREE_LIST;
      PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
    }
  return result;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}


/* Frees given page header by maintaining a free list. */
static void
free_page_header (page_header *ph)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  Blocktype_free (the_page_header_blocktype, ph);
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
#if ZERO_MEM
  ZERO_PAGE_HEADER (ph);
#endif
  NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
  PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}


/* Adds given page header to given page list header's list. */
static void
add_page_header_to_plh (page_header *ph, page_list_header *plh)
{
  /* insert at the front of the list */
  PH_PREV (ph) = 0;
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PLH (ph) = plh;
  /* if list is not empty, set prev in the first element */
  if (PLH_FIRST (plh))
    PH_PREV (PLH_FIRST (plh)) = ph;
  /* one element in list is first and last at the same time */
  PLH_FIRST (plh) = ph;
  if (!PLH_LAST (plh))
    PLH_LAST (plh) = ph;

#ifdef MEMORY_USAGE_STATS
  /* bump page count */
  PLH_PAGE_COUNT (plh)++;
#endif

}


/* Removes given page header from given page list header's list. */
static void
remove_page_header_from_plh (page_header *ph, page_list_header *plh)
{
  if (PLH_FIRST (plh) == ph)
    PLH_FIRST (plh) = PH_NEXT (ph);
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);

#ifdef MEMORY_USAGE_STATS
  /* decrease page count */
  PLH_PAGE_COUNT (plh)--;
#endif
}


/* Moves a page header to the front of its page header list.
   This is used during sweep: Pages with some alive objects are moved to
   the front.  This makes allocation faster, all pages with free slots
   can be found at the front of the list. */
static void
move_page_header_to_front (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);
  /* is page already first? */
  if (ph == PLH_FIRST (plh)) return;
  /* remove from list */
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
  /* insert at the front */
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PREV (ph) = 0;
  PH_PREV (PH_NEXT (ph)) = ph;
  PLH_FIRST (plh) = ph;
}




/*--- page list functions ----------------------------------------------*/

/* Returns the index of the used heap list according to given size. */
static int
get_used_list_index (size_t size)
{
  if (size <= USED_LIST_MIN_OBJECT_SIZE)
    {
      //  printf ("size %d -> index %d\n", size, 0);
      return 0;
    }
  if (size <= (size_t) USED_LIST_UPPER_THRESHOLD)
    {
      //  printf ("size %d -> index %d\n", size,
      //	  ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
      //	   / USED_LIST_LIN_STEP) + 1);
      return ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
	      / USED_LIST_LIN_STEP) + 1;
    }
  //  printf ("size %d -> index %d\n", size, N_USED_PAGE_LISTS - 1);
  return N_USED_PAGE_LISTS - 1;
}
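
/* Example (32-bit defaults, so minimum object size 8 and step 4):
   size 8 maps to index 0; sizes 9-12 map to index 1; sizes 13-16 to
   index 2; and anything above PAGE_SIZE/2 falls into the last,
   oversized list. */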

/* Returns the size of the used heap list according to given index. */
static size_t
get_used_list_size_value (int used_index)
{
  if (used_index < N_USED_PAGE_LISTS - 1)
    return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE;
  return 0;
}


/* Returns the index of the free heap list according to given size. */
static EMACS_INT
get_free_list_index (EMACS_INT n_pages)
{
  if (n_pages == 0)
    return 0;
  if (n_pages <= FREE_LIST_LOWER_THRESHOLD)
    return n_pages - 1;
  if (n_pages >= FREE_LIST_UPPER_THRESHOLD - 1)
    return N_FREE_PAGE_LISTS - 1;
  return ((n_pages - FREE_LIST_LOWER_THRESHOLD - 1)
	  / FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;

}
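
/* Example: runs of 1-32 pages map straight to indices 0-31; runs of
   33-40 pages share index 32, runs of 41-48 share index 33, and so
   on; runs of 255 pages or more all land in the last list. */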


/* Returns the size in number of pages of the given free list at index. */
static size_t
get_free_list_size_value (EMACS_INT free_index)
{
  if (free_index < FREE_LIST_LOWER_THRESHOLD)
    return free_index + 1;
  if (free_index >= N_FREE_PAGE_LISTS)
    return FREE_LIST_UPPER_THRESHOLD;
  return ((free_index + 1 - FREE_LIST_LOWER_THRESHOLD)
	  * FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}


#ifdef MEMORY_USAGE_STATS
Bytecount
mc_alloced_storage_size (Bytecount claimed_size, struct overhead_stats *stats)
{
  size_t used_size =
    get_used_list_size_value (get_used_list_index (claimed_size));
  if (used_size == 0)
    used_size = mult_PAGE_SIZE (BYTES_TO_PAGES (claimed_size));

  if (stats)
    {
      stats->was_requested += claimed_size;
      stats->malloc_overhead += used_size - claimed_size;
    }

  return used_size;
}
#endif /* MEMORY_USAGE_STATS */



/*--- free heap functions ----------------------------------------------*/

/* Frees a heap section, if the heap_section is completely free */
static EMACS_INT
free_heap_section (page_header *ph)
{
  EMACS_INT i;
  EMACS_INT removed = 0;
  for (i = 0; i < N_HEAP_SECTIONS; i++)
    if (!removed)
      {
	if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
	    && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
	  {
	    xfree_1 (HEAP_SECTION(i).real_start);
#ifdef MEMORY_USAGE_STATS
	    MC_MALLOCED_BYTES
	      -= malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
#endif

	    HEAP_SIZE -= PH_N_PAGES (ph) * PAGE_SIZE;

	    removed = 1;
	  }
      }
    else
      {
	HEAP_SECTION(i-1).real_start = HEAP_SECTION(i).real_start;
	HEAP_SECTION(i-1).real_size = HEAP_SECTION(i).real_size;
	HEAP_SECTION(i-1).start = HEAP_SECTION(i).start;
	HEAP_SECTION(i-1).n_pages = HEAP_SECTION(i).n_pages;
      }

  N_HEAP_SECTIONS = N_HEAP_SECTIONS - removed;

  return removed;
}

/* Removes page from free list. */
static void
remove_page_from_free_list (page_header *ph)
{
  remove_page_header_from_plh (ph, PH_PLH (ph));
  PH_PLH (ph) = 0;
}


/* Adds page to according free list. */
static void
add_page_to_free_list (page_header *ph)
{
  PH_PLH (ph) = FREE_HEAP_PAGES (get_free_list_index (PH_N_PAGES (ph)));
  add_page_header_to_plh (ph, PH_PLH (ph));
}


/* Merges two adjacent pages. */
static page_header *
merge_pages (page_header *first_ph, page_header *second_ph)
{
  /* merge */
  PH_N_PAGES (first_ph) += PH_N_PAGES (second_ph);
  /* clean up left over page header */
  free_page_header (second_ph);
  /* update lookup table */
  add_pages_to_lookup_table (first_ph, PH_N_PAGES (first_ph));

  return first_ph;
}


/* Checks if pages are adjacent, merges them, and adds merged page to
   free list */
static void
merge_into_free_list (page_header *ph)
{
  /* check if you can coalesce adjacent pages */
  page_header *prev_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
				       - PAGE_SIZE));
  page_header *succ_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
				       + (PH_N_PAGES (ph) * PAGE_SIZE)));
  if (PH_ON_FREE_LIST_P (prev_ph))
    {
      remove_page_from_free_list (prev_ph);
      ph = merge_pages (prev_ph, ph);
    }
  if (PH_ON_FREE_LIST_P (succ_ph))
    {
      remove_page_from_free_list (succ_ph);
      ph = merge_pages (ph, succ_ph);
    }
  /* try to free heap_section, if the section is complete */
  if (!free_heap_section (ph))
    /* else add merged page to free list */
    add_page_to_free_list (ph);
}


/* Cuts given page header after n_pages, returns the first (cut) part, and
   puts the rest on the free list. */
static page_header *
split_page (page_header *ph, EMACS_INT n_pages)
{
  page_header *new_ph;
  EMACS_INT rem_pages = PH_N_PAGES (ph) - n_pages;

  /* remove the page from the free list if already hooked in */
  if (PH_PLH (ph))
    remove_page_from_free_list (ph);
  /* set new number of pages */
  PH_N_PAGES (ph) = n_pages;
  /* add new page to lookup table */
  add_pages_to_lookup_table (ph, n_pages);

  if (rem_pages)
    {
      /* build new page with remainder */
      new_ph = alloc_page_header ();
      PH_N_PAGES (new_ph) = rem_pages;
      PH_HEAP_SPACE (new_ph) =
	(void*) ((EMACS_INT) (PH_HEAP_SPACE (ph)) + (n_pages * PAGE_SIZE));
      /* add new page to lookup table */
      add_pages_to_lookup_table (new_ph, rem_pages);
      /* hook the rest into free list */
      add_page_to_free_list (new_ph);
    }
  return ph;
}


/* Expands the heap by given number of pages. */
static page_header *
expand_heap (EMACS_INT needed_pages)
{
  page_header *ph;
  EMACS_INT n_pages;
  size_t real_size;
  void *real_start;

  /* determine number of pages the heap should grow */
  n_pages = needed_pages + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR));
  if (n_pages < MIN_HEAP_INCREASE)
    n_pages = MIN_HEAP_INCREASE;

  /* get the real values */
  real_size = (n_pages * PAGE_SIZE) + PAGE_SIZE;
  real_start = xmalloc_and_zero (real_size);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, real_size, 0);
#endif

  /* maintain heap section count */
  if (N_HEAP_SECTIONS >= MAX_HEAP_SECTS)
    {
      stderr_out ("Increase number of MAX_HEAP_SECTS");
      ABORT ();
    }
  HEAP_SECTION(N_HEAP_SECTIONS).real_start = real_start;
  HEAP_SECTION(N_HEAP_SECTIONS).real_size = real_size;
  HEAP_SECTION(N_HEAP_SECTIONS).start = PAGE_SIZE_ALIGNMENT (real_start);
  HEAP_SECTION(N_HEAP_SECTIONS).n_pages = n_pages;
  N_HEAP_SECTIONS ++;

  /* get page header */
  ph = alloc_page_header ();

  /* setup page header */
  PH_N_PAGES (ph) = n_pages;
  PH_HEAP_SPACE (ph) = PAGE_SIZE_ALIGNMENT (real_start);
  assert (((EMACS_INT) (PH_HEAP_SPACE (ph)) % PAGE_SIZE) == 0);
  HEAP_SIZE += n_pages * PAGE_SIZE;

  /* this was also done by allocate_lisp_storage */
  if (need_to_check_c_alloca)
    xemacs_c_alloca (0);

  /* return needed size, put rest on free list */
  return split_page (ph, needed_pages);
}




/*--- used heap functions ----------------------------------------------*/
/* Installs initial free list. */
static void
install_cell_free_list (page_header *ph, EMACS_INT elemcount)
{
  Rawbyte *p;
  EMACS_INT i;
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  /* write initial free list if cell_size is < PAGE_SIZE */
  p = (Rawbyte *) PH_HEAP_SPACE (ph);
  for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
    {
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (p));
      MARK_LRECORD_AS_FREE (p);
#endif
      if (elemcount == 1)
	NEXT_FREE (p) = FREE_LIST (p + cell_size);
      set_lookup_table (p, ph);
      p += cell_size;
    }
#ifdef ERROR_CHECK_GC
  assert (!LRECORD_FREE_P (p));
  MARK_LRECORD_AS_FREE (p);
#endif
  NEXT_FREE (p) = 0;
  set_lookup_table (p, ph);

  /* hook free list into header */
  PH_FREE_LIST (ph) = FREE_LIST (PH_HEAP_SPACE (ph));
}


/* Cleans the object space of the given page_header. */
static void
remove_cell_free_list (page_header *ph)
{
#if ZERO_MEM
  ZERO_HEAP_SPACE (ph);
#endif
  PH_FREE_LIST (ph) = 0;
}


/* Installs a new page and hooks it into given page_list_header. */
static page_header *
install_page_in_used_list (page_header *ph, page_list_header *plh,
			   size_t size, EMACS_INT elemcount)
{
  /* add to list */
  add_page_header_to_plh (ph, plh);

  /* determine cell size */
  if (PLH_SIZE (plh))
    PH_CELL_SIZE (ph) = PLH_SIZE (plh);
  else
    PH_CELL_SIZE (ph) = size;
  if (elemcount == 1)
    PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph);
  else
    {
      PH_CELLS_ON_PAGE (ph) = elemcount;
      PH_ARRAY_BIT (ph) = 1;
    }

  /* init cell count */
  PH_CELLS_USED (ph) = 0;

  /* install mark bits and initialize cell free list */
  install_mark_bits (ph);

  install_cell_free_list (ph, elemcount);

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph);
#endif

  return ph;
}


/* Cleans and frees a page, identified by the given page_header. */
static void
remove_page_from_used_list (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);

#ifdef NEW_GC
  if (gc_in_progress && PH_PROTECTION_BIT (ph)) ABORT();
  /* cleanup: remove memory protection, zero page_header bits. */
#endif /* NEW_GC */

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
#endif

  /* clean up mark bits and cell free list */
  remove_cell_free_list (ph);
  if (PH_ON_USED_LIST_P (ph))
    remove_mark_bits (ph);

  /* clean up page header */
  PH_CELL_SIZE (ph) = 0;
  PH_CELLS_ON_PAGE (ph) = 0;
  PH_CELLS_USED (ph) = 0;

  /* remove from used list */
  remove_page_header_from_plh (ph, plh);

  /* move to free list */
  merge_into_free_list (ph);
}




/*--- allocation -------------------------------------------------------*/

/* Allocates from cell free list on already allocated pages. */
static page_header *
allocate_cell (page_list_header *plh)
{
  page_header *ph = PLH_FIRST (plh);
  if (ph)
    {
      if (PH_FREE_LIST (ph))
	/* elements free on first page */
	return ph;
      else if ((PH_NEXT (ph))
	       && (PH_FREE_LIST (PH_NEXT (ph))))
	/* elements free on second page */
	{
	  page_header *temp = PH_NEXT (ph);
	  /* move full page (first page) to end of list */
	  PH_NEXT (PLH_LAST (plh)) = ph;
	  PH_PREV (ph) = PLH_LAST (plh);
	  PLH_LAST (plh) = ph;
	  PH_NEXT (ph) = 0;
	  /* install second page as first page */
	  ph = temp;
	  PH_PREV (ph) = 0;
	  PLH_FIRST (plh) = ph;
	  return ph;
	}
    }
  return 0;
}


/* Finds a page which has at least the needed number of pages.
   Algorithm: FIRST FIT. */
static page_header *
find_free_page_first_fit (EMACS_INT needed_pages, page_header *ph)
{
  while (ph)
    {
      if (PH_N_PAGES (ph) >= needed_pages)
	return ph;
      ph = PH_NEXT (ph);
    }
  return 0;
}


/* Allocates a page from the free list. */
static page_header *
allocate_page_from_free_list (EMACS_INT needed_pages)
{
  page_header *ph = 0;
  EMACS_INT i;
  for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
    if ((ph = find_free_page_first_fit (needed_pages,
					PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
      {
	if (PH_N_PAGES (ph) > needed_pages)
	  return split_page (ph, needed_pages);
	else
	  {
	    remove_page_from_free_list (ph);
	    return ph;
	  }
      }
  return 0;
}


/* Allocates a new page, either from free list or by expanding the heap. */
static page_header *
allocate_new_page (page_list_header *plh, size_t size, EMACS_INT elemcount)
{
  EMACS_INT needed_pages = BYTES_TO_PAGES (size * elemcount);
  /* first check free list */
  page_header *result = allocate_page_from_free_list (needed_pages);
  if (!result)
    /* expand heap */
    result = expand_heap (needed_pages);
  install_page_in_used_list (result, plh, size, elemcount);
  return result;
}


/* Selects the correct size class and tries to allocate a cell of this
   size from the free list; if this fails, a new page is allocated. */
static void *
mc_alloc_1 (size_t size, EMACS_INT elemcount)
{
  page_list_header *plh = 0;
  page_header *ph = 0;
  void *result = 0;

  plh = USED_HEAP_PAGES (get_used_list_index (size));

  if (size == 0)
    return 0;
  if ((elemcount == 1) && (size < (size_t) PAGE_SIZE_DIV_2))
    /* first check any free cells */
    ph = allocate_cell (plh);
  if (!ph)
    /* allocate a new page */
    ph = allocate_new_page (plh, size, elemcount);

  /* return first element of free list and remove it from the list */
  result = (void*) PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) =
    NEXT_FREE (PH_FREE_LIST (ph));

  memset (result, '\0', (size * elemcount));
  MARK_LRECORD_AS_FREE (result);

  /* bump used cells counter */
  PH_CELLS_USED (ph) += elemcount;

#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (plh) += elemcount;
  PLH_USED_SPACE (plh) += size * elemcount;
#endif

  return result;
}

/* Array allocation. */
void *
mc_alloc_array (size_t size, EMACS_INT elemcount)
{
  return mc_alloc_1 (size, elemcount);
}

void *
mc_alloc (size_t size)
{
  return mc_alloc_1 (size, 1);
}
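
/* Typical use (a sketch only; real callers in XEmacs go through the
   lisp object allocation layer in alloc.c rather than calling these
   directly, and my_record is a made-up type):

     struct my_record *r =
       (struct my_record *) mc_alloc (sizeof (struct my_record));
     Lisp_Object *vec =
       (Lisp_Object *) mc_alloc_array (sizeof (Lisp_Object), 128);

   Both return zeroed storage; mc_free () or the next sweep reclaims
   it. */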



/*--- sweep & free & finalize-------------------------------------------*/

/* Frees a heap pointer. */
static void
remove_cell (void *ptr, page_header *ph)
{
#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (PH_PLH (ph))--;
  if (PH_ON_USED_LIST_P (ph))
    PLH_USED_SPACE (PH_PLH (ph)) -=
      detagged_lisp_object_size ((const struct lrecord_header *) ptr);
  else
    PLH_USED_SPACE (PH_PLH (ph)) -= PH_CELL_SIZE (ph);
#endif
  if (PH_ON_USED_LIST_P (ph))
    {
#ifdef ALLOC_TYPE_STATS
      dec_lrecord_stats (PH_CELL_SIZE (ph),
			 (const struct lrecord_header *) ptr);
#endif /* ALLOC_TYPE_STATS */
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (ptr));
      deadbeef_memory (ptr, PH_CELL_SIZE (ph));
      MARK_LRECORD_AS_FREE (ptr);
#endif
    }

  /* hooks cell into free list */
  NEXT_FREE (ptr) = PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) = FREE_LIST (ptr);
  /* decrease cells used */
  PH_CELLS_USED (ph)--;
}


/* Mark free list marks all free list entries. */
static void
mark_free_list (page_header *ph)
{
  free_link *fl = PH_FREE_LIST (ph);
  while (fl)
    {
#ifdef NEW_GC
      SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK);
#else /* not NEW_GC */
      SET_BIT (ph, get_mark_bit_index (fl, ph), 1);
#endif /* not NEW_GC */
      fl = NEXT_FREE (fl);
    }
}

/* Finalize a page.  You have to tell mc-alloc how to call your
   object's finalizer.  Therefore, you have to define the macro
   MC_ALLOC_CALL_FINALIZER(ptr).  This macro should do nothing other
   than test whether there is a finalizer and call it on the given
   argument, which is the heap address of the object. */
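/* A client's definition might look like this (a hypothetical sketch;
   the real macro is supplied by mc-alloc's user, e.g. alongside the
   lrecord implementation tables, and its details may differ):

     #define MC_ALLOC_CALL_FINALIZER(ptr) do {                    \
       struct lrecord_header *h = (struct lrecord_header *) ptr;  \
       if (!LRECORD_FREE_P (h)                                    \
	   && LHEADER_IMPLEMENTATION (h)->finalizer)              \
	 LHEADER_IMPLEMENTATION (h)->finalizer (h, 0);            \
     } while (0)

   finalize_page () below then invokes it for every unmarked cell on
   the page. */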
|
|
1499 static void
|
|
1500 finalize_page (page_header *ph)
|
|
1501 {
|
|
1502 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
|
|
1503 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
|
|
1504 EMACS_INT mark_bit = 0;
|
|
1505 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
|
3092
|
1506 unsigned int bit = 0;
|
2720
|
1507
|
|
1508 mark_free_list (ph);
|
|
1509
|
3092
|
1510 #ifdef NEW_GC
|
|
1511 /* ARRAY_BIT_HACK */
|
|
1512 if (PH_ARRAY_BIT (ph))
|
|
1513 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1514 {
|
|
1515 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
|
|
1516 if (bit)
|
|
1517 {
|
|
1518 return;
|
|
1519 }
|
|
1520 }
|
|
1521 #endif /* NEW_GC */
|
|
1522
|
2720
|
1523 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1524 {
|
3092
|
1525 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
|
|
1526 #ifdef NEW_GC
|
|
1527 if (bit == WHITE)
|
|
1528 #else /* not NEW_GC */
|
|
1529 if (bit == 0)
|
|
1530 #endif /* not NEW_GC */
|
2720
|
1531 {
|
|
1532 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
|
|
1533 MC_ALLOC_CALL_FINALIZER ((void *) ptr);
|
|
1534 }
|
|
1535 }
|
|
1536 }
|
|
1537
|
|
1538
|
|
1539 /* Finalize a page for disksave. XEmacs calls this routine before it
|
|
1540 dumps the heap image. You have to tell mc-alloc how to call your
|
|
1541 object's finalizer for disksave. Therefore, you have to define the
|
|
1542 macro MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr). This macro should
|
|
1543 do nothing else then test if there is a finalizer and call it on
|
|
1544 the given argument, which is the heap address of the object. */
|
|
1545 static void
|
|
1546 finalize_page_for_disksave (page_header *ph)
|
|
1547 {
|
|
1548 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
|
|
1549 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
|
|
1550 EMACS_INT mark_bit = 0;
|
|
1551 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
|
|
1552
|
|
1553 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1554 {
|
|
1555 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
|
|
1556 MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
|
|
1557 }
|
|
1558 }
|
|
1559
|
|
1560
|
|
1561 /* Finalizes the heap. */
|
|
1562 void
|
|
1563 mc_finalize (void)
|
|
1564 {
|
|
1565 visit_all_used_page_headers (finalize_page);
|
|
1566 }
|
|
1567
|
|
1568
|
|
1569 /* Finalizes the heap for disksave. */
|
|
1570 void
|
|
1571 mc_finalize_for_disksave (void)
|
|
1572 {
|
|
1573 visit_all_used_page_headers (finalize_page_for_disksave);
|
|
1574 }
|
|
1575
|
|
1576
|
|
1577 /* Sweeps a page: all the non-marked cells are freed. If the page is empty
|
|
1578 in the end, it is removed. If some cells are free, it is moved to the
|
|
1579 front of its page header list. Full pages stay where they are. */
|
|
1580 static void
|
|
1581 sweep_page (page_header *ph)
|
|
1582 {
|
3092
|
1583 Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
|
2720
|
1584 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
|
|
1585 EMACS_INT mark_bit = 0;
|
|
1586 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
|
3092
|
1587 unsigned int bit = 0;
|
2720
|
1588
|
|
1589 mark_free_list (ph);
|
|
1590
|
3092
|
1591 #ifdef NEW_GC
|
|
1592 /* ARRAY_BIT_HACK */
|
|
1593 if (PH_ARRAY_BIT (ph))
|
|
1594 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1595 {
|
|
1596 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
|
|
1597 if (bit)
|
|
1598 {
|
|
1599 zero_mark_bits (ph);
|
|
1600 PH_BLACK_BIT (ph) = 0;
|
|
1601 return;
|
|
1602 }
|
|
1603 }
|
|
1604 #endif /* NEW_GC */
|
|
1605
|
2720
|
1606 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1607 {
|
3092
|
1608 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
|
|
1609 #ifdef NEW_GC
|
|
1610 if (bit == WHITE)
|
|
1611 #else /* not NEW_GC */
|
|
1612 if (bit == 0)
|
|
1613 #endif /* not NEW_GC */
|
2720
|
1614 {
|
3092
|
1615 #ifdef NEW_GC
|
|
1616 GC_STAT_FREED;
|
|
1617 #endif /* NEW_GC */
|
2720
|
1618 remove_cell (heap_space + (heap_space_step * mark_bit), ph);
|
|
1619 }
|
|
1620 }
|
|
1621 zero_mark_bits (ph);
|
3092
|
1622 PH_BLACK_BIT (ph) = 0;
|
2720
|
1623 if (PH_CELLS_USED (ph) == 0)
|
|
1624 remove_page_from_used_list (ph);
|
|
1625 else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
|
|
1626 move_page_header_to_front (ph);
|
|
1627 }


/* Sweeps the heap. */
void
mc_sweep (void)
{
  visit_all_used_page_headers (sweep_page);
}


/* Frees the cell pointed to by ptr. */
void
mc_free (void *ptr)
{
  page_header *ph;

#ifdef NEW_GC
  /* Do not allow manual freeing while a gc is running; the data will
     be freed during the next gc cycle. */
  if (write_barrier_enabled || gc_in_progress)
    return;
#endif /* NEW_GC */

  ph = get_page_header (ptr);
  assert (ph);
  assert (PH_PLH (ph));
  assert (PLH_LIST_TYPE (PH_PLH (ph)) != FREE_LIST);

#ifdef NEW_GC
  if (PH_ON_USED_LIST_P (ph))
    SET_BIT (ph, get_mark_bit_index (ptr, ph), WHITE);
#endif /* NEW_GC */
  remove_cell (ptr, ph);

  if (PH_CELLS_USED (ph) == 0)
    remove_page_from_used_list (ph);
  else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
    move_page_header_to_front (ph);
}
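
/* Usage sketch, assuming the mc_alloc entry point declared in
   mc-alloc.h and a purely illustrative payload type:

     struct foo *f = (struct foo *) mc_alloc (sizeof (struct foo));
     ...
     mc_free (f);

   Under NEW_GC a free requested during a collection is silently
   deferred; the cell is reclaimed by the next sweep instead. */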


/* Changes the size of the cell pointed to by ptr; returns the
   address of the new cell with the new size. */
void *
mc_realloc_1 (void *ptr, size_t size, int elemcount)
{
  if (ptr)
    {
      if (size * elemcount)
        {
          void *result = mc_alloc_1 (size, elemcount);
          size_t from_size = PH_CELL_SIZE (get_page_header (ptr));
          size_t cpy_size = size * elemcount;
          if (cpy_size > from_size)
            cpy_size = from_size;
          memcpy (result, ptr, cpy_size);
#ifdef ALLOC_TYPE_STATS
          inc_lrecord_stats (size, (struct lrecord_header *) result);
#endif /* ALLOC_TYPE_STATS */
          /* mc_free (ptr); not needed, will be collected next gc */
          return result;
        }
      else
        {
          /* mc_free (ptr); not needed, will be collected next gc */
          return 0;
        }
    }
  else
    return mc_alloc_1 (size, elemcount);
}

void *
mc_realloc (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 1);
}

void *
mc_realloc_array (void *ptr, size_t size, EMACS_INT elemcount)
{
  return mc_realloc_1 (ptr, size, elemcount);
}
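
/* Semantics sketch (sizes illustrative): growing a cell copies the
   old contents into a fresh cell and abandons the old one to the
   collector, so no explicit free is needed:

     int *v = (int *) mc_alloc (8 * sizeof (int));
     v = (int *) mc_realloc (v, 16 * sizeof (int));

   A request of size zero (or elemcount zero) returns 0 and likewise
   leaves the old cell to the next gc. */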



/*--- initialization ---------------------------------------------------*/

/* Call once at the very beginning. */
void
init_mc_allocator (void)
{
  EMACS_INT i;

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES = 0;
#endif

  /* Initialization of the pagesize-dependent values. */
  switch (SYS_PAGE_SIZE)
    {
    case 512:   log_page_size = 9;  break;
    case 1024:  log_page_size = 10; break;
    case 2048:  log_page_size = 11; break;
    case 4096:  log_page_size = 12; break;
    case 8192:  log_page_size = 13; break;
    case 16384: log_page_size = 14; break;
    case 32768: log_page_size = 15; break;
    case 65536: log_page_size = 16; break;
    default:
      fprintf (stderr, "##### SYS_PAGE_SIZE=%ld not supported #####\n",
               (long) SYS_PAGE_SIZE);
      ABORT ();
    }
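
  /* Worked example: on a system with the common 4096-byte page,
     log_page_size is 12 and page_size_div_2 (set just below) comes
     out as 2048. */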

  page_size_div_2 = (EMACS_INT) SYS_PAGE_SIZE >> 1;

  mc_allocator_globals.used_heap_pages =
    (page_list_header *) xmalloc_and_zero ((N_USED_PAGE_LISTS + 1)
                                           * sizeof (page_list_header));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (N_USED_PAGE_LISTS + 1) * sizeof (page_list_header);
#endif

  mc_allocator_globals.ptr_lookup_table =
    (level_2_lookup_tree **)
    xmalloc_and_zero ((LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *);
#endif

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  the_page_header_blocktype = Blocktype_new (struct page_header_blocktype);
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    {
      page_list_header *plh = USED_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = USED_LIST;
      PLH_SIZE (plh) = get_used_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    {
      page_list_header *plh = FREE_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = FREE_LIST;
      PLH_SIZE (plh) = get_free_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  PAGE_HEADER_FREE_LIST = 0;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += sizeof (mc_allocator_globals);
#endif

  init_lookup_table ();
}




/*--- lisp function for statistics -------------------------------------*/

#ifdef MEMORY_USAGE_STATS
DEFUN ("mc-alloc-memory-usage", Fmc_alloc_memory_usage, 0, 0, 0, /*
Returns stats about the mc-alloc memory usage. See diagnose.el.
*/
       ())
{
  Lisp_Object free_plhs = Qnil;
  Lisp_Object used_plhs = Qnil;
  Lisp_Object heap_sects = Qnil;
  EMACS_INT used_size = 0;
  EMACS_INT real_size = 0;

  EMACS_INT i;

  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (FREE_HEAP_PAGES (i)) > 0)
      free_plhs =
        acons (make_int (PLH_SIZE (FREE_HEAP_PAGES (i))),
               list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES (i)))),
               free_plhs);

  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (USED_HEAP_PAGES (i)) > 0)
      used_plhs =
        acons (make_int (PLH_SIZE (USED_HEAP_PAGES (i))),
               list5 (make_int (PLH_PAGE_COUNT (USED_HEAP_PAGES (i))),
                      make_int (PLH_USED_CELLS (USED_HEAP_PAGES (i))),
                      make_int (PLH_USED_SPACE (USED_HEAP_PAGES (i))),
                      make_int (PLH_TOTAL_CELLS (USED_HEAP_PAGES (i))),
                      make_int (PLH_TOTAL_SPACE (USED_HEAP_PAGES (i)))),
               used_plhs);

  for (i = 0; i < N_HEAP_SECTIONS; i++)
    {
      used_size += HEAP_SECTION (i).n_pages * PAGE_SIZE;
      real_size +=
        malloced_storage_size (0, HEAP_SECTION (i).real_size, 0);
    }

  heap_sects =
    list3 (make_int (N_HEAP_SECTIONS),
           make_int (used_size),
           make_int (real_size));

  return Fcons (make_int (PAGE_SIZE),
                list5 (heap_sects,
                       Fnreverse (used_plhs),
                       Fnreverse (free_plhs),
                       make_int (sizeof (mc_allocator_globals)),
                       make_int (MC_MALLOCED_BYTES)));
}
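
/* Shape of the return value, read off the code above:

     (PAGE-SIZE
      (N-HEAP-SECTIONS USED-SIZE REAL-SIZE)   ; heap_sects
      USED-PLHS    ; ((SIZE PAGE-COUNT USED-CELLS USED-SPACE
                   ;   TOTAL-CELLS TOTAL-SPACE) ...)
      FREE-PLHS    ; ((SIZE PAGE-COUNT) ...)
      (sizeof mc_allocator_globals)
      MC-MALLOCED-BYTES) */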
#endif /* MEMORY_USAGE_STATS */

void
syms_of_mc_alloc (void)
{
#ifdef MEMORY_USAGE_STATS
  DEFSUBR (Fmc_alloc_memory_usage);
#endif /* MEMORY_USAGE_STATS */
}


#ifdef NEW_GC
/*--- incremental garbage collector ----------------------------------*/

/* Access the dirty bit of a page header. */
void
set_dirty_bit (page_header *ph, unsigned int value)
{
  PH_DIRTY_BIT (ph) = value;
}

void
set_dirty_bit_for_address (void *ptr, unsigned int value)
{
  set_dirty_bit (get_page_header (ptr), value);
}

unsigned int
get_dirty_bit (page_header *ph)
{
  return PH_DIRTY_BIT (ph);
}

unsigned int
get_dirty_bit_for_address (void *ptr)
{
  return get_dirty_bit (get_page_header (ptr));
}


/* Access the protection bit of a page header. */
void
set_protection_bit (page_header *ph, unsigned int value)
{
  PH_PROTECTION_BIT (ph) = value;
}

void
set_protection_bit_for_address (void *ptr, unsigned int value)
{
  set_protection_bit (get_page_header (ptr), value);
}

unsigned int
get_protection_bit (page_header *ph)
{
  return PH_PROTECTION_BIT (ph);
}

unsigned int
get_protection_bit_for_address (void *ptr)
{
  return get_protection_bit (get_page_header (ptr));
}


/* Returns the start of the page of the object pointed to by ptr. */
void *
get_page_start (void *ptr)
{
  return PH_HEAP_SPACE (get_page_header (ptr));
}

/* Make PAGE_SIZE globally available. */
EMACS_INT
mc_get_page_size (void)
{
  return PAGE_SIZE;
}

/* Is the fault at ptr on a protected page? */
EMACS_INT
fault_on_protected_page (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph
          && PH_HEAP_SPACE (ph)
          && (PH_HEAP_SPACE (ph) <= ptr)
          && ((void *) ((EMACS_INT) PH_HEAP_SPACE (ph)
                        + PH_N_PAGES (ph) * PAGE_SIZE) > ptr)
          && (PH_PROTECTION_BIT (ph) == 1));
}


/* Protect the heap page of the given page header ph if black objects
   are on the page. */
static void
protect_heap_page (page_header *ph)
{
  if (PH_BLACK_BIT (ph))
    {
      void *heap_space = PH_HEAP_SPACE (ph);
      EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
      vdb_protect ((void *) heap_space, heap_space_size);
      PH_PROTECTION_BIT (ph) = 1;
    }
}

/* Protect all heap pages with black objects. */
void
protect_heap_pages (void)
{
  visit_all_used_page_headers (protect_heap_page);
}


/* Remove the protection (if any) from the heap page of the given
   page header ph. */
static void
unprotect_heap_page (page_header *ph)
{
  if (PH_PROTECTION_BIT (ph))
    {
      void *heap_space = PH_HEAP_SPACE (ph);
      EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
      vdb_unprotect (heap_space, heap_space_size);
      PH_PROTECTION_BIT (ph) = 0;
    }
}

/* Remove the protection for all heap pages which are protected. */
void
unprotect_heap_pages (void)
{
  visit_all_used_page_headers (unprotect_heap_page);
}

/* Remove protection and mark the page dirty. */
void
unprotect_page_and_mark_dirty (void *ptr)
{
  page_header *ph = get_page_header (ptr);
  unprotect_heap_page (ph);
  PH_DIRTY_BIT (ph) = 1;
}
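
/* Presumed interplay with the virtual-dirty-bit machinery (the fault
   handler itself lives in the vdb code, not in this file): on a write
   fault, the handler can use fault_on_protected_page to check whether
   the fault hit this heap, call unprotect_page_and_mark_dirty so the
   mutator can proceed, and leave the page to be rescanned later via
   repush_all_objects_on_page. */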

/* Repush all objects on a dirty page onto the mark stack; returns
   the number of repushed objects. */
int
repush_all_objects_on_page (void *ptr)
{
  int repushed_objects = 0;
  page_header *ph = get_page_header (ptr);
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == BLACK)
        {
          repushed_objects++;
          gc_write_barrier
            (wrap_pointer_1 ((heap_space + (heap_space_step * mark_bit))));
        }
    }
  PH_BLACK_BIT (ph) = 0;
  PH_DIRTY_BIT (ph) = 0;
  return repushed_objects;
}
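
/* Rationale sketch: a black (already scanned) object on a dirty page
   may have been mutated to point at a white object, which would break
   the tri-color invariant.  Feeding the black objects back through
   gc_write_barrier lets the collector rescan them before the mark
   phase finishes. */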

/* Mark the object black if it is currently grey.  This first checks
   whether the object is really allocated on the mc-heap.  If it is,
   it can be marked black; if it is not, it cannot be marked. */
EMACS_INT
maybe_mark_black (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  unsigned int bit = 0;

  if (ph && PH_PLH (ph) && PH_ON_USED_LIST_P (ph))
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
      if (bit == GREY)
        {
          if (!PH_BLACK_BIT (ph))
            PH_BLACK_BIT (ph) = 1;
          SET_BIT (ph, get_mark_bit_index (ptr, ph), BLACK);
        }
      return 1;
    }
  return 0;
}

/* Only for debugging --- not used anywhere in the sources. */
EMACS_INT
object_on_heap_p (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph && PH_ON_USED_LIST_P (ph));
}

#endif /* NEW_GC */