2720
|
1 /* New size-based allocator for XEmacs.
|
|
2 Copyright (C) 2005 Marcus Crestani.
|
|
3
|
|
4 This file is part of XEmacs.
|
|
5
|
|
6 XEmacs is free software; you can redistribute it and/or modify it
|
|
7 under the terms of the GNU General Public License as published by the
|
|
8 Free Software Foundation; either version 2, or (at your option) any
|
|
9 later version.
|
|
10
|
|
11 XEmacs is distributed in the hope that it will be useful, but WITHOUT
|
|
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
14 for more details.
|
|
15
|
|
16 You should have received a copy of the GNU General Public License
|
|
17 along with XEmacs; see the file COPYING. If not, write to
|
|
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
|
19 Boston, MA 02111-1307, USA. */
|
|
20
|
|
21 /* Synched up with: Not in FSF. */
|
|
22
|
|
23 #include <config.h>
|
3092
|
24
|
2720
|
25 #include "lisp.h"
|
|
26 #include "mc-alloc.h"
|
3092
|
27 #include "getpagesize.h"
|
|
28
|
|
29
|
|
30 #if 0
|
|
31 # define USE_MARK_BITS_FREE_LIST 1
|
|
32 #endif
|
|
33 #if 1
|
|
34 # define BLOCKTYPE_ALLOC_PAGE_HEADER 1
|
|
35 #endif
|
|
36
|
|
37 /* Memory protection needs the real system-dependent pagesize. */
|
|
38 #ifndef WIN32_NATIVE
|
|
39 #include <unistd.h> /* for getpagesize () */
|
|
40 #endif
|
|
41 #if defined (HAVE_GETPAGESIZE)
|
|
42 # define SYS_PAGE_SIZE getpagesize ()
|
|
43 #elif defined (_SC_PAGESIZE)
|
|
44 # define SYS_PAGE_SIZE sysconf (_SC_PAGESIZE)
|
|
45 #elif defined (_SC_PAGE_SIZE)
|
|
46 # define SYS_PAGE_SIZE sysconf (_SC_PAGE_SIZE)
|
|
47 #elif defined(get_page_size)
|
|
48 # define SYS_PAGE_SIZE get_page_size ()
|
|
49 #elif defined(PAGESIZE)
|
|
50 # define SYS_PAGE_SIZE PAGESIZE
|
|
51 #elif defined(PAGE_SIZE)
|
|
52 # define SYS_PAGE_SIZE PAGE_SIZE
|
|
53 #else
|
|
54 /* Valid page sizes are powers of 2. */
|
|
55 # define SYS_PAGE_SIZE 4096
|
|
56 #endif
|
2720
|
57
|
|
58
|
|
59 /*--- configurable values ----------------------------------------------*/
|
|
60
|
|
61 /* Definition of size classes */
|
|
62
|
|
63 /* Heap used list constants: In the used heap, it is important to
|
|
64 quickly find a free spot for a new object. Therefore the size
|
|
65 classes of the used heap are defined by the size of the cells on
|
|
66 the pages. The size classes should match common object sizes, to
|
|
67 avoid wasting memory. */
|
|
68
|
|
69 /* Minimum object size in bytes. */
|
3092
|
70 #if BITS_PER_EMACS_INT > 32
|
|
71 # define USED_LIST_MIN_OBJECT_SIZE 16
|
|
72 #else
|
|
73 # define USED_LIST_MIN_OBJECT_SIZE 8
|
|
74 #endif
|
2720
|
75
|
|
76 /* The step size by which the size classes increase (up to upper
|
|
77 threshold). This many bytes are mapped to a single used list: */
|
3092
|
78 #if BITS_PER_EMACS_INT > 32
|
|
79 # define USED_LIST_LIN_STEP 8
|
|
80 #else
|
|
81 # define USED_LIST_LIN_STEP 4
|
|
82 #endif
|
2720
|
83
|
|
84 /* The upper threshold should always be set to PAGE_SIZE/2, because if
|
|
85 a object is larger than PAGE_SIZE/2 there is no room for any other
|
|
86 object on this page. Objects this big are kept in the page list of
|
|
87 the multiple pages, since a quick search for free spots is not
|
|
88 needed for this kind of pages (because there are no free spots).
|
|
89 PAGE_SIZES_DIV_2 defines maximum size of a used space list. */
|
3092
|
90 #define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2
|
2720
|
91
|
|
92
|
|
93 /* Heap free list constants: In the unused heap, the size of
|
|
94 consecutive memory tips the scales. A page is smallest entity which
|
|
95 is asked for. Therefore, the size classes of the unused heap are
|
|
96 defined by the number of consecutive pages. */
|
|
97 /* Sizes up to this many pages each have their own free list. */
|
|
98 #define FREE_LIST_LOWER_THRESHOLD 32
|
|
99 /* The step size by which the size classes increase (up to upper
|
|
100 threshold). FREE_LIST_LIN_STEP number of sizes are mapped to a
|
|
101 single free list for sizes between FREE_LIST_LOWER_THRESHOLD and
|
|
102 FREE_LIST_UPPER_THRESHOLD. */
|
|
103 #define FREE_LIST_LIN_STEP 8
|
|
104 /* Sizes of at least this many pages are mapped to a single free
|
|
105 list. Blocks of memory larger than this number are all kept in a
|
|
106 single list, which makes searching this list slow. But objects that
|
|
107 big are really seldom. */
|
|
108 #define FREE_LIST_UPPER_THRESHOLD 256
|
|
109
|
|
110
|
3092
|
111 /* used heap list count */
|
|
112 #define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \
|
|
113 - USED_LIST_MIN_OBJECT_SIZE) \
|
|
114 / USED_LIST_LIN_STEP) + 1 ) + 1
|
|
115
|
|
116 /* free heap list count */
|
|
117 #define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \
|
|
118 - FREE_LIST_LOWER_THRESHOLD) \
|
|
119 / FREE_LIST_LIN_STEP) \
|
|
120 + FREE_LIST_LOWER_THRESHOLD)
|
|
121
|
|
122
|
2720
|
123 /* Maximum number of separately added heap sections. */
|
|
124 #if BITS_PER_EMACS_INT > 32
|
|
125 # define MAX_HEAP_SECTS 2048
|
|
126 #else
|
|
127 # define MAX_HEAP_SECTS 768
|
|
128 #endif
|
|
129
|
|
130
|
|
131 /* Heap growth constants. Heap increases by any number between the
|
|
132 boundaries (unit is PAGE_SIZE). */
|
3092
|
133 #define MIN_HEAP_INCREASE 256
|
2720
|
134 #define MAX_HEAP_INCREASE 256 /* not used */
|
|
135
|
|
136 /* Every heap growth is calculated like this:
|
|
137 needed_pages + ( HEAP_SIZE / ( PAGE_SIZE * HEAP_GROWTH_DIVISOR )).
|
|
138 So the growth of the heap is influenced by the current size of the
|
|
139 heap, but kept between MIN_HEAP_INCREASE and MAX_HEAP_INCREASE
|
|
140 boundaries.
|
|
141 This reduces the number of heap sectors, the larger the heap grows
|
|
142 the larger are the newly allocated chunks. */
|
|
143 #define HEAP_GROWTH_DIVISOR 3
|
|
144
|
|
145
|
|
146 /* Zero memory before putting on free lists. */
|
|
147 #define ZERO_MEM 1
|
|
148
|
|
149
|
|
150 #ifndef CHAR_BIT /* should be included by limits.h */
|
|
151 # define CHAR_BIT BITS_PER_CHAR
|
|
152 #endif
|
|
153
|
|
154
|
3092
|
155
|
|
156 /*--- values depending on PAGE_SIZE ------------------------------------*/
|
2720
|
157
|
3092
|
158 /* initialized in init_mc_allocator () */
|
|
159 static EMACS_INT log_page_size;
|
|
160 static EMACS_INT page_size_div_2;
|
2720
|
161
|
3092
|
162 #undef PAGE_SIZE
|
|
163 #define PAGE_SIZE SYS_PAGE_SIZE
|
|
164 #define LOG_PAGE_SIZE log_page_size
|
|
165 #define PAGE_SIZE_DIV_2 page_size_div_2
|
2720
|
166
|
|
167
|
|
168 /* Constants for heap address to page header mapping. */
|
|
169 #define LOG_LEVEL2_SIZE 10
|
|
170 #define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE)
|
|
171 #if BITS_PER_EMACS_INT > 32
|
|
172 # define USE_HASH_TABLE 1
|
|
173 # define LOG_LEVEL1_SIZE 11
|
|
174 #else
|
|
175 # define LOG_LEVEL1_SIZE \
|
|
176 (BITS_PER_EMACS_INT - LOG_LEVEL2_SIZE - LOG_PAGE_SIZE)
|
|
177 #endif
|
|
178 #define LEVEL1_SIZE (1 << LOG_LEVEL1_SIZE)
|
|
179
|
|
180 #ifdef USE_HASH_TABLE
|
|
181 # define HASH(hi) ((hi) & (LEVEL1_SIZE - 1))
|
|
182 # define L1_INDEX(p) HASH ((EMACS_INT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
|
|
183 #else
|
|
184 # define L1_INDEX(p) ((EMACS_INT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
|
|
185 #endif
|
|
186 #define L2_INDEX(p) (((EMACS_INT) p >> LOG_PAGE_SIZE) & (LEVEL2_SIZE - 1))
|
|
187
|
|
188
|
|
189
|
|
190
|
|
191 /*--- structs and typedefs ---------------------------------------------*/
|
|
192
|
3092
|
193 /* Links the free lists (mark_bit_free_list and cell free list). */
|
2720
|
/* Generic free-list node.  Threads unused cells (and, with
   USE_MARK_BITS_FREE_LIST, retired mark-bit arrays) into singly
   linked free lists.  NOTE(review): the lheader member presumably
   makes a free cell recognizable as a lrecord by the sweeper --
   confirm against alloc.c. */
typedef struct free_link
{
  struct lrecord_header lheader;
  struct free_link *next_free;  /* next node on the free list, or 0 */
} free_link;
|
|
199
|
|
200
|
3092
|
201 /* Header for pages. They are held in a doubly linked list. */
|
2720
|
/* Header for pages.  They are held in a doubly linked list.  One
   page_header can describe a run of n_pages contiguous pages (for
   objects larger than half a page). */
typedef struct page_header
{
  struct page_header *next;      /* next page_header */
  struct page_header *prev;      /* previous page_header */
  /* Field plh holds pointer to the according header of the page list. */
  struct page_list_header *plh;  /* page list header */
  free_link *free_list;          /* links free cells on page */
  EMACS_INT n_pages;             /* number of pages */
  EMACS_INT cell_size;           /* size of cells on page */
  EMACS_INT cells_on_page;       /* total number of cells on page */
  EMACS_INT cells_used;          /* number of used cells on page */
  /* If the number of objects on the page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area; then the field mark_bits holds the pointer to this area.
     If the number of objects is smaller than BITS_PER_EMACS_INT, the
     mark bits are held in the mark_bits word directly, without an
     additional indirection. */
  unsigned int black_bit:1;      /* objects on page are black */
  unsigned int dirty_bit:1;      /* page is dirty */
  unsigned int protection_bit:1; /* page is write protected */
  unsigned int array_bit:1;      /* page holds arrays */
  Rawbyte *mark_bits;            /* pointer to mark bits */
  void *heap_space;              /* pointer to heap, where objects
                                    are stored */
} page_header;
|
|
226
|
|
227
|
|
228 /* Different list types. */
|
|
/* Different list types.  Discriminates whether a page_list_header
   tracks pages holding live objects or free (unused) pages. */
enum list_type_enum {
  USED_LIST,
  FREE_LIST
};
|
|
233
|
|
234
|
|
235 /* Header for page lists. Field list_type holds the type of the list. */
|
|
/* Header for page lists.  Field list_type holds the type of the list. */
typedef struct page_list_header
{
  enum list_type_enum list_type; /* the type of the list */
  /* Size holds the size of one cell (in bytes) in a used heap list, or the
     size of the heap sector (in number of pages). */
  size_t size;                   /* size of one cell / heap sector */
  page_header *first;            /* first of page_header list */
  page_header *last;             /* last of page_header list */
  /* If the number of objects on a page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, which is linked in this free list when not in use.  If the
     number of objects is smaller than BITS_PER_EMACS_INT, the mark
     bits are held in the mark bit EMACS_INT directly, without an
     additional indirection. */
  free_link *mark_bit_free_list;

#ifdef MEMORY_USAGE_STATS
  EMACS_INT page_count;          /* number of pages in list */
  EMACS_INT used_cells;          /* number of objects in list */
  EMACS_INT used_space;          /* used space */
  EMACS_INT total_cells;         /* number of available cells */
  EMACS_INT total_space;         /* available space */
#endif
} page_list_header;
|
|
260
|
|
261
|
|
262 /* The heap sectors are stored with their real start pointer and their
|
|
263 real size. Not aligned to PAGE_SIZE. Needed for freeing heap sectors. */
|
|
/* The heap sectors are stored with their real start pointer and their
   real size.  Not aligned to PAGE_SIZE.  Needed for freeing heap sectors. */
typedef struct heap_sect {
  void *real_start;   /* real start pointer (NOT aligned) */
  size_t real_size;   /* NOT multiple of PAGE_SIZE */
  void *start;        /* aligned start pointer */
  EMACS_INT n_pages;  /* multiple of PAGE_SIZE */
} heap_sect;
|
|
270
|
|
271
|
|
272 /* 2nd tree level for mapping of heap addresses to page headers. */
|
|
/* 2nd tree level for mapping of heap addresses to page headers.
   Each node covers LEVEL2_SIZE consecutive pages; key stores the
   high-order address bits so hash collisions can be resolved. */
typedef struct level_2_lookup_tree {
  page_header *index[LEVEL2_SIZE];  /* link to page header */
  EMACS_INT key;                    /* high order address bits */
#ifdef USE_HASH_TABLE
  struct level_2_lookup_tree *hash_link;  /* hash chain link */
#endif
} level_2_lookup_tree;
|
|
280
|
|
281
|
|
282
|
|
283 /*--- global variable definitions --------------------------------------*/
|
|
284
|
|
285 /* All global allocator variables are kept in this struct. */
|
|
/* All global allocator variables are kept in this struct. */
typedef struct mc_allocator_globals_type {

  /* heap size */
  EMACS_INT heap_size;

  /* list of all separately allocated chunks of heap */
  heap_sect heap_sections[MAX_HEAP_SECTS];
  EMACS_INT n_heap_sections;

  /* Holds all allocated pages, each object size class in its separate list,
     to guarantee fast allocation on partially filled pages.
     NOTE(review): a pointer rather than a fixed array -- presumably
     allocated to N_USED_PAGE_LISTS entries at init; confirm in
     init_mc_allocator. */
  page_list_header *used_heap_pages;

  /* Holds all free pages in the heap.  N multiples of PAGE_SIZE are
     kept on the Nth free list.  Contiguous pages are coalesced. */
  page_list_header free_heap_pages[N_FREE_PAGE_LISTS];

  /* ptr lookup table (first level of the address -> page_header map) */
  level_2_lookup_tree **ptr_lookup_table;

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  /* page header free list */
  free_link *page_header_free_list;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  EMACS_INT malloced_bytes;
#endif
} mc_allocator_globals_type;

mc_allocator_globals_type mc_allocator_globals;
|
|
317
|
|
318
|
|
319
|
|
320
|
|
321 /*--- macro accessors --------------------------------------------------*/
|
|
322
|
|
323 #define USED_HEAP_PAGES(i) \
|
|
324 ((page_list_header*) &mc_allocator_globals.used_heap_pages[i])
|
|
325
|
|
326 #define FREE_HEAP_PAGES(i) \
|
|
327 ((page_list_header*) &mc_allocator_globals.free_heap_pages[i])
|
|
328
|
|
329 #define PLH(plh) plh
|
|
330 # define PLH_LIST_TYPE(plh) PLH (plh)->list_type
|
|
331 # define PLH_SIZE(plh) PLH (plh)->size
|
|
332 # define PLH_FIRST(plh) PLH (plh)->first
|
|
333 # define PLH_LAST(plh) PLH (plh)->last
|
|
334 # define PLH_MARK_BIT_FREE_LIST(plh) PLH (plh)->mark_bit_free_list
|
|
335 #ifdef MEMORY_USAGE_STATS
|
|
336 # define PLH_PAGE_COUNT(plh) PLH (plh)->page_count
|
|
337 # define PLH_USED_CELLS(plh) PLH (plh)->used_cells
|
|
338 # define PLH_USED_SPACE(plh) PLH (plh)->used_space
|
|
339 # define PLH_TOTAL_CELLS(plh) PLH (plh)->total_cells
|
|
340 # define PLH_TOTAL_SPACE(plh) PLH (plh)->total_space
|
|
341 #endif
|
|
342
|
|
343 #define PH(ph) ph
|
|
344 # define PH_NEXT(ph) PH (ph)->next
|
|
345 # define PH_PREV(ph) PH (ph)->prev
|
|
346 # define PH_PLH(ph) PH (ph)->plh
|
|
347 # define PH_FREE_LIST(ph) PH (ph)->free_list
|
|
348 # define PH_N_PAGES(ph) PH (ph)->n_pages
|
|
349 # define PH_CELL_SIZE(ph) PH (ph)->cell_size
|
|
350 # define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page
|
|
351 # define PH_CELLS_USED(ph) PH (ph)->cells_used
|
3092
|
352 # define PH_BLACK_BIT(ph) PH (ph)->black_bit
|
|
353 # define PH_DIRTY_BIT(ph) PH (ph)->dirty_bit
|
|
354 # define PH_PROTECTION_BIT(ph) PH (ph)->protection_bit
|
|
355 # define PH_ARRAY_BIT(ph) PH (ph)->array_bit
|
2720
|
356 # define PH_MARK_BITS(ph) PH (ph)->mark_bits
|
|
357 # define PH_HEAP_SPACE(ph) PH (ph)->heap_space
|
|
358 #define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph))
|
|
359 #define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph))
|
|
360
|
|
361 #define HEAP_SIZE mc_allocator_globals.heap_size
|
|
362
|
|
363 #ifdef MEMORY_USAGE_STATS
|
|
364 # define MC_MALLOCED_BYTES mc_allocator_globals.malloced_bytes
|
|
365 #endif
|
|
366
|
|
367 #define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index]
|
|
368 #define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections
|
|
369
|
3092
|
370 #ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
|
2720
|
371 #define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list
|
3092
|
372 #endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
|
2720
|
373
|
|
374 #define NEXT_FREE(free_list) ((free_link*) free_list)->next_free
|
|
375 #define FREE_LIST(free_list) (free_link*) (free_list)
|
|
376
|
|
377 #define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i]
|
|
378 #define LEVEL2(l2, i) l2->index[i]
|
|
379 # define LEVEL2_KEY(l2) l2->key
|
|
380 #ifdef USE_HASH_TABLE
|
|
381 # define LEVEL2_HASH_LINK(l2) l2->hash_link
|
|
382 #endif
|
|
383
|
|
384 #if ZERO_MEM
|
|
385 # define ZERO_HEAP_SPACE(ph) \
|
|
386 memset (PH_HEAP_SPACE (ph), '\0', PH_N_PAGES (ph) * PAGE_SIZE)
|
|
387 # define ZERO_PAGE_HEADER(ph) memset (ph, '\0', sizeof (page_header))
|
|
388 #endif
|
|
389
|
|
390 #define div_PAGE_SIZE(x) (x >> LOG_PAGE_SIZE)
|
|
391 #define mult_PAGE_SIZE(x) (x << LOG_PAGE_SIZE)
|
|
392
|
|
393 #define BYTES_TO_PAGES(bytes) (div_PAGE_SIZE ((bytes + (PAGE_SIZE - 1))))
|
|
394
|
|
395 #define PAGE_SIZE_ALIGNMENT(address) \
|
|
396 (void *) ((((EMACS_INT) (address)) + PAGE_SIZE) & ~(PAGE_SIZE - 1))
|
|
397
|
|
398 #define PH_ON_FREE_LIST_P(ph) \
|
|
399 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST))
|
|
400
|
|
401 #define PH_ON_USED_LIST_P(ph) \
|
|
402 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST))
|
|
403
|
|
404
|
3092
|
405 /* Number of mark bits: minimum 1, maximum 8. */
|
|
406 #define N_MARK_BITS 2
|
2720
|
407
|
|
408
|
|
409
|
|
410 /************************************************************************/
|
|
411 /* MC Allocator */
|
|
412 /************************************************************************/
|
|
413
|
|
414
|
|
415 /*--- misc functions ---------------------------------------------------*/
|
|
416
|
|
417 /* moved here from alloc.c */
|
|
418 #ifdef ERROR_CHECK_GC
|
|
419 static void
|
|
420 deadbeef_memory (void *ptr, Bytecount size)
|
|
421 {
|
|
422 UINT_32_BIT *ptr4 = (UINT_32_BIT *) ptr;
|
|
423 Bytecount beefs = size >> 2;
|
|
424
|
|
425 /* In practice, size will always be a multiple of four. */
|
|
426 while (beefs--)
|
|
427 (*ptr4++) = 0xDEADBEEF; /* -559038737 base 10 */
|
|
428 }
|
|
429 #endif /* ERROR_CHECK_GC */
|
|
430
|
|
431 /* Visits all pages (page_headers) hooked into the used heap pages
|
|
432 list and executes f with the current page header as
|
|
433 argument. Needed for sweep. */
|
|
/* Visits all pages (page_headers) hooked into the used heap pages
   list and executes f with the current page header as
   argument.  Needed for sweep.  The successor pointer is fetched
   before invoking f, so f may safely unlink (or free) the page
   header it is given. */
static void
visit_all_used_page_headers (void (*f) (page_header *ph))
{
  EMACS_INT i;
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_FIRST (USED_HEAP_PAGES (i)))
      {
        page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i));
        while (PH_NEXT (ph))
          {
            page_header *next = PH_NEXT (ph); /* in case f removes the page */
            f (ph);
            ph = next;
          }
        /* the last element of the list */
        f (ph);
      }
}
|
|
451
|
|
452
|
|
453
|
|
454
|
|
455 /*--- mapping of heap addresses to page headers and mark bits ----------*/
|
|
456
|
|
457 /* Sets a heap pointer and page header pair into the lookup table. */
|
|
458 static void
|
|
459 set_lookup_table (void *ptr, page_header *ph)
|
|
460 {
|
3092
|
461 EMACS_INT l1_index = L1_INDEX (ptr);
|
2720
|
462 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
|
|
463 #ifdef USE_HASH_TABLE
|
|
464 while ((l2) && (LEVEL2_KEY (l2) != l1_index))
|
|
465 l2 = LEVEL2_HASH_LINK (l2);
|
|
466 #endif
|
|
467 if (!l2)
|
|
468 {
|
|
469 l2 = (level_2_lookup_tree*)
|
|
470 xmalloc_and_zero (sizeof (level_2_lookup_tree));
|
|
471 #ifdef MEMORY_USAGE_STATS
|
|
472 MC_MALLOCED_BYTES +=
|
|
473 malloced_storage_size (0, sizeof (level_2_lookup_tree), 0);
|
|
474 #endif
|
2932
|
475 memset (l2, '\0', sizeof (level_2_lookup_tree));
|
2720
|
476 #ifdef USE_HASH_TABLE
|
|
477 LEVEL2_HASH_LINK (l2) = PTR_LOOKUP_TABLE (l1_index);
|
|
478 #endif
|
|
479 PTR_LOOKUP_TABLE (l1_index) = l2;
|
|
480 LEVEL2_KEY (l2) = l1_index;
|
|
481 }
|
|
482 LEVEL2 (l2, L2_INDEX (ptr)) = ph;
|
|
483 }
|
|
484
|
|
485
|
|
486 #ifdef UNSET_LOOKUP_TABLE
|
|
487 /* Set the lookup table to 0 for given heap address. */
|
|
/* Set the lookup table to 0 for given heap address.  A no-op if no
   second-level table exists for the address's region. */
static void
unset_lookup_table (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  /* Walk the hash chain to the node keyed for this region. */
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (l2) {
    LEVEL2 (l2, L2_INDEX (ptr)) = 0;
  }
}
|
|
501 #endif
|
|
502
|
|
503 /* Returns the page header of a given heap address, or 0 if not in table.
|
|
504 For internal use, no error checking. */
|
|
/* Returns the page header of a given heap address, or 0 if not in table.
   For internal use, no error checking. */
static page_header *
get_page_header_internal (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  /* Walk the hash chain to the node keyed for this region. */
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    return 0;
  return LEVEL2 (l2, L2_INDEX (ptr));
}
|
|
518
|
|
519 /* Returns the page header of a given heap address, or 0 if not in table. */
|
|
520 static page_header *
|
|
521 get_page_header (void *ptr)
|
|
522 {
|
3092
|
523 EMACS_INT l1_index = L1_INDEX (ptr);
|
2720
|
524 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
|
2723
|
525 assert (l2);
|
2720
|
526 #ifdef USE_HASH_TABLE
|
|
527 while ((l2) && (LEVEL2_KEY (l2) != l1_index))
|
|
528 l2 = LEVEL2_HASH_LINK (l2);
|
|
529 #endif
|
2723
|
530 assert (LEVEL2 (l2, L2_INDEX (ptr)));
|
2720
|
531 return LEVEL2 (l2, L2_INDEX (ptr));
|
|
532 }
|
|
533
|
|
534 /* Returns the mark bit index of a given heap address. */
|
|
535 static EMACS_INT
|
|
536 get_mark_bit_index (void *ptr, page_header *ph)
|
|
537 {
|
|
538 EMACS_INT cell_size = PH_CELL_SIZE (ph);
|
|
539 if (cell_size)
|
3092
|
540 return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size)
|
|
541 * N_MARK_BITS;
|
2720
|
542 else /* only one object on page */
|
|
543 return 0;
|
|
544 }
|
|
545
|
|
546
|
|
547 /* Adds addresses of pages to lookup table. */
|
|
548 static void
|
|
549 add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages)
|
|
550 {
|
3092
|
551 Rawbyte *p = (Rawbyte *) PH_HEAP_SPACE (ph);
|
2720
|
552 EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages);
|
3092
|
553 for (p = (Rawbyte *) PH_HEAP_SPACE (ph);
|
2720
|
554 (EMACS_INT) p < end_of_section; p += PAGE_SIZE)
|
|
555 set_lookup_table (p, ph);
|
|
556 }
|
|
557
|
|
558
|
|
559 /* Initializes lookup table. */
|
|
560 static void
|
|
561 init_lookup_table (void)
|
|
562 {
|
3092
|
563 EMACS_INT i;
|
2720
|
564 for (i = 0; i < LEVEL1_SIZE; i++)
|
|
565 PTR_LOOKUP_TABLE (i) = 0;
|
|
566 }
|
|
567
|
|
568
|
|
569
|
|
570
|
|
571 /*--- mark bits --------------------------------------------------------*/
|
|
572
|
|
573 /*--- bit operations --- */
|
|
574
|
|
575 /* Allocates a bit array of length bits. */
|
3092
|
/* Allocates a zeroed bit array of length bits.  The allocation is
   padded to at least sizeof (free_link) so a retired array can later
   be threaded onto a free list (see free_mark_bits). */
static Rawbyte *
alloc_bit_array (size_t bits)
{
  Rawbyte *bit_array;
#ifdef USE_MARK_BITS_FREE_LIST
  size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte);
#else /* not USE_MARK_BITS_FREE_LIST */
  /* Rounded up so the array is pointer-aligned. */
  size_t size =
    ALIGN_FOR_TYPE (((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte),
                    Rawbyte *);
#endif /* not USE_MARK_BITS_FREE_LIST */
  if (size < sizeof (free_link)) size = sizeof (free_link);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0);
#endif
  bit_array = (Rawbyte *) xmalloc_and_zero (size);
  return bit_array;
}
|
|
594
|
|
595
|
|
596 /* Returns the bit value at pos. */
|
|
/* Returns the N_MARK_BITS-wide value starting at POS in BIT_ARRAY.
   Bit i of the result is bit (POS + i) of the array.  NOTE(review):
   a cell's bits never straddle a byte, presumably because POS is
   always a multiple of N_MARK_BITS and N_MARK_BITS divides CHAR_BIT
   -- confirm if N_MARK_BITS is ever changed. */
static EMACS_INT
get_bit (Rawbyte *bit_array, EMACS_INT pos)
{
#if N_MARK_BITS > 1
  EMACS_INT result = 0;
  EMACS_INT i;
#endif
  /* Advance to the byte holding the first bit. */
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    result |= ((*bit_array & (1 << ((pos + i) % CHAR_BIT))) != 0) << i;
  return result;
#else
  return (*bit_array & (1 << (pos % CHAR_BIT))) != 0;
#endif
}
|
|
613
|
|
614
|
|
615 /* Bit_Arrays bit at pos to val. */
|
|
/* Sets the N_MARK_BITS-wide value starting at POS in BIT_ARRAY to
   VAL (counterpart of get_bit; bit i of VAL goes to bit POS + i). */
static void
set_bit (Rawbyte *bit_array, EMACS_INT pos, EMACS_INT val)
{
#if N_MARK_BITS > 1
  EMACS_INT i;
#endif
  /* Advance to the byte holding the first bit. */
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    if ((val >> i) & 1)
      *bit_array |= 1 << ((pos + i) % CHAR_BIT);
    else
      *bit_array &= ~(1 << ((pos + i) % CHAR_BIT));
#else
  if (val)
    *bit_array |= 1 << (pos % CHAR_BIT);
  else
    *bit_array &= ~(1 << (pos % CHAR_BIT));
#endif
}
|
636
|
|
637
|
|
638 /*--- mark bit functions ---*/
|
3092
|
639 #define USE_PNTR_MARK_BITS(ph) \
|
|
640 ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) > BITS_PER_EMACS_INT)
|
|
641 #define USE_WORD_MARK_BITS(ph) \
|
|
642 ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) <= BITS_PER_EMACS_INT)
|
2720
|
643
|
3092
|
644 #define GET_BIT_WORD(b, p) get_bit ((Rawbyte *) &b, p)
|
2720
|
645 #define GET_BIT_PNTR(b, p) get_bit (b, p)
|
|
646
|
3092
|
647 #define SET_BIT_WORD(b, p, v) set_bit ((Rawbyte *) &b, p, v)
|
2720
|
648 #define SET_BIT_PNTR(b, p, v) set_bit (b, p, v)
|
|
649
|
|
650 #define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0
|
3092
|
651 #define ZERO_MARK_BITS_PNTR(ph) \
|
|
652 do { \
|
|
653 memset (PH_MARK_BITS (ph), '\0', \
|
|
654 ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) \
|
|
655 + CHAR_BIT - 1) / CHAR_BIT * sizeof (Rawbyte)); \
|
2720
|
656 } while (0)
|
|
657
|
|
658 #define GET_BIT(bit, ph, p) \
|
|
659 do { \
|
|
660 if (USE_PNTR_MARK_BITS (ph)) \
|
|
661 bit = GET_BIT_PNTR (PH_MARK_BITS (ph), p); \
|
|
662 else \
|
|
663 bit = GET_BIT_WORD (PH_MARK_BITS (ph), p); \
|
|
664 } while (0)
|
|
665
|
|
666 #define SET_BIT(ph, p, v) \
|
|
667 do { \
|
|
668 if (USE_PNTR_MARK_BITS (ph)) \
|
|
669 SET_BIT_PNTR (PH_MARK_BITS (ph), p, v); \
|
|
670 else \
|
|
671 SET_BIT_WORD (PH_MARK_BITS (ph), p, v); \
|
|
672 } while (0)
|
|
673
|
|
674 #define ZERO_MARK_BITS(ph) \
|
|
675 do { \
|
|
676 if (USE_PNTR_MARK_BITS (ph)) \
|
|
677 ZERO_MARK_BITS_PNTR (ph); \
|
|
678 else \
|
|
679 ZERO_MARK_BITS_WORD (ph); \
|
|
680 } while (0)
|
|
681
|
|
682
|
|
683 /* Allocates mark-bit space either from a free list or from the OS
|
|
684 for the given page header. */
|
3092
|
/* Allocates mark-bit space either from a free list or from the OS
   for the given page header. */
static Rawbyte *
alloc_mark_bits (page_header *ph)
{
  Rawbyte *result;
#ifdef USE_MARK_BITS_FREE_LIST
  if (PH_MARK_BIT_FREE_LIST (ph) == 0)
    result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
  else
    {
      /* Pop a recycled bit array off the free list.  NOTE(review):
         recycled arrays are not re-zeroed here; install_mark_bits
         zeroes after allocation -- confirm all callers do. */
      result = (Rawbyte *) PH_MARK_BIT_FREE_LIST (ph);
      PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result);
    }
#else /* not USE_MARK_BITS_FREE_LIST */
  result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
#endif /* not USE_MARK_BITS_FREE_LIST */
  return result;
}
|
|
702
|
|
703
|
|
704 /* Frees by maintaining a free list. */
|
|
705 static void
|
|
706 free_mark_bits (page_header *ph)
|
|
707 {
|
3092
|
708 #ifdef USE_MARK_BITS_FREE_LIST
|
|
709 NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph);
|
|
710 PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph));
|
|
711 #else /* not USE_MARK_BITS_FREE_LIST */
|
2720
|
712 if (PH_MARK_BITS (ph))
|
3092
|
713 free (PH_MARK_BITS (ph));
|
|
714 #endif /* not USE_MARK_BITS_FREE_LIST */
|
2720
|
715 }
|
|
716
|
|
717
|
|
718 /* Installs mark bits and zeros bits. */
|
|
/* Installs mark bits and zeros bits.  Pages with many cells get an
   out-of-line bit array; small pages keep the bits in the header's
   mark_bits word itself. */
static void
install_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    {
      PH_MARK_BITS (ph) = alloc_mark_bits (ph);
      ZERO_MARK_BITS_PNTR (ph);
    }
  else
    ZERO_MARK_BITS_WORD (ph);
}
|
|
730
|
|
731
|
|
732 /* Cleans and frees the mark bits of the given page_header. */
|
|
733 static void
|
|
734 remove_mark_bits (page_header *ph)
|
|
735 {
|
|
736 if (USE_PNTR_MARK_BITS (ph))
|
|
737 free_mark_bits (ph);
|
|
738 }
|
|
739
|
|
740
|
|
741 /* Zeros all mark bits in given header. */
|
|
/* Zeros all mark bits in given header (dispatches between the
   inline-word and out-of-line representations). */
static void
zero_mark_bits (page_header *ph)
{
  ZERO_MARK_BITS (ph);
}
|
|
747
|
|
748
|
|
749 /* Returns mark bit for given heap pointer. */
|
|
750 EMACS_INT
|
|
751 get_mark_bit (void *ptr)
|
|
752 {
|
|
753 EMACS_INT bit = 0;
|
|
754 page_header *ph = get_page_header (ptr);
|
|
755 gc_checking_assert (ph && PH_ON_USED_LIST_P (ph));
|
|
756 if (ph)
|
|
757 {
|
|
758 GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
|
|
759 }
|
|
760 return bit;
|
|
761 }
|
|
762
|
|
763
|
|
764 /* Sets mark bit for given heap pointer. */
|
|
765 void
|
|
766 set_mark_bit (void *ptr, EMACS_INT value)
|
|
767 {
|
|
768 page_header *ph = get_page_header (ptr);
|
|
769 assert (ph && PH_ON_USED_LIST_P (ph));
|
|
770 if (ph)
|
|
771 {
|
3092
|
772 if (value == BLACK)
|
|
773 if (!PH_BLACK_BIT (ph))
|
|
774 PH_BLACK_BIT (ph) = 1;
|
2720
|
775 SET_BIT (ph, get_mark_bit_index (ptr, ph), value);
|
|
776 }
|
|
777 }
|
|
778
|
|
779
|
|
780
|
|
781
|
|
782 /*--- page header functions --------------------------------------------*/
|
|
783
|
3092
|
784 #ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
|
|
785 #include "blocktype.h"
|
|
786
|
|
/* Blocktype pool used to allocate page_header structures when
   BLOCKTYPE_ALLOC_PAGE_HEADER is defined (see blocktype.h). */
struct page_header_blocktype
{
  Blocktype_declare (page_header);
} *the_page_header_blocktype;
|
|
791 #endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */
|
|
792
|
2720
|
793 /* Allocates a page header either from a free list or from the OS. */
|
|
/* Allocates a page header either from a free list or from the OS
   (or from the blocktype pool with BLOCKTYPE_ALLOC_PAGE_HEADER).
   The returned header is zeroed in every path. */
static page_header *
alloc_page_header (void)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  page_header *result;
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
  result = Blocktype_alloc (the_page_header_blocktype);
  /* Clear the header before use. */
  ZERO_PAGE_HEADER (result);
  return result;
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
  page_header *result;
  if (PAGE_HEADER_FREE_LIST == 0)
    {
      result =
        (page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
    }
  else
    {
      /* Reuse a header from the free list (zeroed by free_page_header
         when ZERO_MEM is set). */
      result = (page_header*) PAGE_HEADER_FREE_LIST;
      PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
    }
  return result;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}
|
|
823
|
|
824
|
|
825 /* Frees given page header by maintaining a free list. */
|
|
/* Frees given page header by maintaining a free list (or returns it
   to the blocktype pool with BLOCKTYPE_ALLOC_PAGE_HEADER). */
static void
free_page_header (page_header *ph)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  Blocktype_free (the_page_header_blocktype, ph);
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
#if ZERO_MEM
  /* Zero the header now so reuse in alloc_page_header starts clean. */
  ZERO_PAGE_HEADER (ph);
#endif
  NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
  PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}
|
|
839
|
|
840
|
|
841 /* Adds given page header to given page list header's list. */
|
|
842 static void
|
|
843 add_page_header_to_plh (page_header *ph, page_list_header *plh)
|
|
844 {
|
|
845 /* insert at the front of the list */
|
|
846 PH_PREV (ph) = 0;
|
|
847 PH_NEXT (ph) = PLH_FIRST (plh);
|
|
848 PH_PLH (ph) = plh;
|
|
849 /* if list is not empty, set prev in the first element */
|
|
850 if (PLH_FIRST (plh))
|
|
851 PH_PREV (PLH_FIRST (plh)) = ph;
|
|
852 /* one element in list is first and last at the same time */
|
|
853 PLH_FIRST (plh) = ph;
|
|
854 if (!PLH_LAST (plh))
|
|
855 PLH_LAST (plh) = ph;
|
|
856
|
|
857 #ifdef MEMORY_USAGE_STATS
|
|
858 /* bump page count */
|
|
859 PLH_PAGE_COUNT (plh)++;
|
|
860 #endif
|
|
861
|
|
862 }
|
|
863
|
|
864
|
|
865 /* Removes given page header from given page list header's list. */
|
|
866 static void
|
|
867 remove_page_header_from_plh (page_header *ph, page_list_header *plh)
|
|
868 {
|
|
869 if (PLH_FIRST (plh) == ph)
|
|
870 PLH_FIRST (plh) = PH_NEXT (ph);
|
|
871 if (PLH_LAST (plh) == ph)
|
|
872 PLH_LAST (plh) = PH_PREV (ph);
|
|
873 if (PH_NEXT (ph))
|
|
874 PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
|
|
875 if (PH_PREV (ph))
|
|
876 PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
|
|
877
|
|
878 #ifdef MEMORY_USAGE_STATS
|
|
879 /* decrease page count */
|
|
880 PLH_PAGE_COUNT (plh)--;
|
|
881 #endif
|
|
882 }
|
|
883
|
|
884
|
|
885 /* Moves a page header to the front of its the page header list.
|
|
886 This is used during sweep: Pages with some alive objects are moved to
|
|
887 the front. This makes allocation faster, all pages with free slots
|
|
888 can be found at the front of the list. */
|
|
889 static void
|
|
890 move_page_header_to_front (page_header *ph)
|
|
891 {
|
|
892 page_list_header *plh = PH_PLH (ph);
|
|
893 /* is page already first? */
|
|
894 if (ph == PLH_FIRST (plh)) return;
|
|
895 /* remove from list */
|
|
896 if (PLH_LAST (plh) == ph)
|
|
897 PLH_LAST (plh) = PH_PREV (ph);
|
|
898 if (PH_NEXT (ph))
|
|
899 PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
|
|
900 if (PH_PREV (ph))
|
|
901 PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
|
|
902 /* insert at the front */
|
|
903 PH_NEXT (ph) = PLH_FIRST (plh);
|
|
904 PH_PREV (ph) = 0;
|
|
905 PH_PREV (PH_NEXT (ph)) = ph;
|
|
906 PLH_FIRST (plh) = ph;
|
|
907 }
|
|
908
|
|
909
|
|
910
|
|
911
|
|
912 /*--- page list functions ----------------------------------------------*/
|
|
913
|
|
914 /* Returns the index of the used heap list according to given size. */
|
|
915 static int
|
|
916 get_used_list_index (size_t size)
|
|
917 {
|
|
918 if (size <= USED_LIST_MIN_OBJECT_SIZE)
|
3092
|
919 {
|
|
920 // printf ("size %d -> index %d\n", size, 0);
|
|
921 return 0;
|
|
922 }
|
|
923 if (size <= (size_t) USED_LIST_UPPER_THRESHOLD)
|
|
924 {
|
|
925 // printf ("size %d -> index %d\n", size,
|
|
926 // ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
|
|
927 // / USED_LIST_LIN_STEP) + 1);
|
|
928 return ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
|
|
929 / USED_LIST_LIN_STEP) + 1;
|
|
930 }
|
|
931 // printf ("size %d -> index %d\n", size, N_USED_PAGE_LISTS - 1);
|
2720
|
932 return N_USED_PAGE_LISTS - 1;
|
|
933 }
|
|
934
|
|
935 /* Returns the size of the used heap list according to given index. */
|
|
936 static size_t
|
|
937 get_used_list_size_value (int used_index)
|
|
938 {
|
|
939 if (used_index < N_USED_PAGE_LISTS - 1)
|
|
940 return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE;
|
|
941 return 0;
|
|
942 }
|
|
943
|
|
944
|
|
945 /* Returns the index of the free heap list according to given size. */
|
3092
|
946 static EMACS_INT
|
2720
|
947 get_free_list_index (EMACS_INT n_pages)
|
|
948 {
|
|
949 if (n_pages == 0)
|
|
950 return 0;
|
|
951 if (n_pages <= FREE_LIST_LOWER_THRESHOLD)
|
|
952 return n_pages - 1;
|
|
953 if (n_pages >= FREE_LIST_UPPER_THRESHOLD - 1)
|
|
954 return N_FREE_PAGE_LISTS - 1;
|
|
955 return ((n_pages - FREE_LIST_LOWER_THRESHOLD - 1)
|
|
956 / FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
|
|
957
|
|
958 }
|
|
959
|
|
960
|
|
961 /* Returns the size in number of pages of the given free list at index. */
|
|
962 static size_t
|
3092
|
963 get_free_list_size_value (EMACS_INT free_index)
|
2720
|
964 {
|
|
965 if (free_index < FREE_LIST_LOWER_THRESHOLD)
|
|
966 return free_index + 1;
|
|
967 if (free_index >= N_FREE_PAGE_LISTS)
|
|
968 return FREE_LIST_UPPER_THRESHOLD;
|
|
969 return ((free_index + 1 - FREE_LIST_LOWER_THRESHOLD)
|
|
970 * FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
|
|
971 }
|
|
972
|
|
973
|
|
974 #ifdef MEMORY_USAGE_STATS
|
|
975 Bytecount
|
|
976 mc_alloced_storage_size (Bytecount claimed_size, struct overhead_stats *stats)
|
|
977 {
|
|
978 size_t used_size =
|
|
979 get_used_list_size_value (get_used_list_index (claimed_size));
|
|
980 if (used_size == 0)
|
|
981 used_size = mult_PAGE_SIZE (BYTES_TO_PAGES (claimed_size));
|
|
982
|
|
983 if (stats)
|
|
984 {
|
|
985 stats->was_requested += claimed_size;
|
|
986 stats->malloc_overhead += used_size - claimed_size;
|
|
987 }
|
|
988
|
|
989 return used_size;
|
|
990 }
|
|
991 #endif /* MEMORY_USAGE_STATS */
|
|
992
|
|
993
|
|
994
|
|
995 /*--- free heap functions ----------------------------------------------*/
|
|
996
|
|
997 /* Frees a heap section, if the heap_section is completly free */
|
|
998 static EMACS_INT
|
|
999 free_heap_section (page_header *ph)
|
|
1000 {
|
3092
|
1001 EMACS_INT i;
|
|
1002 EMACS_INT removed = 0;
|
2720
|
1003 for (i = 0; i < N_HEAP_SECTIONS; i++)
|
|
1004 if (!removed)
|
|
1005 {
|
|
1006 if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
|
|
1007 && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
|
|
1008 {
|
|
1009 xfree_1 (HEAP_SECTION(i).real_start);
|
|
1010 #ifdef MEMORY_USAGE_STATS
|
|
1011 MC_MALLOCED_BYTES
|
|
1012 -= malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
|
|
1013 #endif
|
|
1014
|
|
1015 HEAP_SIZE -= PH_N_PAGES (ph) * PAGE_SIZE;
|
|
1016
|
|
1017 removed = 1;
|
|
1018 }
|
|
1019 }
|
|
1020 else
|
|
1021 {
|
|
1022 HEAP_SECTION(i-1).real_start = HEAP_SECTION(i).real_start;
|
|
1023 HEAP_SECTION(i-1).real_size = HEAP_SECTION(i).real_size;
|
|
1024 HEAP_SECTION(i-1).start = HEAP_SECTION(i).start;
|
|
1025 HEAP_SECTION(i-1).n_pages = HEAP_SECTION(i).n_pages;
|
|
1026 }
|
|
1027
|
|
1028 N_HEAP_SECTIONS = N_HEAP_SECTIONS - removed;
|
|
1029
|
|
1030 return removed;
|
|
1031 }
|
|
1032
|
|
1033 /* Removes page from free list. */
|
|
1034 static void
|
|
1035 remove_page_from_free_list (page_header *ph)
|
|
1036 {
|
|
1037 remove_page_header_from_plh (ph, PH_PLH (ph));
|
|
1038 PH_PLH (ph) = 0;
|
|
1039 }
|
|
1040
|
|
1041
|
|
1042 /* Adds page to according free list. */
|
|
1043 static void
|
|
1044 add_page_to_free_list (page_header *ph)
|
|
1045 {
|
|
1046 PH_PLH (ph) = FREE_HEAP_PAGES (get_free_list_index (PH_N_PAGES (ph)));
|
|
1047 add_page_header_to_plh (ph, PH_PLH (ph));
|
|
1048 }
|
|
1049
|
|
1050
|
|
1051 /* Merges two adjacent pages. */
|
|
1052 static page_header *
|
|
1053 merge_pages (page_header *first_ph, page_header *second_ph)
|
|
1054 {
|
|
1055 /* merge */
|
|
1056 PH_N_PAGES (first_ph) += PH_N_PAGES (second_ph);
|
|
1057 /* clean up left over page header */
|
|
1058 free_page_header (second_ph);
|
|
1059 /* update lookup table */
|
|
1060 add_pages_to_lookup_table (first_ph, PH_N_PAGES (first_ph));
|
|
1061
|
|
1062 return first_ph;
|
|
1063 }
|
|
1064
|
|
1065
|
|
1066 /* Checks if pages are adjacent, merges them, and adds merged page to
|
|
1067 free list */
|
|
1068 static void
|
|
1069 merge_into_free_list (page_header *ph)
|
|
1070 {
|
|
1071 /* check if you can coalesce adjacent pages */
|
|
1072 page_header *prev_ph =
|
|
1073 get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
|
|
1074 - PAGE_SIZE));
|
|
1075 page_header *succ_ph =
|
|
1076 get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
|
|
1077 + (PH_N_PAGES (ph) * PAGE_SIZE)));
|
|
1078 if (PH_ON_FREE_LIST_P (prev_ph))
|
|
1079 {
|
|
1080 remove_page_from_free_list (prev_ph);
|
|
1081 ph = merge_pages (prev_ph, ph);
|
|
1082 }
|
|
1083 if (PH_ON_FREE_LIST_P (succ_ph))
|
|
1084 {
|
|
1085 remove_page_from_free_list (succ_ph);
|
|
1086 ph = merge_pages (ph, succ_ph);
|
|
1087 }
|
|
1088 /* try to free heap_section, if the section is complete */
|
|
1089 if (!free_heap_section (ph))
|
|
1090 /* else add merged page to free list */
|
|
1091 add_page_to_free_list (ph);
|
|
1092 }
|
|
1093
|
|
1094
|
|
1095 /* Cuts given page header after n_pages, returns the first (cut) part, and
|
|
1096 puts the rest on the free list. */
|
|
1097 static page_header *
|
|
1098 split_page (page_header *ph, EMACS_INT n_pages)
|
|
1099 {
|
|
1100 page_header *new_ph;
|
|
1101 EMACS_INT rem_pages = PH_N_PAGES (ph) - n_pages;
|
|
1102
|
|
1103 /* remove the page from the free list if already hooked in */
|
|
1104 if (PH_PLH (ph))
|
|
1105 remove_page_from_free_list (ph);
|
|
1106 /* set new number of pages */
|
|
1107 PH_N_PAGES (ph) = n_pages;
|
|
1108 /* add new page to lookup table */
|
|
1109 add_pages_to_lookup_table (ph, n_pages);
|
|
1110
|
|
1111 if (rem_pages)
|
|
1112 {
|
|
1113 /* build new page with reminder */
|
|
1114 new_ph = alloc_page_header ();
|
|
1115 PH_N_PAGES (new_ph) = rem_pages;
|
|
1116 PH_HEAP_SPACE (new_ph) =
|
|
1117 (void*) ((EMACS_INT) (PH_HEAP_SPACE (ph)) + (n_pages * PAGE_SIZE));
|
|
1118 /* add new page to lookup table */
|
|
1119 add_pages_to_lookup_table (new_ph, rem_pages);
|
|
1120 /* hook the rest into free list */
|
|
1121 add_page_to_free_list (new_ph);
|
|
1122 }
|
|
1123 return ph;
|
|
1124 }
|
|
1125
|
|
1126
|
|
1127 /* Expands the heap by given number of pages. */
|
|
1128 static page_header *
|
|
1129 expand_heap (EMACS_INT needed_pages)
|
|
1130 {
|
|
1131 page_header *ph;
|
|
1132 EMACS_INT n_pages;
|
|
1133 size_t real_size;
|
|
1134 void *real_start;
|
|
1135
|
|
1136 /* determine number of pages the heap should grow */
|
|
1137 n_pages = needed_pages + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR));
|
|
1138 if (n_pages < MIN_HEAP_INCREASE)
|
|
1139 n_pages = MIN_HEAP_INCREASE;
|
|
1140
|
|
1141 /* get the real values */
|
|
1142 real_size = (n_pages * PAGE_SIZE) + PAGE_SIZE;
|
|
1143 real_start = xmalloc_and_zero (real_size);
|
|
1144 #ifdef MEMORY_USAGE_STATS
|
|
1145 MC_MALLOCED_BYTES += malloced_storage_size (0, real_size, 0);
|
|
1146 #endif
|
|
1147
|
|
1148 /* maintain heap section count */
|
|
1149 if (N_HEAP_SECTIONS >= MAX_HEAP_SECTS)
|
|
1150 {
|
|
1151 stderr_out ("Increase number of MAX_HEAP_SECTS");
|
|
1152 ABORT ();
|
|
1153 }
|
|
1154 HEAP_SECTION(N_HEAP_SECTIONS).real_start = real_start;
|
|
1155 HEAP_SECTION(N_HEAP_SECTIONS).real_size = real_size;
|
|
1156 HEAP_SECTION(N_HEAP_SECTIONS).start = PAGE_SIZE_ALIGNMENT (real_start);
|
|
1157 HEAP_SECTION(N_HEAP_SECTIONS).n_pages = n_pages;
|
|
1158 N_HEAP_SECTIONS ++;
|
|
1159
|
|
1160 /* get page header */
|
|
1161 ph = alloc_page_header ();
|
|
1162
|
|
1163 /* setup page header */
|
|
1164 PH_N_PAGES (ph) = n_pages;
|
|
1165 PH_HEAP_SPACE (ph) = PAGE_SIZE_ALIGNMENT (real_start);
|
|
1166 assert (((EMACS_INT) (PH_HEAP_SPACE (ph)) % PAGE_SIZE) == 0);
|
|
1167 HEAP_SIZE += n_pages * PAGE_SIZE;
|
|
1168
|
|
1169 /* this was also done by allocate_lisp_storage */
|
|
1170 if (need_to_check_c_alloca)
|
|
1171 xemacs_c_alloca (0);
|
|
1172
|
|
1173 /* return needed size, put rest on free list */
|
|
1174 return split_page (ph, needed_pages);
|
|
1175 }
|
|
1176
|
|
1177
|
|
1178
|
|
1179
|
|
1180 /*--- used heap functions ----------------------------------------------*/
|
|
1181 /* Installs initial free list. */
|
|
1182 static void
|
3092
|
1183 install_cell_free_list (page_header *ph, EMACS_INT elemcount)
|
2720
|
1184 {
|
3092
|
1185 Rawbyte *p;
|
|
1186 EMACS_INT i;
|
2720
|
1187 EMACS_INT cell_size = PH_CELL_SIZE (ph);
|
|
1188 /* write initial free list if cell_size is < PAGE_SIZE */
|
3092
|
1189 p = (Rawbyte *) PH_HEAP_SPACE (ph);
|
2720
|
1190 for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
|
|
1191 {
|
|
1192 #ifdef ERROR_CHECK_GC
|
|
1193 assert (!LRECORD_FREE_P (p));
|
|
1194 MARK_LRECORD_AS_FREE (p);
|
|
1195 #endif
|
3092
|
1196 if (elemcount == 1)
|
|
1197 NEXT_FREE (p) = FREE_LIST (p + cell_size);
|
2720
|
1198 set_lookup_table (p, ph);
|
3092
|
1199 p += cell_size;
|
2720
|
1200 }
|
|
1201 #ifdef ERROR_CHECK_GC
|
|
1202 assert (!LRECORD_FREE_P (p));
|
|
1203 MARK_LRECORD_AS_FREE (p);
|
|
1204 #endif
|
|
1205 NEXT_FREE (p) = 0;
|
|
1206 set_lookup_table (p, ph);
|
|
1207
|
|
1208 /* hook free list into header */
|
|
1209 PH_FREE_LIST (ph) = FREE_LIST (PH_HEAP_SPACE (ph));
|
|
1210 }
|
|
1211
|
|
1212
|
|
1213 /* Cleans the object space of the given page_header. */
|
|
1214 static void
|
|
1215 remove_cell_free_list (page_header *ph)
|
|
1216 {
|
|
1217 #if ZERO_MEM
|
|
1218 ZERO_HEAP_SPACE (ph);
|
|
1219 #endif
|
|
1220 PH_FREE_LIST (ph) = 0;
|
|
1221 }
|
|
1222
|
|
1223
|
|
1224 /* Installs a new page and hooks it into given page_list_header. */
|
|
1225 static page_header *
|
|
1226 install_page_in_used_list (page_header *ph, page_list_header *plh,
|
3092
|
1227 size_t size, EMACS_INT elemcount)
|
2720
|
1228 {
|
|
1229 /* add to list */
|
|
1230 add_page_header_to_plh (ph, plh);
|
|
1231
|
|
1232 /* determine cell size */
|
|
1233 if (PLH_SIZE (plh))
|
|
1234 PH_CELL_SIZE (ph) = PLH_SIZE (plh);
|
|
1235 else
|
|
1236 PH_CELL_SIZE (ph) = size;
|
3092
|
1237 if (elemcount == 1)
|
|
1238 PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph);
|
|
1239 else
|
|
1240 {
|
|
1241 PH_CELLS_ON_PAGE (ph) = elemcount;
|
|
1242 PH_ARRAY_BIT (ph) = 1;
|
|
1243 }
|
2720
|
1244
|
|
1245 /* init cell count */
|
|
1246 PH_CELLS_USED (ph) = 0;
|
|
1247
|
|
1248 /* install mark bits and initialize cell free list */
|
3092
|
1249 install_mark_bits (ph);
|
2720
|
1250
|
3092
|
1251 install_cell_free_list (ph, elemcount);
|
2720
|
1252
|
|
1253 #ifdef MEMORY_USAGE_STATS
|
|
1254 PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph);
|
|
1255 PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph);
|
|
1256 #endif
|
|
1257
|
|
1258 return ph;
|
|
1259 }
|
|
1260
|
|
1261
|
|
1262 /* Cleans and frees a page, identified by the given page_header. */
|
|
1263 static void
|
|
1264 remove_page_from_used_list (page_header *ph)
|
|
1265 {
|
|
1266 page_list_header *plh = PH_PLH (ph);
|
|
1267
|
3092
|
1268 if (gc_in_progress && PH_PROTECTION_BIT (ph)) ABORT();
|
|
1269 /* cleanup: remove memory protection, zero page_header bits. */
|
|
1270
|
2720
|
1271 #ifdef MEMORY_USAGE_STATS
|
|
1272 PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
|
|
1273 PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
|
|
1274 #endif
|
|
1275
|
|
1276 /* clean up mark bits and cell free list */
|
|
1277 remove_cell_free_list (ph);
|
|
1278 if (PH_ON_USED_LIST_P (ph))
|
|
1279 remove_mark_bits (ph);
|
|
1280
|
|
1281 /* clean up page header */
|
|
1282 PH_CELL_SIZE (ph) = 0;
|
|
1283 PH_CELLS_ON_PAGE (ph) = 0;
|
|
1284 PH_CELLS_USED (ph) = 0;
|
|
1285
|
|
1286 /* remove from used list */
|
|
1287 remove_page_header_from_plh (ph, plh);
|
|
1288
|
|
1289 /* move to free list */
|
|
1290 merge_into_free_list (ph);
|
|
1291 }
|
|
1292
|
|
1293
|
|
1294
|
|
1295
|
|
1296 /*--- allocation -------------------------------------------------------*/
|
|
1297
|
|
1298 /* Allocates from cell free list on already allocated pages. */
|
|
1299 static page_header *
|
|
1300 allocate_cell (page_list_header *plh)
|
|
1301 {
|
|
1302 page_header *ph = PLH_FIRST (plh);
|
|
1303 if (ph)
|
|
1304 {
|
|
1305 if (PH_FREE_LIST (ph))
|
|
1306 /* elements free on first page */
|
|
1307 return ph;
|
|
1308 else if ((PH_NEXT (ph))
|
|
1309 && (PH_FREE_LIST (PH_NEXT (ph))))
|
|
1310 /* elements free on second page */
|
|
1311 {
|
|
1312 page_header *temp = PH_NEXT (ph);
|
|
1313 /* move full page (first page) to end of list */
|
|
1314 PH_NEXT (PLH_LAST (plh)) = ph;
|
|
1315 PH_PREV (ph) = PLH_LAST (plh);
|
|
1316 PLH_LAST (plh) = ph;
|
|
1317 PH_NEXT (ph) = 0;
|
|
1318 /* install second page as first page */
|
|
1319 ph = temp;
|
|
1320 PH_PREV (ph) = 0;
|
|
1321 PLH_FIRST (plh) = ph;
|
|
1322 return ph;
|
|
1323 }
|
|
1324 }
|
|
1325 return 0;
|
|
1326 }
|
|
1327
|
|
1328
|
|
1329 /* Finds a page which has at least the needed number of pages.
|
|
1330 Algorithm: FIRST FIT. */
|
|
1331 static page_header *
|
|
1332 find_free_page_first_fit (EMACS_INT needed_pages, page_header *ph)
|
|
1333 {
|
|
1334 while (ph)
|
|
1335 {
|
|
1336 if (PH_N_PAGES (ph) >= needed_pages)
|
|
1337 return ph;
|
|
1338 ph = PH_NEXT (ph);
|
|
1339 }
|
|
1340 return 0;
|
|
1341 }
|
|
1342
|
|
1343
|
|
1344 /* Allocates a page from the free list. */
|
|
1345 static page_header *
|
|
1346 allocate_page_from_free_list (EMACS_INT needed_pages)
|
|
1347 {
|
|
1348 page_header *ph = 0;
|
3092
|
1349 EMACS_INT i;
|
2720
|
1350 for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
|
|
1351 if ((ph = find_free_page_first_fit (needed_pages,
|
|
1352 PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
|
|
1353 {
|
|
1354 if (PH_N_PAGES (ph) > needed_pages)
|
|
1355 return split_page (ph, needed_pages);
|
|
1356 else
|
|
1357 {
|
|
1358 remove_page_from_free_list (ph);
|
|
1359 return ph;
|
|
1360 }
|
|
1361 }
|
|
1362 return 0;
|
|
1363 }
|
|
1364
|
|
1365
|
|
1366 /* Allocates a new page, either from free list or by expanding the heap. */
|
|
1367 static page_header *
|
3092
|
1368 allocate_new_page (page_list_header *plh, size_t size, EMACS_INT elemcount)
|
2720
|
1369 {
|
3092
|
1370 EMACS_INT needed_pages = BYTES_TO_PAGES (size * elemcount);
|
2720
|
1371 /* first check free list */
|
|
1372 page_header *result = allocate_page_from_free_list (needed_pages);
|
|
1373 if (!result)
|
|
1374 /* expand heap */
|
|
1375 result = expand_heap (needed_pages);
|
3092
|
1376 install_page_in_used_list (result, plh, size, elemcount);
|
2720
|
1377 return result;
|
|
1378 }
|
|
1379
|
|
1380
|
|
1381 /* Selects the correct size class, tries to allocate a cell of this size
|
|
1382 from the free list, if this fails, a new page is allocated. */
|
|
1383 static void *
|
3092
|
1384 mc_alloc_1 (size_t size, EMACS_INT elemcount)
|
2720
|
1385 {
|
|
1386 page_list_header *plh = 0;
|
2723
|
1387 page_header *ph = 0;
|
|
1388 void *result = 0;
|
|
1389
|
3092
|
1390 plh = USED_HEAP_PAGES (get_used_list_index (size));
|
2720
|
1391
|
|
1392 if (size == 0)
|
|
1393 return 0;
|
3092
|
1394 if ((elemcount == 1) && (size < (size_t) PAGE_SIZE_DIV_2))
|
2720
|
1395 /* first check any free cells */
|
|
1396 ph = allocate_cell (plh);
|
|
1397 if (!ph)
|
|
1398 /* allocate a new page */
|
3092
|
1399 ph = allocate_new_page (plh, size, elemcount);
|
2720
|
1400
|
|
1401 /* return first element of free list and remove it from the list */
|
|
1402 result = (void*) PH_FREE_LIST (ph);
|
|
1403 PH_FREE_LIST (ph) =
|
|
1404 NEXT_FREE (PH_FREE_LIST (ph));
|
|
1405
|
3092
|
1406 memset (result, '\0', (size * elemcount));
|
|
1407 MARK_LRECORD_AS_FREE (result);
|
2720
|
1408
|
|
1409 /* bump used cells counter */
|
3092
|
1410 PH_CELLS_USED (ph) += elemcount;
|
2720
|
1411
|
|
1412 #ifdef MEMORY_USAGE_STATS
|
3092
|
1413 PLH_USED_CELLS (plh) += elemcount;
|
|
1414 PLH_USED_SPACE (plh) += size * elemcount;
|
2720
|
1415 #endif
|
|
1416
|
|
1417 return result;
|
|
1418 }
|
|
1419
|
3092
|
1420 /* Array allocation. */
|
|
1421 void *
|
|
1422 mc_alloc_array (size_t size, EMACS_INT elemcount)
|
|
1423 {
|
|
1424 return mc_alloc_1 (size, elemcount);
|
|
1425 }
|
|
1426
|
2720
|
1427 void *
|
|
1428 mc_alloc (size_t size)
|
|
1429 {
|
|
1430 return mc_alloc_1 (size, 1);
|
|
1431 }
|
|
1432
|
|
1433
|
|
1434
|
|
1435 /*--- sweep & free & finalize-------------------------------------------*/
|
|
1436
|
|
1437 /* Frees a heap pointer. */
|
|
1438 static void
|
|
1439 remove_cell (void *ptr, page_header *ph)
|
|
1440 {
|
|
1441 #ifdef MEMORY_USAGE_STATS
|
|
1442 PLH_USED_CELLS (PH_PLH (ph))--;
|
|
1443 if (PH_ON_USED_LIST_P (ph))
|
|
1444 PLH_USED_SPACE (PH_PLH (ph)) -=
|
|
1445 detagged_lisp_object_size ((const struct lrecord_header *) ptr);
|
|
1446 else
|
|
1447 PLH_USED_SPACE (PH_PLH (ph)) -= PH_CELL_SIZE (ph);
|
|
1448 #endif
|
2775
|
1449 if (PH_ON_USED_LIST_P (ph))
|
|
1450 {
|
2994
|
1451 #ifdef ALLOC_TYPE_STATS
|
2775
|
1452 dec_lrecord_stats (PH_CELL_SIZE (ph),
|
|
1453 (const struct lrecord_header *) ptr);
|
2994
|
1454 #endif /* ALLOC_TYPE_STATS */
|
2775
|
1455 #ifdef ERROR_CHECK_GC
|
|
1456 assert (!LRECORD_FREE_P (ptr));
|
|
1457 deadbeef_memory (ptr, PH_CELL_SIZE (ph));
|
|
1458 MARK_LRECORD_AS_FREE (ptr);
|
2720
|
1459 #endif
|
2775
|
1460 }
|
2720
|
1461
|
|
1462 /* hooks cell into free list */
|
|
1463 NEXT_FREE (ptr) = PH_FREE_LIST (ph);
|
|
1464 PH_FREE_LIST (ph) = FREE_LIST (ptr);
|
|
1465 /* decrease cells used */
|
|
1466 PH_CELLS_USED (ph)--;
|
|
1467 }
|
|
1468
|
|
1469
|
|
1470 /* Mark free list marks all free list entries. */
|
|
1471 static void
|
|
1472 mark_free_list (page_header *ph)
|
|
1473 {
|
|
1474 free_link *fl = PH_FREE_LIST (ph);
|
|
1475 while (fl)
|
|
1476 {
|
3092
|
1477 SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK);
|
2720
|
1478 fl = NEXT_FREE (fl);
|
|
1479 }
|
|
1480 }
|
|
1481
|
|
1482
|
|
1483 /* Finalize a page for disksave. XEmacs calls this routine before it
|
|
1484 dumps the heap image. You have to tell mc-alloc how to call your
|
|
1485 object's finalizer for disksave. Therefore, you have to define the
|
|
1486 macro MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr). This macro should
|
|
1487 do nothing else then test if there is a finalizer and call it on
|
|
1488 the given argument, which is the heap address of the object. */
|
|
1489 static void
|
|
1490 finalize_page_for_disksave (page_header *ph)
|
|
1491 {
|
|
1492 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
|
|
1493 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
|
|
1494 EMACS_INT mark_bit = 0;
|
|
1495 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
|
|
1496
|
|
1497 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1498 {
|
|
1499 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
|
|
1500 MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
|
|
1501 }
|
|
1502 }
|
|
1503
|
|
1504
|
|
1505 /* Finalizes the heap for disksave. */
|
|
1506 void
|
|
1507 mc_finalize_for_disksave (void)
|
|
1508 {
|
|
1509 visit_all_used_page_headers (finalize_page_for_disksave);
|
|
1510 }
|
|
1511
|
|
1512
|
|
1513 /* Sweeps a page: all the non-marked cells are freed. If the page is empty
|
|
1514 in the end, it is removed. If some cells are free, it is moved to the
|
|
1515 front of its page header list. Full pages stay where they are. */
|
|
1516 static void
|
|
1517 sweep_page (page_header *ph)
|
|
1518 {
|
3092
|
1519 Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
|
2720
|
1520 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
|
|
1521 EMACS_INT mark_bit = 0;
|
|
1522 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
|
3092
|
1523 unsigned int bit = 0;
|
2720
|
1524
|
|
1525 mark_free_list (ph);
|
|
1526
|
3092
|
1527 /* ARRAY_BIT_HACK */
|
|
1528 if (PH_ARRAY_BIT (ph))
|
|
1529 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1530 {
|
|
1531 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
|
|
1532 if (bit)
|
|
1533 {
|
|
1534 zero_mark_bits (ph);
|
|
1535 PH_BLACK_BIT (ph) = 0;
|
|
1536 return;
|
|
1537 }
|
|
1538 }
|
|
1539
|
2720
|
1540 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1541 {
|
3092
|
1542 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
|
|
1543 if (bit == WHITE)
|
2720
|
1544 {
|
3092
|
1545 GC_STAT_FREED;
|
2720
|
1546 remove_cell (heap_space + (heap_space_step * mark_bit), ph);
|
|
1547 }
|
|
1548 }
|
|
1549 zero_mark_bits (ph);
|
3092
|
1550 PH_BLACK_BIT (ph) = 0;
|
2720
|
1551 if (PH_CELLS_USED (ph) == 0)
|
|
1552 remove_page_from_used_list (ph);
|
|
1553 else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
|
|
1554 move_page_header_to_front (ph);
|
|
1555 }
|
|
1556
|
|
1557
|
|
1558 /* Sweeps the heap. */
|
|
1559 void
|
|
1560 mc_sweep (void)
|
|
1561 {
|
|
1562 visit_all_used_page_headers (sweep_page);
|
|
1563 }
|
|
1564
|
|
1565
|
|
/* Frees the cell pointed to by PTR.  With asynchronous finalization
   manual frees are not allowed, so this is deliberately a no-op; the
   cell is reclaimed by the next garbage collection. */
void
mc_free (void *UNUSED (ptr))
{
}
|
|
1573
|
|
1574
|
|
1575 /* Changes the size of the cell pointed to by ptr.
|
|
1576 Returns the new address of the new cell with new size. */
|
|
1577 void *
|
3092
|
1578 mc_realloc_1 (void *ptr, size_t size, int elemcount)
|
2720
|
1579 {
|
|
1580 if (ptr)
|
|
1581 {
|
3092
|
1582 if (size * elemcount)
|
2720
|
1583 {
|
3092
|
1584 void *result = mc_alloc_1 (size, elemcount);
|
2720
|
1585 size_t from_size = PH_CELL_SIZE (get_page_header (ptr));
|
3092
|
1586 size_t cpy_size = size * elemcount;
|
|
1587 if (cpy_size > from_size)
|
2720
|
1588 cpy_size = from_size;
|
|
1589 memcpy (result, ptr, cpy_size);
|
3092
|
1590 #ifdef ALLOC_TYPE_STATS
|
|
1591 inc_lrecord_stats (size, (struct lrecord_header *) result);
|
|
1592 #endif /* not ALLOC_TYPE_STATS */
|
|
1593 /* mc_free (ptr); not needed, will be collected next gc */
|
2720
|
1594 return result;
|
|
1595 }
|
|
1596 else
|
|
1597 {
|
3092
|
1598 /* mc_free (ptr); not needed, will be collected next gc */
|
2720
|
1599 return 0;
|
|
1600 }
|
|
1601 }
|
|
1602 else
|
3092
|
1603 return mc_alloc_1 (size, elemcount);
|
2720
|
1604 }
|
|
1605
|
|
1606 void *
|
|
1607 mc_realloc (void *ptr, size_t size)
|
|
1608 {
|
|
1609 return mc_realloc_1 (ptr, size, 1);
|
|
1610 }
|
|
1611
|
|
1612 void *
|
3092
|
1613 mc_realloc_array (void *ptr, size_t size, EMACS_INT elemcount)
|
2720
|
1614 {
|
3092
|
1615 return mc_realloc_1 (ptr, size, elemcount);
|
2720
|
1616 }
|
|
1617
|
|
1618
|
|
1619
|
|
1620 /*--- initialization ---------------------------------------------------*/
|
|
1621
|
|
1622 /* Call once at the very beginning. */
|
|
1623 void
|
|
1624 init_mc_allocator (void)
|
|
1625 {
|
3092
|
1626 EMACS_INT i;
|
|
1627
|
|
1628 #ifdef MEMORY_USAGE_STATS
|
|
1629 MC_MALLOCED_BYTES = 0;
|
|
1630 #endif
|
2720
|
1631
|
3092
|
1632 /* init of pagesize dependent values */
|
|
1633 switch (SYS_PAGE_SIZE)
|
|
1634 {
|
|
1635 case 512: log_page_size = 9; break;
|
|
1636 case 1024: log_page_size = 10; break;
|
|
1637 case 2048: log_page_size = 11; break;
|
|
1638 case 4096: log_page_size = 12; break;
|
|
1639 case 8192: log_page_size = 13; break;
|
|
1640 case 16384: log_page_size = 14; break;
|
3212
|
1641 case 32768: log_page_size = 15; break;
|
|
1642 case 65536: log_page_size = 16; break;
|
|
1643 default:
|
|
1644 fprintf(stderr, "##### SYS_PAGE_SIZE=%d not supported #####\n",
|
|
1645 SYS_PAGE_SIZE);
|
|
1646 ABORT ();
|
3092
|
1647 }
|
|
1648
|
|
1649 page_size_div_2 = (EMACS_INT) SYS_PAGE_SIZE >> 1;
|
|
1650
|
|
1651 mc_allocator_globals.used_heap_pages =
|
|
1652 (page_list_header *) xmalloc_and_zero ((N_USED_PAGE_LISTS + 1)
|
|
1653 * sizeof (page_list_header));
|
|
1654 #ifdef MEMORY_USAGE_STATS
|
|
1655 MC_MALLOCED_BYTES += (N_USED_PAGE_LISTS + 1) * sizeof (page_list_header);
|
|
1656 #endif
|
|
1657
|
|
1658 mc_allocator_globals.ptr_lookup_table =
|
|
1659 (level_2_lookup_tree **)
|
|
1660 xmalloc_and_zero ((LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *));
|
|
1661 #ifdef MEMORY_USAGE_STATS
|
|
1662 MC_MALLOCED_BYTES += (LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *);
|
|
1663 #endif
|
|
1664
|
|
1665 #ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
|
|
1666 the_page_header_blocktype = Blocktype_new (struct page_header_blocktype);
|
|
1667 #endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */
|
2932
|
1668
|
2720
|
1669 for (i = 0; i < N_USED_PAGE_LISTS; i++)
|
|
1670 {
|
|
1671 page_list_header *plh = USED_HEAP_PAGES (i);
|
|
1672 PLH_LIST_TYPE (plh) = USED_LIST;
|
|
1673 PLH_SIZE (plh) = get_used_list_size_value (i);
|
|
1674 PLH_FIRST (plh) = 0;
|
|
1675 PLH_LAST (plh) = 0;
|
|
1676 PLH_MARK_BIT_FREE_LIST (plh) = 0;
|
|
1677 #ifdef MEMORY_USAGE_STATS
|
|
1678 PLH_PAGE_COUNT (plh) = 0;
|
|
1679 PLH_USED_CELLS (plh) = 0;
|
|
1680 PLH_USED_SPACE (plh) = 0;
|
|
1681 PLH_TOTAL_CELLS (plh) = 0;
|
|
1682 PLH_TOTAL_SPACE (plh) = 0;
|
|
1683 #endif
|
|
1684 }
|
|
1685
|
|
1686 for (i = 0; i < N_FREE_PAGE_LISTS; i++)
|
|
1687 {
|
|
1688 page_list_header *plh = FREE_HEAP_PAGES (i);
|
|
1689 PLH_LIST_TYPE (plh) = FREE_LIST;
|
|
1690 PLH_SIZE (plh) = get_free_list_size_value (i);
|
|
1691 PLH_FIRST (plh) = 0;
|
|
1692 PLH_LAST (plh) = 0;
|
|
1693 PLH_MARK_BIT_FREE_LIST (plh) = 0;
|
|
1694 #ifdef MEMORY_USAGE_STATS
|
|
1695 PLH_PAGE_COUNT (plh) = 0;
|
|
1696 PLH_USED_CELLS (plh) = 0;
|
|
1697 PLH_USED_SPACE (plh) = 0;
|
|
1698 PLH_TOTAL_CELLS (plh) = 0;
|
|
1699 PLH_TOTAL_SPACE (plh) = 0;
|
|
1700 #endif
|
|
1701 }
|
|
1702
|
3092
|
1703 #ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
|
2720
|
1704 PAGE_HEADER_FREE_LIST = 0;
|
3092
|
1705 #endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
|
2720
|
1706
|
|
1707 #ifdef MEMORY_USAGE_STATS
|
3092
|
1708 MC_MALLOCED_BYTES += sizeof (mc_allocator_globals);
|
2720
|
1709 #endif
|
|
1710
|
|
1711 init_lookup_table ();
|
|
1712 }
|
|
1713
|
|
1714
|
|
1715
|
|
1716
|
|
1717 /*--- lisp function for statistics -------------------------------------*/
|
|
1718
|
|
1719 #ifdef MEMORY_USAGE_STATS
|
|
1720 DEFUN ("mc-alloc-memory-usage", Fmc_alloc_memory_usage, 0, 0, 0, /*
|
|
1721 Returns stats about the mc-alloc memory usage. See diagnose.el.
|
|
1722 */
|
|
1723 ())
|
|
1724 {
|
|
1725 Lisp_Object free_plhs = Qnil;
|
|
1726 Lisp_Object used_plhs = Qnil;
|
|
1727 Lisp_Object heap_sects = Qnil;
|
3092
|
1728 EMACS_INT used_size = 0;
|
|
1729 EMACS_INT real_size = 0;
|
2720
|
1730
|
3092
|
1731 EMACS_INT i;
|
2720
|
1732
|
|
1733 for (i = 0; i < N_FREE_PAGE_LISTS; i++)
|
|
1734 if (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)) > 0)
|
|
1735 free_plhs =
|
|
1736 acons (make_int (PLH_SIZE (FREE_HEAP_PAGES(i))),
|
|
1737 list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)))),
|
|
1738 free_plhs);
|
|
1739
|
|
1740 for (i = 0; i < N_USED_PAGE_LISTS; i++)
|
|
1741 if (PLH_PAGE_COUNT (USED_HEAP_PAGES(i)) > 0)
|
|
1742 used_plhs =
|
|
1743 acons (make_int (PLH_SIZE (USED_HEAP_PAGES(i))),
|
|
1744 list5 (make_int (PLH_PAGE_COUNT (USED_HEAP_PAGES(i))),
|
|
1745 make_int (PLH_USED_CELLS (USED_HEAP_PAGES(i))),
|
|
1746 make_int (PLH_USED_SPACE (USED_HEAP_PAGES(i))),
|
|
1747 make_int (PLH_TOTAL_CELLS (USED_HEAP_PAGES(i))),
|
|
1748 make_int (PLH_TOTAL_SPACE (USED_HEAP_PAGES(i)))),
|
|
1749 used_plhs);
|
|
1750
|
|
1751 for (i = 0; i < N_HEAP_SECTIONS; i++) {
|
|
1752 used_size += HEAP_SECTION(i).n_pages * PAGE_SIZE;
|
|
1753 real_size +=
|
|
1754 malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
|
|
1755 }
|
|
1756
|
|
1757 heap_sects =
|
|
1758 list3 (make_int (N_HEAP_SECTIONS),
|
|
1759 make_int (used_size),
|
|
1760 make_int (real_size));
|
|
1761
|
|
1762 return Fcons (make_int (PAGE_SIZE),
|
3092
|
1763 list5 (heap_sects,
|
2720
|
1764 Fnreverse (used_plhs),
|
|
1765 Fnreverse (free_plhs),
|
|
1766 make_int (sizeof (mc_allocator_globals)),
|
|
1767 make_int (MC_MALLOCED_BYTES)));
|
|
1768 }
|
|
1769 #endif /* MEMORY_USAGE_STATS */
|
|
1770
|
|
/* Registers the mc-alloc Lisp primitives. */
void
syms_of_mc_alloc (void)
{
#ifdef MEMORY_USAGE_STATS
  DEFSUBR (Fmc_alloc_memory_usage);
#endif /* MEMORY_USAGE_STATS */
}
|
3092
|
1778
|
|
1779
|
|
1780 /*--- incremental garbage collector ----------------------------------*/
|
|
1781
|
|
1782 /* access dirty bit of page header */
|
|
1783 void
|
|
1784 set_dirty_bit (page_header *ph, unsigned int value)
|
|
1785 {
|
|
1786 PH_DIRTY_BIT (ph) = value;
|
|
1787 }
|
|
1788
|
|
1789 void
|
|
1790 set_dirty_bit_for_address (void *ptr, unsigned int value)
|
|
1791 {
|
|
1792 set_dirty_bit (get_page_header (ptr), value);
|
|
1793 }
|
|
1794
|
|
1795 unsigned int
|
|
1796 get_dirty_bit (page_header *ph)
|
|
1797 {
|
|
1798 return PH_DIRTY_BIT (ph);
|
|
1799 }
|
|
1800
|
|
1801 unsigned int
|
|
1802 get_dirty_bit_for_address (void *ptr)
|
|
1803 {
|
|
1804 return get_dirty_bit (get_page_header (ptr));
|
|
1805 }
|
|
1806
|
|
1807
|
|
1808 /* access protection bit of page header */
|
|
1809 void
|
|
1810 set_protection_bit (page_header *ph, unsigned int value)
|
|
1811 {
|
|
1812 PH_PROTECTION_BIT (ph) = value;
|
|
1813 }
|
|
1814
|
|
1815 void
|
|
1816 set_protection_bit_for_address (void *ptr, unsigned int value)
|
|
1817 {
|
|
1818 set_protection_bit (get_page_header (ptr), value);
|
|
1819 }
|
|
1820
|
|
1821 unsigned int
|
|
1822 get_protection_bit (page_header *ph)
|
|
1823 {
|
|
1824 return PH_PROTECTION_BIT (ph);
|
|
1825 }
|
|
1826
|
|
1827 unsigned int
|
|
1828 get_protection_bit_for_address (void *ptr)
|
|
1829 {
|
|
1830 return get_protection_bit (get_page_header (ptr));
|
|
1831 }
|
|
1832
|
|
1833
|
|
1834 /* Returns the start of the page of the object pointed to by ptr. */
|
|
1835 void *
|
|
1836 get_page_start (void *ptr)
|
|
1837 {
|
|
1838 return PH_HEAP_SPACE (get_page_header (ptr));
|
|
1839 }
|
|
1840
|
|
1841 /* Make PAGE_SIZE globally available. */
|
|
1842 EMACS_INT
|
|
1843 mc_get_page_size ()
|
|
1844 {
|
|
1845 return PAGE_SIZE;
|
|
1846 }
|
|
1847
|
|
/* Is the fault at ptr on a protected page?  Returns non-zero iff PTR
   lies inside the heap space covered by a used page header whose
   protection bit is set.  Used by the fault handler to decide
   whether a protection fault belongs to the garbage collector's
   write barrier. */
EMACS_INT
fault_on_protected_page (void *ptr)
{
  /* The _internal lookup is used because PTR may be an arbitrary
     faulting address not on the mc-heap at all; presumably it
     returns 0 for such addresses rather than aborting — confirm
     against its definition. */
  page_header *ph = get_page_header_internal (ptr);
  /* Range check: heap_space <= ptr < heap_space + n_pages * PAGE_SIZE.
     The upper bound is computed via EMACS_INT to avoid arithmetic on
     void *. */
  return (ph
	  && PH_HEAP_SPACE (ph)
	  && (PH_HEAP_SPACE (ph) <= ptr)
	  && ((void *) ((EMACS_INT) PH_HEAP_SPACE (ph)
			+ PH_N_PAGES (ph) * PAGE_SIZE) > ptr)
	  && (PH_PROTECTION_BIT (ph) == 1));
}
|
|
1860
|
|
1861
|
|
1862 /* Protect the heap page of given page header ph if black objects are
|
|
1863 on the page. */
|
|
1864 static void
|
|
1865 protect_heap_page (page_header *ph)
|
|
1866 {
|
|
1867 if (PH_BLACK_BIT (ph))
|
|
1868 {
|
|
1869 void *heap_space = PH_HEAP_SPACE (ph);
|
|
1870 EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
|
|
1871 vdb_protect ((void *) heap_space, heap_space_size);
|
|
1872 PH_PROTECTION_BIT (ph) = 1;
|
|
1873 }
|
|
1874 }
|
|
1875
|
|
/* Protect all heap pages with black objects.  Iterates over every
   used page header; protect_heap_page skips pages without black
   objects. */
void
protect_heap_pages (void)
{
  visit_all_used_page_headers (protect_heap_page);
}
|
|
1882
|
|
1883
|
|
1884 /* Remove protection (if there) of heap page of given page header
|
|
1885 ph. */
|
|
1886 static void
|
|
1887 unprotect_heap_page (page_header *ph)
|
|
1888 {
|
|
1889 if (PH_PROTECTION_BIT (ph))
|
|
1890 {
|
|
1891 void *heap_space = PH_HEAP_SPACE (ph);
|
|
1892 EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
|
|
1893 vdb_unprotect (heap_space, heap_space_size);
|
|
1894 PH_PROTECTION_BIT (ph) = 0;
|
|
1895 }
|
|
1896 }
|
|
1897
|
|
/* Remove protection for all heap pages which are protected.
   Iterates over every used page header; unprotect_heap_page skips
   pages that are not protected. */
void
unprotect_heap_pages (void)
{
  visit_all_used_page_headers (unprotect_heap_page);
}
|
|
1904
|
|
1905 /* Remove protection and mark page dirty. */
|
|
1906 void
|
|
1907 unprotect_page_and_mark_dirty (void *ptr)
|
|
1908 {
|
|
1909 page_header *ph = get_page_header (ptr);
|
|
1910 unprotect_heap_page (ph);
|
|
1911 PH_DIRTY_BIT (ph) = 1;
|
|
1912 }
|
|
1913
|
|
/* Repush all objects on dirty pages onto the mark stack.  Scans
   every cell of the page containing PTR; each cell whose mark bits
   read BLACK is pushed through gc_write_barrier so the collector
   rescans it.  Clears the page's black and dirty bits afterwards.
   Returns the number of objects repushed. */
int
repush_all_objects_on_page (void *ptr)
{
  int repushed_objects = 0;
  page_header *ph = get_page_header (ptr);
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  /* Byte distance between consecutive cells on this page. */
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;
  /* The mark-bit table stores N_MARK_BITS per cell, so cell i's
     color lives at bit index i * N_MARK_BITS. */
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == BLACK)
	{
	  repushed_objects++;
	  /* Cell i starts at heap_space + i * cell_size; wrap it as
	     a Lisp object and hand it to the write barrier. */
	  gc_write_barrier
	    (wrap_pointer_1 ((heap_space + (heap_space_step * mark_bit))));
	}
    }
  /* All black objects on this page have been repushed; reset the
     page's incremental-GC state. */
  PH_BLACK_BIT (ph) = 0;
  PH_DIRTY_BIT (ph) = 0;
  return repushed_objects;
}
|
|
1939
|
|
1940 /* Mark black if object is currently grey. This first checks, if the
|
|
1941 object is really allocated on the mc-heap. If it is, it can be
|
|
1942 marked black; if it is not, it cannot be marked. */
|
|
1943 EMACS_INT
|
|
1944 maybe_mark_black (void *ptr)
|
|
1945 {
|
|
1946 page_header *ph = get_page_header_internal (ptr);
|
|
1947 unsigned int bit = 0;
|
|
1948
|
|
1949 if (ph && PH_PLH (ph) && PH_ON_USED_LIST_P (ph))
|
|
1950 {
|
|
1951 GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
|
|
1952 if (bit == GREY)
|
|
1953 {
|
|
1954 if (!PH_BLACK_BIT (ph))
|
|
1955 PH_BLACK_BIT (ph) = 1;
|
|
1956 SET_BIT (ph, get_mark_bit_index (ptr, ph), BLACK);
|
|
1957 }
|
|
1958 return 1;
|
|
1959 }
|
|
1960 return 0;
|
|
1961 }
|
|
1962
|
|
/* Only for debugging --- not used anywhere in the sources.
   Returns non-zero iff PTR points onto a page that belongs to the
   mc-heap's used page lists. */
EMACS_INT
object_on_heap_p (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph && PH_ON_USED_LIST_P (ph));
}
|