/* New size-based allocator for XEmacs.
   Copyright (C) 2005 Marcus Crestani.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Synched up with: Not in FSF. */

#include <config.h>

#include "lisp.h"
#include "mc-alloc.h"
#include "getpagesize.h"


#if 0
# define USE_MARK_BITS_FREE_LIST 1
#endif
#if 1
# define BLOCKTYPE_ALLOC_PAGE_HEADER 1
#endif

/* Memory protection needs the real system-dependent pagesize. */
#ifndef WIN32_NATIVE
#include <unistd.h>		/* for getpagesize () */
#endif
#if defined (HAVE_GETPAGESIZE)
# define SYS_PAGE_SIZE getpagesize ()
#elif defined (_SC_PAGESIZE)
# define SYS_PAGE_SIZE sysconf (_SC_PAGESIZE)
#elif defined (_SC_PAGE_SIZE)
# define SYS_PAGE_SIZE sysconf (_SC_PAGE_SIZE)
#elif defined (get_page_size)
# define SYS_PAGE_SIZE get_page_size ()
#elif defined (PAGESIZE)
# define SYS_PAGE_SIZE PAGESIZE
#elif defined (PAGE_SIZE)
# define SYS_PAGE_SIZE PAGE_SIZE
#else
/* Valid page sizes are powers of 2. */
# define SYS_PAGE_SIZE 4096
#endif


/*--- configurable values ----------------------------------------------*/

/* Definition of size classes */

/* Heap used list constants: In the used heap, it is important to
   quickly find a free spot for a new object.  Therefore the size
   classes of the used heap are defined by the size of the cells on
   the pages.  The size classes should match common object sizes, to
   avoid wasting memory. */

/* Minimum object size in bytes. */
#if BITS_PER_EMACS_INT > 32
# define USED_LIST_MIN_OBJECT_SIZE 16
#else
# define USED_LIST_MIN_OBJECT_SIZE 8
#endif

/* The step size by which the size classes increase (up to the upper
   threshold).  This many bytes are mapped to a single used list: */
#if BITS_PER_EMACS_INT > 32
# define USED_LIST_LIN_STEP 8
#else
# define USED_LIST_LIN_STEP 4
#endif

/* The upper threshold should always be set to PAGE_SIZE/2, because if
   an object is larger than PAGE_SIZE/2, there is no room for any
   other object on its page.  Objects this big are kept in the page
   list of the multiple pages, since a quick search for free spots is
   not needed for this kind of page (there are no free spots).
   PAGE_SIZE_DIV_2 defines the maximum size of a used space list. */
#define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2


/* Heap free list constants: In the unused heap, the size of
   consecutive memory tips the scales.  A page is the smallest entity
   that is ever requested.  Therefore, the size classes of the unused
   heap are defined by the number of consecutive pages. */
/* Sizes up to this many pages each have their own free list. */
#define FREE_LIST_LOWER_THRESHOLD 32
/* The step size by which the size classes increase (up to the upper
   threshold).  FREE_LIST_LIN_STEP number of sizes are mapped to a
   single free list for sizes between FREE_LIST_LOWER_THRESHOLD and
   FREE_LIST_UPPER_THRESHOLD. */
#define FREE_LIST_LIN_STEP 8
/* Sizes of at least this many pages are mapped to a single free
   list.  Blocks of memory larger than this number are all kept in a
   single list, which makes searching this list slow.  But such large
   blocks are rare. */
#define FREE_LIST_UPPER_THRESHOLD 256


/* used heap list count */
#define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \
                             - USED_LIST_MIN_OBJECT_SIZE) \
                            / USED_LIST_LIN_STEP) + 1 ) + 1

/* free heap list count */
#define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \
                             - FREE_LIST_LOWER_THRESHOLD) \
                            / FREE_LIST_LIN_STEP) \
                           + FREE_LIST_LOWER_THRESHOLD)
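
/* A worked example, assuming a 32-bit build with 4 KB pages (so a
   minimum object size of 8, a step of 4, and an upper threshold of
   2048): N_USED_PAGE_LISTS is ((2048 - 8) / 4 + 1) + 1 = 512.  List 0
   holds 8-byte cells, lists 1..510 hold cells of 12, 16, ..., 2048
   bytes, and the final list holds objects that span multiple pages.
   N_FREE_PAGE_LISTS is ((256 - 32) / 8) + 32 = 60. */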


/* Maximum number of separately added heap sections. */
#if BITS_PER_EMACS_INT > 32
# define MAX_HEAP_SECTS 2048
#else
# define MAX_HEAP_SECTS 768
#endif


/* Heap growth constants.  The heap grows by some number of pages
   between these boundaries (unit is PAGE_SIZE). */
#define MIN_HEAP_INCREASE 256
#define MAX_HEAP_INCREASE 256 /* not used */

/* Every heap growth is calculated like this:
   needed_pages + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR)).
   So the growth of the heap is influenced by the current size of the
   heap, but kept between the MIN_HEAP_INCREASE and MAX_HEAP_INCREASE
   boundaries.  This reduces the number of heap sections: the larger
   the heap grows, the larger the newly allocated chunks become. */
#define HEAP_GROWTH_DIVISOR 3
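
/* A worked growth example (illustrative numbers): with 4 KB pages and
   a 20 MB heap, HEAP_SIZE / (PAGE_SIZE * 3) is about 1706 pages, so a
   request for 10 more pages grows the heap by roughly 1716 pages
   (~6.7 MB) in one section.  While the heap is smaller than
   MIN_HEAP_INCREASE * PAGE_SIZE * 3 (3 MB here), the 256-page (1 MB)
   minimum applies instead. */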


/* Zero memory before putting on free lists. */
#define ZERO_MEM 1


#ifndef CHAR_BIT		/* should be included by limits.h */
# define CHAR_BIT BITS_PER_CHAR
#endif



/*--- values depending on PAGE_SIZE ------------------------------------*/

/* initialized in init_mc_allocator () */
static EMACS_INT log_page_size;
static EMACS_INT page_size_div_2;

#undef PAGE_SIZE
#define PAGE_SIZE SYS_PAGE_SIZE
#define LOG_PAGE_SIZE log_page_size
#define PAGE_SIZE_DIV_2 page_size_div_2


/* Constants for heap address to page header mapping. */
#define LOG_LEVEL2_SIZE 10
#define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE)
#if BITS_PER_EMACS_INT > 32
# define USE_HASH_TABLE 1
# define LOG_LEVEL1_SIZE 11
#else
# define LOG_LEVEL1_SIZE \
   (BITS_PER_EMACS_INT - LOG_LEVEL2_SIZE - LOG_PAGE_SIZE)
#endif
#define LEVEL1_SIZE (1 << LOG_LEVEL1_SIZE)

#ifdef USE_HASH_TABLE
# define HASH(hi) ((hi) & (LEVEL1_SIZE - 1))
# define L1_INDEX(p) HASH ((EMACS_INT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#else
# define L1_INDEX(p) ((EMACS_INT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#endif
#define L2_INDEX(p) (((EMACS_INT) p >> LOG_PAGE_SIZE) & (LEVEL2_SIZE - 1))
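
/* A worked decomposition, assuming a 32-bit EMACS_INT and 4 KB pages:
   LOG_PAGE_SIZE is 12 and LOG_LEVEL2_SIZE is 10, so LOG_LEVEL1_SIZE
   is 32 - 10 - 12 = 10.  A heap address then splits into

     | 10 bits L1 index | 10 bits L2 index | 12 bits page offset |

   i.e. L1_INDEX picks one of 1024 level-2 trees and L2_INDEX picks
   one of its 1024 page header slots.  On 64-bit builds the level-1
   index is instead hashed into 2^11 buckets and chained via
   hash_link. */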



/*--- structs and typedefs ---------------------------------------------*/

/* Links the free lists (mark_bit_free_list and cell free list). */
typedef struct free_link
{
  struct lrecord_header lheader;
  struct free_link *next_free;
} free_link;


/* Header for pages.  They are held in a doubly linked list. */
typedef struct page_header
{
  struct page_header *next;      /* next page_header */
  struct page_header *prev;      /* previous page_header */
  /* Field plh holds a pointer to the corresponding page list header. */
  struct page_list_header *plh;  /* page list header */
  free_link *free_list;          /* links free cells on page */
  EMACS_INT n_pages;             /* number of pages */
  EMACS_INT cell_size;           /* size of cells on page */
  EMACS_INT cells_on_page;       /* total number of cells on page */
  EMACS_INT cells_used;          /* number of used cells on page */
  /* If the number of objects on the page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory area
     and the field mark_bits holds the pointer to this area.  If the
     number of objects is smaller than BITS_PER_EMACS_INT, the mark
     bits are held in the mark_bits field directly, without an
     additional indirection. */
  unsigned int black_bit:1;      /* objects on page are black */
  unsigned int dirty_bit:1;      /* page is dirty */
  unsigned int protection_bit:1; /* page is write protected */
  unsigned int array_bit:1;      /* page holds arrays */
  Rawbyte *mark_bits;            /* pointer to mark bits */
  void *heap_space;              /* pointer to heap, where objects
                                    are stored */
} page_header;


/* Different list types. */
enum list_type_enum {
  USED_LIST,
  FREE_LIST
};


/* Header for page lists.  Field list_type holds the type of the list. */
typedef struct page_list_header
{
  enum list_type_enum list_type; /* the type of the list */
  /* Size holds the size of one cell (in bytes) in a used heap list,
     or the size of the heap section (in number of pages). */
  size_t size;                   /* size of one cell / heap section */
  page_header *first;            /* first of page_header list */
  page_header *last;             /* last of page_header list */
  /* If the number of objects on a page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, which is linked into this free list when not in use.  If
     the number of objects is smaller than BITS_PER_EMACS_INT, the
     mark bits are held in the mark_bits field directly, without an
     additional indirection. */
  free_link *mark_bit_free_list;

#ifdef MEMORY_USAGE_STATS
  EMACS_INT page_count;          /* number of pages in list */
  EMACS_INT used_cells;          /* number of objects in list */
  EMACS_INT used_space;          /* used space */
  EMACS_INT total_cells;         /* number of available cells */
  EMACS_INT total_space;         /* available space */
#endif
} page_list_header;


/* The heap sections are stored with their real start pointer and their
   real size.  Not aligned to PAGE_SIZE.  Needed for freeing heap sections. */
typedef struct heap_sect {
  void *real_start;              /* real start pointer (NOT aligned) */
  size_t real_size;              /* NOT multiple of PAGE_SIZE */
  void *start;                   /* aligned start pointer */
  EMACS_INT n_pages;             /* multiple of PAGE_SIZE */
} heap_sect;


/* 2nd tree level for mapping of heap addresses to page headers. */
typedef struct level_2_lookup_tree {
  page_header *index[LEVEL2_SIZE]; /* link to page header */
  EMACS_INT key;                 /* high order address bits */
#ifdef USE_HASH_TABLE
  struct level_2_lookup_tree *hash_link; /* hash chain link */
#endif
} level_2_lookup_tree;



/*--- global variable definitions --------------------------------------*/

/* All global allocator variables are kept in this struct. */
typedef struct mc_allocator_globals_type {

  /* heap size */
  EMACS_INT heap_size;

  /* list of all separately allocated chunks of heap */
  heap_sect heap_sections[MAX_HEAP_SECTS];
  EMACS_INT n_heap_sections;

  /* Holds all allocated pages, each object size class in its separate list,
     to guarantee fast allocation on partially filled pages. */
  page_list_header *used_heap_pages;

  /* Holds all free pages in the heap.  N multiples of PAGE_SIZE are
     kept on the Nth free list.  Contiguous pages are coalesced. */
  page_list_header free_heap_pages[N_FREE_PAGE_LISTS];

  /* ptr lookup table */
  level_2_lookup_tree **ptr_lookup_table;

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  /* page header free list */
  free_link *page_header_free_list;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  EMACS_INT malloced_bytes;
#endif
} mc_allocator_globals_type;

mc_allocator_globals_type mc_allocator_globals;




/*--- macro accessors --------------------------------------------------*/

#define USED_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.used_heap_pages[i])

#define FREE_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.free_heap_pages[i])

#define PLH(plh) plh
# define PLH_LIST_TYPE(plh) PLH (plh)->list_type
# define PLH_SIZE(plh) PLH (plh)->size
# define PLH_FIRST(plh) PLH (plh)->first
# define PLH_LAST(plh) PLH (plh)->last
# define PLH_MARK_BIT_FREE_LIST(plh) PLH (plh)->mark_bit_free_list
#ifdef MEMORY_USAGE_STATS
# define PLH_PAGE_COUNT(plh) PLH (plh)->page_count
# define PLH_USED_CELLS(plh) PLH (plh)->used_cells
# define PLH_USED_SPACE(plh) PLH (plh)->used_space
# define PLH_TOTAL_CELLS(plh) PLH (plh)->total_cells
# define PLH_TOTAL_SPACE(plh) PLH (plh)->total_space
#endif

#define PH(ph) ph
# define PH_NEXT(ph) PH (ph)->next
# define PH_PREV(ph) PH (ph)->prev
# define PH_PLH(ph) PH (ph)->plh
# define PH_FREE_LIST(ph) PH (ph)->free_list
# define PH_N_PAGES(ph) PH (ph)->n_pages
# define PH_CELL_SIZE(ph) PH (ph)->cell_size
# define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page
# define PH_CELLS_USED(ph) PH (ph)->cells_used
# define PH_BLACK_BIT(ph) PH (ph)->black_bit
# define PH_DIRTY_BIT(ph) PH (ph)->dirty_bit
# define PH_PROTECTION_BIT(ph) PH (ph)->protection_bit
# define PH_ARRAY_BIT(ph) PH (ph)->array_bit
# define PH_MARK_BITS(ph) PH (ph)->mark_bits
# define PH_HEAP_SPACE(ph) PH (ph)->heap_space
#define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph))
#define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph))

#define HEAP_SIZE mc_allocator_globals.heap_size

#ifdef MEMORY_USAGE_STATS
# define MC_MALLOCED_BYTES mc_allocator_globals.malloced_bytes
#endif

#define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index]
#define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
#define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#define NEXT_FREE(free_list) ((free_link*) free_list)->next_free
#define FREE_LIST(free_list) (free_link*) (free_list)

#define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i]
#define LEVEL2(l2, i) l2->index[i]
# define LEVEL2_KEY(l2) l2->key
#ifdef USE_HASH_TABLE
# define LEVEL2_HASH_LINK(l2) l2->hash_link
#endif

#if ZERO_MEM
# define ZERO_HEAP_SPACE(ph) \
   memset (PH_HEAP_SPACE (ph), '\0', PH_N_PAGES (ph) * PAGE_SIZE)
# define ZERO_PAGE_HEADER(ph) memset (ph, '\0', sizeof (page_header))
#endif

#define div_PAGE_SIZE(x) (x >> LOG_PAGE_SIZE)
#define mult_PAGE_SIZE(x) (x << LOG_PAGE_SIZE)

#define BYTES_TO_PAGES(bytes) (div_PAGE_SIZE ((bytes + (PAGE_SIZE - 1))))

#define PAGE_SIZE_ALIGNMENT(address) \
  (void *) ((((EMACS_INT) (address)) + PAGE_SIZE) & ~(PAGE_SIZE - 1))
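
/* Worked example with 4 KB pages: BYTES_TO_PAGES (5000) is
   (5000 + 4095) >> 12 = 2.  Note that PAGE_SIZE_ALIGNMENT always
   advances to the *next* page boundary, even for an address that is
   already aligned; expand_heap () below therefore allocates one spare
   page (real_size = n_pages * PAGE_SIZE + PAGE_SIZE) so that the
   aligned block still fits. */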

#define PH_ON_FREE_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST))

#define PH_ON_USED_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST))


/* Number of mark bits: minimum 1, maximum 8. */
#define N_MARK_BITS 2



/************************************************************************/
/*                             MC Allocator                             */
/************************************************************************/


/*--- misc functions ---------------------------------------------------*/

/* moved here from alloc.c */
#ifdef ERROR_CHECK_GC
static void
deadbeef_memory (void *ptr, Bytecount size)
{
  UINT_32_BIT *ptr4 = (UINT_32_BIT *) ptr;
  Bytecount beefs = size >> 2;

  /* In practice, size will always be a multiple of four. */
  while (beefs--)
    (*ptr4++) = 0xDEADBEEF; /* -559038737 base 10 */
}
#endif /* ERROR_CHECK_GC */

/* Visits all pages (page_headers) hooked into the used heap pages
   list and executes f with the current page header as
   argument.  Needed for sweep.  Returns number of processed pages. */
static EMACS_INT
visit_all_used_page_headers (EMACS_INT (*f) (page_header *ph))
{
  EMACS_INT number_of_pages_processed = 0;
  EMACS_INT i;
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_FIRST (USED_HEAP_PAGES (i)))
      {
        page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i));
        while (PH_NEXT (ph))
          {
            page_header *next = PH_NEXT (ph); /* in case f removes the page */
            number_of_pages_processed += f (ph);
            ph = next;
          }
        number_of_pages_processed += f (ph);
      }
  return number_of_pages_processed;
}
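
/* A usage sketch (illustrative only, not part of the allocator): any
   function matching EMACS_INT (*) (page_header *) can be passed in,
   e.g. a hypothetical page counter:

     static EMACS_INT
     count_page (page_header *UNUSED (ph))
     {
       return 1;
     }

     ... visit_all_used_page_headers (count_page) ...

   mc_sweep () and mc_finalize_for_disksave () below use this
   mechanism with sweep_page () and finalize_page_for_disksave (). */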




/*--- mapping of heap addresses to page headers and mark bits ----------*/

/* Sets a heap pointer and page header pair into the lookup table. */
static void
set_lookup_table (void *ptr, page_header *ph)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    {
      l2 = (level_2_lookup_tree*)
        xmalloc_and_zero (sizeof (level_2_lookup_tree));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES +=
        malloced_storage_size (0, sizeof (level_2_lookup_tree), 0);
#endif
      memset (l2, '\0', sizeof (level_2_lookup_tree));
#ifdef USE_HASH_TABLE
      LEVEL2_HASH_LINK (l2) = PTR_LOOKUP_TABLE (l1_index);
#endif
      PTR_LOOKUP_TABLE (l1_index) = l2;
      LEVEL2_KEY (l2) = l1_index;
    }
  LEVEL2 (l2, L2_INDEX (ptr)) = ph;
}


#ifdef UNSET_LOOKUP_TABLE
/* Set the lookup table to 0 for given heap address. */
static void
unset_lookup_table (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (l2) {
    LEVEL2 (l2, L2_INDEX (ptr)) = 0;
  }
}
#endif

/* Returns the page header of a given heap address, or 0 if not in table.
   For internal use, no error checking. */
static page_header *
get_page_header_internal (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    return 0;
  return LEVEL2 (l2, L2_INDEX (ptr));
}

/* Returns the page header of a given heap address.  Asserts that the
   address is in the table. */
static page_header *
get_page_header (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
  assert (l2);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  assert (LEVEL2 (l2, L2_INDEX (ptr)));
  return LEVEL2 (l2, L2_INDEX (ptr));
}

/* Returns the mark bit index of a given heap address. */
static EMACS_INT
get_mark_bit_index (void *ptr, page_header *ph)
{
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  if (cell_size)
    return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size)
      * N_MARK_BITS;
  else /* only one object on page */
    return 0;
}
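
/* Worked example: on a page of 16-byte cells, the object at byte
   offset 48 from PH_HEAP_SPACE is cell number 3; with N_MARK_BITS
   being 2, its mark bits therefore start at bit index 6. */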


/* Adds addresses of pages to lookup table. */
static void
add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages)
{
  Rawbyte *p = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages);
  for (p = (Rawbyte *) PH_HEAP_SPACE (ph);
       (EMACS_INT) p < end_of_section; p += PAGE_SIZE)
    set_lookup_table (p, ph);
}


/* Initializes lookup table. */
static void
init_lookup_table (void)
{
  EMACS_INT i;
  for (i = 0; i < LEVEL1_SIZE; i++)
    PTR_LOOKUP_TABLE (i) = 0;
}




/*--- mark bits --------------------------------------------------------*/

/*--- bit operations ---*/

/* Allocates a bit array of length bits. */
static Rawbyte *
alloc_bit_array (size_t bits)
{
  Rawbyte *bit_array;
#ifdef USE_MARK_BITS_FREE_LIST
  size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte);
#else /* not USE_MARK_BITS_FREE_LIST */
  size_t size =
    ALIGN_FOR_TYPE (((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte),
                    Rawbyte *);
#endif /* not USE_MARK_BITS_FREE_LIST */
  if (size < sizeof (free_link)) size = sizeof (free_link);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0);
#endif
  bit_array = (Rawbyte *) xmalloc_and_zero (size);
  return bit_array;
}


/* Returns the bit value at pos. */
static EMACS_INT
get_bit (Rawbyte *bit_array, EMACS_INT pos)
{
#if N_MARK_BITS > 1
  EMACS_INT result = 0;
  EMACS_INT i;
#endif
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    result |= ((*bit_array & (1 << ((pos + i) % CHAR_BIT))) != 0) << i;
  return result;
#else
  return (*bit_array & (1 << (pos % CHAR_BIT))) != 0;
#endif
}


/* Sets the bits at pos in bit_array to val. */
static void
set_bit (Rawbyte *bit_array, EMACS_INT pos, EMACS_INT val)
{
#if N_MARK_BITS > 1
  EMACS_INT i;
#endif
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    if ((val >> i) & 1)
      *bit_array |= 1 << ((pos + i) % CHAR_BIT);
    else
      *bit_array &= ~(1 << ((pos + i) % CHAR_BIT));
#else
  if (val)
    *bit_array |= 1 << (pos % CHAR_BIT);
  else
    *bit_array &= ~(1 << (pos % CHAR_BIT));
#endif
}


/*--- mark bit functions ---*/
#define USE_PNTR_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) > BITS_PER_EMACS_INT)
#define USE_WORD_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) <= BITS_PER_EMACS_INT)

#define GET_BIT_WORD(b, p) get_bit ((Rawbyte *) &b, p)
#define GET_BIT_PNTR(b, p) get_bit (b, p)

#define SET_BIT_WORD(b, p, v) set_bit ((Rawbyte *) &b, p, v)
#define SET_BIT_PNTR(b, p, v) set_bit (b, p, v)

#define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0
#define ZERO_MARK_BITS_PNTR(ph)				\
do {							\
  memset (PH_MARK_BITS (ph), '\0',			\
          ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS)	\
           + CHAR_BIT - 1) / CHAR_BIT * sizeof (Rawbyte)); \
} while (0)

#define GET_BIT(bit, ph, p)			\
do {						\
  if (USE_PNTR_MARK_BITS (ph))			\
    bit = GET_BIT_PNTR (PH_MARK_BITS (ph), p);	\
  else						\
    bit = GET_BIT_WORD (PH_MARK_BITS (ph), p);	\
} while (0)

#define SET_BIT(ph, p, v)			\
do {						\
  if (USE_PNTR_MARK_BITS (ph))			\
    SET_BIT_PNTR (PH_MARK_BITS (ph), p, v);	\
  else						\
    SET_BIT_WORD (PH_MARK_BITS (ph), p, v);	\
} while (0)

#define ZERO_MARK_BITS(ph)			\
do {						\
  if (USE_PNTR_MARK_BITS (ph))			\
    ZERO_MARK_BITS_PNTR (ph);			\
  else						\
    ZERO_MARK_BITS_WORD (ph);			\
} while (0)
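
/* Worked example, assuming a 32-bit EMACS_INT and 4 KB pages: a page
   of 2048-byte cells holds 2 cells and needs only 2 * 2 = 4 mark
   bits, which fit into the mark_bits word itself
   (USE_WORD_MARK_BITS).  A page of 8-byte cells holds 512 cells and
   needs 1024 bits = 128 bytes, so a separate bit array is allocated
   (USE_PNTR_MARK_BITS).  With N_MARK_BITS being 2, each cell's mark
   can encode four values; this file tests against WHITE in
   sweep_page () and against BLACK in set_mark_bit (). */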


/* Allocates mark-bit space either from a free list or from the OS
   for the given page header. */
static Rawbyte *
alloc_mark_bits (page_header *ph)
{
  Rawbyte *result;
#ifdef USE_MARK_BITS_FREE_LIST
  if (PH_MARK_BIT_FREE_LIST (ph) == 0)
    result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
  else
    {
      result = (Rawbyte *) PH_MARK_BIT_FREE_LIST (ph);
      PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result);
    }
#else /* not USE_MARK_BITS_FREE_LIST */
  result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
#endif /* not USE_MARK_BITS_FREE_LIST */
  return result;
}


/* Frees by maintaining a free list. */
static void
free_mark_bits (page_header *ph)
{
#ifdef USE_MARK_BITS_FREE_LIST
  NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph);
  PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph));
#else /* not USE_MARK_BITS_FREE_LIST */
  if (PH_MARK_BITS (ph))
    free (PH_MARK_BITS (ph));
#endif /* not USE_MARK_BITS_FREE_LIST */
}


/* Installs mark bits and zeros bits. */
static void
install_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    {
      PH_MARK_BITS (ph) = alloc_mark_bits (ph);
      ZERO_MARK_BITS_PNTR (ph);
    }
  else
    ZERO_MARK_BITS_WORD (ph);
}


/* Cleans and frees the mark bits of the given page_header. */
static void
remove_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    free_mark_bits (ph);
}


/* Zeros all mark bits in given header. */
static void
zero_mark_bits (page_header *ph)
{
  ZERO_MARK_BITS (ph);
}


/* Returns mark bit for given heap pointer. */
EMACS_INT
get_mark_bit (void *ptr)
{
  EMACS_INT bit = 0;
  page_header *ph = get_page_header (ptr);
  gc_checking_assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
    }
  return bit;
}


/* Sets mark bit for given heap pointer. */
void
set_mark_bit (void *ptr, EMACS_INT value)
{
  page_header *ph = get_page_header (ptr);
  assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      if (value == BLACK)
        if (!PH_BLACK_BIT (ph))
          PH_BLACK_BIT (ph) = 1;
      SET_BIT (ph, get_mark_bit_index (ptr, ph), value);
    }
}




/*--- page header functions --------------------------------------------*/

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
#include "blocktype.h"

struct page_header_blocktype
{
  Blocktype_declare (page_header);
} *the_page_header_blocktype;
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

/* Allocates a page header either from a free list or from the OS. */
static page_header *
alloc_page_header (void)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  page_header *result;
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
  result = Blocktype_alloc (the_page_header_blocktype);
  ZERO_PAGE_HEADER (result);
  return result;
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
  page_header *result;
  if (PAGE_HEADER_FREE_LIST == 0)
    {
      result =
        (page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
    }
  else
    {
      result = (page_header*) PAGE_HEADER_FREE_LIST;
      PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
    }
  return result;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}


/* Frees given page header by maintaining a free list. */
static void
free_page_header (page_header *ph)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  Blocktype_free (the_page_header_blocktype, ph);
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
#if ZERO_MEM
  ZERO_PAGE_HEADER (ph);
#endif
  NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
  PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}


/* Adds given page header to given page list header's list. */
static void
add_page_header_to_plh (page_header *ph, page_list_header *plh)
{
  /* insert at the front of the list */
  PH_PREV (ph) = 0;
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PLH (ph) = plh;
  /* if list is not empty, set prev in the first element */
  if (PLH_FIRST (plh))
    PH_PREV (PLH_FIRST (plh)) = ph;
  /* one element in list is first and last at the same time */
  PLH_FIRST (plh) = ph;
  if (!PLH_LAST (plh))
    PLH_LAST (plh) = ph;

#ifdef MEMORY_USAGE_STATS
  /* bump page count */
  PLH_PAGE_COUNT (plh)++;
#endif

}


/* Removes given page header from given page list header's list. */
static void
remove_page_header_from_plh (page_header *ph, page_list_header *plh)
{
  if (PLH_FIRST (plh) == ph)
    PLH_FIRST (plh) = PH_NEXT (ph);
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);

#ifdef MEMORY_USAGE_STATS
  /* decrease page count */
  PLH_PAGE_COUNT (plh)--;
#endif
}


/* Moves a page header to the front of its page header list.
   This is used during sweep: pages with some live objects are moved
   to the front.  This makes allocation faster, as all pages with free
   slots can be found at the front of the list. */
static void
move_page_header_to_front (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);
  /* is page already first? */
  if (ph == PLH_FIRST (plh)) return;
  /* remove from list */
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
  /* insert at the front */
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PREV (ph) = 0;
  PH_PREV (PH_NEXT (ph)) = ph;
  PLH_FIRST (plh) = ph;
}




/*--- page list functions ----------------------------------------------*/

/* Returns the index of the used heap list according to given size. */
static int
get_used_list_index (size_t size)
{
  if (size <= USED_LIST_MIN_OBJECT_SIZE)
    {
      //    printf ("size %d -> index %d\n", size, 0);
      return 0;
    }
  if (size <= (size_t) USED_LIST_UPPER_THRESHOLD)
    {
      //    printf ("size %d -> index %d\n", size,
      //            ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
      //             / USED_LIST_LIN_STEP) + 1);
      return ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
              / USED_LIST_LIN_STEP) + 1;
    }
  //  printf ("size %d -> index %d\n", size, N_USED_PAGE_LISTS - 1);
  return N_USED_PAGE_LISTS - 1;
}

/* Returns the size of the used heap list according to given index. */
static size_t
get_used_list_size_value (int used_index)
{
  if (used_index < N_USED_PAGE_LISTS - 1)
    return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE;
  return 0;
}
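
/* Worked example for a 32-bit build (minimum size 8, step 4):
   get_used_list_index () maps sizes 1..8 to list 0, 9..12 to list 1,
   13..16 to list 2, and so on; get_used_list_size_value () inverts
   this (list 1 -> 12-byte cells, list 2 -> 16-byte cells).  A 13-byte
   request is thus served from a page of 16-byte cells, wasting at
   most USED_LIST_LIN_STEP - 1 bytes per object. */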


/* Returns the index of the free heap list according to given size. */
static EMACS_INT
get_free_list_index (EMACS_INT n_pages)
{
  if (n_pages == 0)
    return 0;
  if (n_pages <= FREE_LIST_LOWER_THRESHOLD)
    return n_pages - 1;
  if (n_pages >= FREE_LIST_UPPER_THRESHOLD - 1)
    return N_FREE_PAGE_LISTS - 1;
  return ((n_pages - FREE_LIST_LOWER_THRESHOLD - 1)
          / FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}


/* Returns the size in number of pages of the given free list at index. */
static size_t
get_free_list_size_value (EMACS_INT free_index)
{
  if (free_index < FREE_LIST_LOWER_THRESHOLD)
    return free_index + 1;
  if (free_index >= N_FREE_PAGE_LISTS)
    return FREE_LIST_UPPER_THRESHOLD;
  return ((free_index + 1 - FREE_LIST_LOWER_THRESHOLD)
          * FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}
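
/* Worked example with the default thresholds (32, step 8, upper 256):
   a block of 3 free pages goes to list 2 (lists 0..31 hold exactly
   1..32 pages); a block of 40 pages maps to ((40 - 32 - 1) / 8) + 32
   = 32, the first of the coarser-grained lists; and any block of 255
   pages or more lands in the last list, index 59. */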


#ifdef MEMORY_USAGE_STATS
Bytecount
mc_alloced_storage_size (Bytecount claimed_size, struct overhead_stats *stats)
{
  size_t used_size =
    get_used_list_size_value (get_used_list_index (claimed_size));
  if (used_size == 0)
    used_size = mult_PAGE_SIZE (BYTES_TO_PAGES (claimed_size));

  if (stats)
    {
      stats->was_requested += claimed_size;
      stats->malloc_overhead += used_size - claimed_size;
    }

  return used_size;
}
#endif /* MEMORY_USAGE_STATS */



/*--- free heap functions ----------------------------------------------*/

/* Frees a heap section, if the heap section is completely free. */
static EMACS_INT
free_heap_section (page_header *ph)
{
  EMACS_INT i;
  EMACS_INT removed = 0;
  for (i = 0; i < N_HEAP_SECTIONS; i++)
    if (!removed)
      {
        if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
            && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
          {
            xfree_1 (HEAP_SECTION(i).real_start);
#ifdef MEMORY_USAGE_STATS
            MC_MALLOCED_BYTES
              -= malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
#endif

            HEAP_SIZE -= PH_N_PAGES (ph) * PAGE_SIZE;

            removed = 1;
          }
      }
    else
      {
        HEAP_SECTION(i-1).real_start = HEAP_SECTION(i).real_start;
        HEAP_SECTION(i-1).real_size = HEAP_SECTION(i).real_size;
        HEAP_SECTION(i-1).start = HEAP_SECTION(i).start;
        HEAP_SECTION(i-1).n_pages = HEAP_SECTION(i).n_pages;
      }

  N_HEAP_SECTIONS = N_HEAP_SECTIONS - removed;

  return removed;
}

/* Removes page from free list. */
static void
remove_page_from_free_list (page_header *ph)
{
  remove_page_header_from_plh (ph, PH_PLH (ph));
  PH_PLH (ph) = 0;
}


/* Adds page to according free list. */
static void
add_page_to_free_list (page_header *ph)
{
  PH_PLH (ph) = FREE_HEAP_PAGES (get_free_list_index (PH_N_PAGES (ph)));
  add_page_header_to_plh (ph, PH_PLH (ph));
}


/* Merges two adjacent pages. */
static page_header *
merge_pages (page_header *first_ph, page_header *second_ph)
{
  /* merge */
  PH_N_PAGES (first_ph) += PH_N_PAGES (second_ph);
  /* clean up left over page header */
  free_page_header (second_ph);
  /* update lookup table */
  add_pages_to_lookup_table (first_ph, PH_N_PAGES (first_ph));

  return first_ph;
}


/* Checks if pages are adjacent, merges them, and adds the merged page
   to the free list. */
static void
merge_into_free_list (page_header *ph)
{
  /* check if you can coalesce adjacent pages */
  page_header *prev_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
                                       - PAGE_SIZE));
  page_header *succ_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
                                       + (PH_N_PAGES (ph) * PAGE_SIZE)));
  if (PH_ON_FREE_LIST_P (prev_ph))
    {
      remove_page_from_free_list (prev_ph);
      ph = merge_pages (prev_ph, ph);
    }
  if (PH_ON_FREE_LIST_P (succ_ph))
    {
      remove_page_from_free_list (succ_ph);
      ph = merge_pages (ph, succ_ph);
    }
  /* try to free heap_section, if the section is complete */
  if (!free_heap_section (ph))
    /* else add merged page to free list */
    add_page_to_free_list (ph);
}
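
/* An illustrative scenario: freeing a 2-page block whose left
   neighbor is a free 1-page block and whose right neighbor is a free
   3-page block produces a single 6-page block.  The neighbors leave
   their old free lists, and the merged block either goes to the
   6-page free list or, if it now covers an entire heap section, is
   returned to the OS by free_heap_section (). */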


/* Cuts given page header after n_pages, returns the first (cut) part, and
   puts the rest on the free list. */
static page_header *
split_page (page_header *ph, EMACS_INT n_pages)
{
  page_header *new_ph;
  EMACS_INT rem_pages = PH_N_PAGES (ph) - n_pages;

  /* remove the page from the free list if already hooked in */
  if (PH_PLH (ph))
    remove_page_from_free_list (ph);
  /* set new number of pages */
  PH_N_PAGES (ph) = n_pages;
  /* add new page to lookup table */
  add_pages_to_lookup_table (ph, n_pages);

  if (rem_pages)
    {
      /* build new page with remainder */
      new_ph = alloc_page_header ();
      PH_N_PAGES (new_ph) = rem_pages;
      PH_HEAP_SPACE (new_ph) =
        (void*) ((EMACS_INT) (PH_HEAP_SPACE (ph)) + (n_pages * PAGE_SIZE));
      /* add new page to lookup table */
      add_pages_to_lookup_table (new_ph, rem_pages);
      /* hook the rest into free list */
      add_page_to_free_list (new_ph);
    }
  return ph;
}


/* Expands the heap by given number of pages. */
static page_header *
expand_heap (EMACS_INT needed_pages)
{
  page_header *ph;
  EMACS_INT n_pages;
  size_t real_size;
  void *real_start;

  /* determine number of pages the heap should grow */
  n_pages = needed_pages + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR));
  if (n_pages < MIN_HEAP_INCREASE)
    n_pages = MIN_HEAP_INCREASE;

  /* get the real values */
  real_size = (n_pages * PAGE_SIZE) + PAGE_SIZE;
  real_start = xmalloc_and_zero (real_size);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, real_size, 0);
#endif

  /* maintain heap section count */
  if (N_HEAP_SECTIONS >= MAX_HEAP_SECTS)
    {
      stderr_out ("Increase number of MAX_HEAP_SECTS");
      ABORT ();
    }
  HEAP_SECTION(N_HEAP_SECTIONS).real_start = real_start;
  HEAP_SECTION(N_HEAP_SECTIONS).real_size = real_size;
  HEAP_SECTION(N_HEAP_SECTIONS).start = PAGE_SIZE_ALIGNMENT (real_start);
  HEAP_SECTION(N_HEAP_SECTIONS).n_pages = n_pages;
  N_HEAP_SECTIONS++;

  /* get page header */
  ph = alloc_page_header ();

  /* setup page header */
  PH_N_PAGES (ph) = n_pages;
  PH_HEAP_SPACE (ph) = PAGE_SIZE_ALIGNMENT (real_start);
  assert (((EMACS_INT) (PH_HEAP_SPACE (ph)) % PAGE_SIZE) == 0);
  HEAP_SIZE += n_pages * PAGE_SIZE;

  /* this was also done by allocate_lisp_storage */
  if (need_to_check_c_alloca)
    xemacs_c_alloca (0);

  /* return needed size, put rest on free list */
  return split_page (ph, needed_pages);
}




/*--- used heap functions ----------------------------------------------*/
/* Installs initial free list. */
static void
install_cell_free_list (page_header *ph, EMACS_INT elemcount)
{
  Rawbyte *p;
  EMACS_INT i;
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  /* write initial free list if cell_size is < PAGE_SIZE */
  p = (Rawbyte *) PH_HEAP_SPACE (ph);
  for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
    {
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (p));
      MARK_LRECORD_AS_FREE (p);
#endif
      if (elemcount == 1)
        NEXT_FREE (p) = FREE_LIST (p + cell_size);
      set_lookup_table (p, ph);
      p += cell_size;
    }
#ifdef ERROR_CHECK_GC
  assert (!LRECORD_FREE_P (p));
  MARK_LRECORD_AS_FREE (p);
#endif
  NEXT_FREE (p) = 0;
  set_lookup_table (p, ph);

  /* hook free list into header */
  PH_FREE_LIST (ph) = FREE_LIST (PH_HEAP_SPACE (ph));
}


/* Cleans the object space of the given page_header. */
static void
remove_cell_free_list (page_header *ph)
{
#if ZERO_MEM
  ZERO_HEAP_SPACE (ph);
#endif
  PH_FREE_LIST (ph) = 0;
}


/* Installs a new page and hooks it into given page_list_header. */
static page_header *
install_page_in_used_list (page_header *ph, page_list_header *plh,
                           size_t size, EMACS_INT elemcount)
{
  /* add to list */
  add_page_header_to_plh (ph, plh);

  /* determine cell size */
  if (PLH_SIZE (plh))
    PH_CELL_SIZE (ph) = PLH_SIZE (plh);
  else
    PH_CELL_SIZE (ph) = size;
  if (elemcount == 1)
    PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph);
  else
    {
      PH_CELLS_ON_PAGE (ph) = elemcount;
      PH_ARRAY_BIT (ph) = 1;
    }

  /* init cell count */
  PH_CELLS_USED (ph) = 0;

  /* install mark bits and initialize cell free list */
  install_mark_bits (ph);

  install_cell_free_list (ph, elemcount);

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph);
#endif

  return ph;
}


/* Cleans and frees a page, identified by the given page_header. */
static void
remove_page_from_used_list (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);

  if (gc_in_progress && PH_PROTECTION_BIT (ph)) ABORT();
  /* cleanup: remove memory protection, zero page_header bits. */

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
#endif

  /* clean up mark bits and cell free list */
  remove_cell_free_list (ph);
  if (PH_ON_USED_LIST_P (ph))
    remove_mark_bits (ph);

  /* clean up page header */
  PH_CELL_SIZE (ph) = 0;
  PH_CELLS_ON_PAGE (ph) = 0;
  PH_CELLS_USED (ph) = 0;

  /* remove from used list */
  remove_page_header_from_plh (ph, plh);

  /* move to free list */
  merge_into_free_list (ph);
}




/*--- allocation -------------------------------------------------------*/

/* Allocates from cell free list on already allocated pages. */
static page_header *
allocate_cell (page_list_header *plh)
{
  page_header *ph = PLH_FIRST (plh);
  if (ph)
    {
      if (PH_FREE_LIST (ph))
        /* elements free on first page */
        return ph;
      else if ((PH_NEXT (ph))
               && (PH_FREE_LIST (PH_NEXT (ph))))
        /* elements free on second page */
        {
          page_header *temp = PH_NEXT (ph);
          /* move full page (first page) to end of list */
          PH_NEXT (PLH_LAST (plh)) = ph;
          PH_PREV (ph) = PLH_LAST (plh);
          PLH_LAST (plh) = ph;
          PH_NEXT (ph) = 0;
          /* install second page as first page */
          ph = temp;
          PH_PREV (ph) = 0;
          PLH_FIRST (plh) = ph;
          return ph;
        }
    }
  return 0;
}


/* Finds a page which has at least the needed number of pages.
   Algorithm: FIRST FIT. */
static page_header *
find_free_page_first_fit (EMACS_INT needed_pages, page_header *ph)
{
  while (ph)
    {
      if (PH_N_PAGES (ph) >= needed_pages)
        return ph;
      ph = PH_NEXT (ph);
    }
  return 0;
}


/* Allocates a page from the free list. */
static page_header *
allocate_page_from_free_list (EMACS_INT needed_pages)
{
  page_header *ph = 0;
  EMACS_INT i;
  for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
    if ((ph = find_free_page_first_fit (needed_pages,
                                        PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
      {
        if (PH_N_PAGES (ph) > needed_pages)
          return split_page (ph, needed_pages);
        else
          {
            remove_page_from_free_list (ph);
            return ph;
          }
      }
  return 0;
}


/* Allocates a new page, either from free list or by expanding the heap. */
static page_header *
allocate_new_page (page_list_header *plh, size_t size, EMACS_INT elemcount)
{
  EMACS_INT needed_pages = BYTES_TO_PAGES (size * elemcount);
  /* first check free list */
  page_header *result = allocate_page_from_free_list (needed_pages);
  if (!result)
    /* expand heap */
    result = expand_heap (needed_pages);
  install_page_in_used_list (result, plh, size, elemcount);
  return result;
}


/* Selects the correct size class and tries to allocate a cell of this
   size from the free list; if this fails, a new page is allocated. */
static void *
mc_alloc_1 (size_t size, EMACS_INT elemcount)
{
  page_list_header *plh = 0;
  page_header *ph = 0;
  void *result = 0;

  plh = USED_HEAP_PAGES (get_used_list_index (size));

  if (size == 0)
    return 0;
  if ((elemcount == 1) && (size < (size_t) PAGE_SIZE_DIV_2))
    /* first check any free cells */
    ph = allocate_cell (plh);
  if (!ph)
    /* allocate a new page */
    ph = allocate_new_page (plh, size, elemcount);

  /* return first element of free list and remove it from the list */
  result = (void*) PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) =
    NEXT_FREE (PH_FREE_LIST (ph));

  memset (result, '\0', (size * elemcount));
  MARK_LRECORD_AS_FREE (result);

  /* bump used cells counter */
  PH_CELLS_USED (ph) += elemcount;

#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (plh) += elemcount;
  PLH_USED_SPACE (plh) += size * elemcount;
#endif

  return result;
}

/* Array allocation. */
void *
mc_alloc_array (size_t size, EMACS_INT elemcount)
{
  return mc_alloc_1 (size, elemcount);
}

void *
mc_alloc (size_t size)
{
  return mc_alloc_1 (size, 1);
}
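
/* A usage sketch (illustrative; my_record is a hypothetical
   lrecord-compatible type):

     struct my_record *r =
       (struct my_record *) mc_alloc (sizeof (struct my_record));

   The returned memory is zeroed.  Note that there is no useful
   manual free: unreachable cells are reclaimed by mc_sweep (), and
   mc_free () below is deliberately a no-op. */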
|
|
1434
|
|
1435
|
|
1436
|
|
1437 /*--- sweep & free & finalize-------------------------------------------*/
|
|
1438
|
|
1439 /* Frees a heap pointer. */
|
|
1440 static void
|
|
1441 remove_cell (void *ptr, page_header *ph)
|
|
1442 {
|
|
1443 #ifdef MEMORY_USAGE_STATS
|
|
1444 PLH_USED_CELLS (PH_PLH (ph))--;
|
|
1445 if (PH_ON_USED_LIST_P (ph))
|
|
1446 PLH_USED_SPACE (PH_PLH (ph)) -=
|
|
1447 detagged_lisp_object_size ((const struct lrecord_header *) ptr);
|
|
1448 else
|
|
1449 PLH_USED_SPACE (PH_PLH (ph)) -= PH_CELL_SIZE (ph);
|
|
1450 #endif
|
2775
|
1451 if (PH_ON_USED_LIST_P (ph))
|
|
1452 {
|
2994
|
1453 #ifdef ALLOC_TYPE_STATS
|
2775
|
1454 dec_lrecord_stats (PH_CELL_SIZE (ph),
|
|
1455 (const struct lrecord_header *) ptr);
|
2994
|
1456 #endif /* ALLOC_TYPE_STATS */
|
2775
|
1457 #ifdef ERROR_CHECK_GC
|
|
1458 assert (!LRECORD_FREE_P (ptr));
|
|
1459 deadbeef_memory (ptr, PH_CELL_SIZE (ph));
|
|
1460 MARK_LRECORD_AS_FREE (ptr);
|
2720
|
1461 #endif
|
2775
|
1462 }
|
2720
|
1463
|
|
1464 /* hooks cell into free list */
|
|
1465 NEXT_FREE (ptr) = PH_FREE_LIST (ph);
|
|
1466 PH_FREE_LIST (ph) = FREE_LIST (ptr);
|
|
1467 /* decrease cells used */
|
|
1468 PH_CELLS_USED (ph)--;
|
|
1469 }
|
|
1470
|
|
1471
|
|
1472 /* Mark free list marks all free list entries. */
|
|
1473 static void
|
|
1474 mark_free_list (page_header *ph)
|
|
1475 {
|
|
1476 free_link *fl = PH_FREE_LIST (ph);
|
|
1477 while (fl)
|
|
1478 {
|
3092
|
1479 SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK);
|
2720
|
1480 fl = NEXT_FREE (fl);
|
|
1481 }
|
|
1482 }
|
|
1483
|
|
1484
|
|
1485 /* Finalize a page for disksave. XEmacs calls this routine before it
|
|
1486 dumps the heap image. You have to tell mc-alloc how to call your
|
|
1487 object's finalizer for disksave. Therefore, you have to define the
|
|
1488 macro MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr). This macro should
|
|
1489 do nothing else then test if there is a finalizer and call it on
|
3303
|
1490 the given argument, which is the heap address of the object.
|
|
1491 Returns number of processed pages. */
|
|
1492 static EMACS_INT
|
2720
|
1493 finalize_page_for_disksave (page_header *ph)
|
|
1494 {
|
|
1495 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
|
|
1496 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
|
|
1497 EMACS_INT mark_bit = 0;
|
|
1498 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
|
|
1499
|
|
1500 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1501 {
|
|
1502 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
|
|
1503 MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
|
|
1504 }
|
3303
|
1505 return 1;
|
2720
|
1506 }
|
|
1507
|
|
1508
|
3303
|
1509 /* Finalizes the heap for disksave. Returns number of processed
|
|
1510 pages. */
|
|
1511 EMACS_INT
|
2720
|
1512 mc_finalize_for_disksave (void)
|
|
1513 {
|
3303
|
1514 return visit_all_used_page_headers (finalize_page_for_disksave);
|
2720
|
1515 }
|
|
1516
|
|
1517
|
3303
|
1518 /* Sweeps a page: all the non-marked cells are freed. If the page is
|
|
1519 empty in the end, it is removed. If some cells are free, it is
|
|
1520 moved to the front of its page header list. Full pages stay where
|
|
1521 they are. Returns number of processed pages.*/
|
|
1522 static EMACS_INT
|
2720
|
1523 sweep_page (page_header *ph)
|
|
1524 {
|
3092
|
1525 Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
|
2720
|
1526 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
|
|
1527 EMACS_INT mark_bit = 0;
|
|
1528 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
|
3092
|
1529 unsigned int bit = 0;
|
2720
|
1530
|
|
1531 mark_free_list (ph);
|
|
1532
|
3092
|
1533 /* ARRAY_BIT_HACK */
|
|
1534 if (PH_ARRAY_BIT (ph))
|
|
1535 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1536 {
|
|
1537 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
|
|
1538 if (bit)
|
|
1539 {
|
|
1540 zero_mark_bits (ph);
|
|
1541 PH_BLACK_BIT (ph) = 0;
|
3303
|
1542 return 1;
|
3092
|
1543 }
|
|
1544 }
|
|
1545
|
2720
|
1546 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
|
|
1547 {
|
3092
|
1548 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
|
|
1549 if (bit == WHITE)
|
2720
|
1550 {
|
3092
|
1551 GC_STAT_FREED;
|
2720
|
1552 remove_cell (heap_space + (heap_space_step * mark_bit), ph);
|
|
1553 }
|
|
1554 }
|
|
1555 zero_mark_bits (ph);
|
3092
|
1556 PH_BLACK_BIT (ph) = 0;
|
2720
|
1557 if (PH_CELLS_USED (ph) == 0)
|
|
1558 remove_page_from_used_list (ph);
|
|
1559 else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
|
|
1560 move_page_header_to_front (ph);
|
3303
|
1561
|
|
1562 return 1;
|
2720
|
1563 }
|
|
1564
|
|
1565
|
3303
|
1566 /* Sweeps the heap. Returns number of processed pages. */
|
|
1567 EMACS_INT
|
2720
|
1568 mc_sweep (void)
|
|
1569 {
|
3303
|
1570 return visit_all_used_page_headers (sweep_page);
|
2720
|
1571 }
|
|
1572
|
|
1573
|
|
1574 /* Frees the cell pointed to by ptr. */
|
|
1575 void
|
3263
|
1576 mc_free (void *UNUSED (ptr))
|
2720
|
1577 {
|
3263
|
1578 /* Manual frees are not allowed with asynchronous finalization */
|
|
1579 return;
|
2720
|
1580 }


/* Changes the size of the cell pointed to by ptr.  Returns the address
   of a new cell of the new size; the contents are copied over up to
   the smaller of the two sizes. */
void *
mc_realloc_1 (void *ptr, size_t size, int elemcount)
{
  if (ptr)
    {
      if (size * elemcount)
        {
          void *result = mc_alloc_1 (size, elemcount);
          size_t from_size = PH_CELL_SIZE (get_page_header (ptr));
          size_t cpy_size = size * elemcount;
          if (cpy_size > from_size)
            cpy_size = from_size;
          memcpy (result, ptr, cpy_size);
#ifdef ALLOC_TYPE_STATS
          inc_lrecord_stats (size, (struct lrecord_header *) result);
#endif /* ALLOC_TYPE_STATS */
          /* mc_free (ptr); not needed, the old cell is collected at
             the next GC. */
          return result;
        }
      else
        {
          /* mc_free (ptr); not needed, the old cell is collected at
             the next GC. */
          return 0;
        }
    }
  else
    return mc_alloc_1 (size, elemcount);
}

void *
mc_realloc (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 1);
}

void *
mc_realloc_array (void *ptr, size_t size, EMACS_INT elemcount)
{
  return mc_realloc_1 (ptr, size, elemcount);
}
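
#if 0
/* Illustrative usage sketch, not compiled: growing an mc-allocated
   buffer, assuming the mc_alloc entry point declared in mc-alloc.h.
   Note that the old cell is not freed here; it is reclaimed at the
   next collection (see mc_realloc_1 above). */
static void
example_grow_buffer (void)
{
  char *buf = (char *) mc_alloc (32);
  /* Copies the old cell's contents into a cell that holds 64 bytes. */
  buf = (char *) mc_realloc (buf, 64);
}
#endif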



/*--- initialization ---------------------------------------------------*/

/* Call once at the very beginning. */
void
init_mc_allocator (void)
{
  EMACS_INT i;

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES = 0;
#endif

  /* Initialization of the page-size-dependent values. */
  switch (SYS_PAGE_SIZE)
    {
    case 512:   log_page_size = 9;  break;
    case 1024:  log_page_size = 10; break;
    case 2048:  log_page_size = 11; break;
    case 4096:  log_page_size = 12; break;
    case 8192:  log_page_size = 13; break;
    case 16384: log_page_size = 14; break;
    case 32768: log_page_size = 15; break;
    case 65536: log_page_size = 16; break;
    default:
      fprintf (stderr, "##### SYS_PAGE_SIZE=%ld not supported #####\n",
               (long) SYS_PAGE_SIZE);
      ABORT ();
    }
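
#if 0
  /* Equivalent generic computation, a sketch assuming the page size is
     a power of two (which valid page sizes are): */
  {
    EMACS_INT size = SYS_PAGE_SIZE;
    for (log_page_size = 0; size > 1; size >>= 1)
      log_page_size++;
  }
#endif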

  page_size_div_2 = (EMACS_INT) SYS_PAGE_SIZE >> 1;

  mc_allocator_globals.used_heap_pages =
    (page_list_header *) xmalloc_and_zero ((N_USED_PAGE_LISTS + 1)
                                           * sizeof (page_list_header));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (N_USED_PAGE_LISTS + 1) * sizeof (page_list_header);
#endif

  mc_allocator_globals.ptr_lookup_table =
    (level_2_lookup_tree **)
    xmalloc_and_zero ((LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *);
#endif

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  the_page_header_blocktype = Blocktype_new (struct page_header_blocktype);
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    {
      page_list_header *plh = USED_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = USED_LIST;
      PLH_SIZE (plh) = get_used_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    {
      page_list_header *plh = FREE_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = FREE_LIST;
      PLH_SIZE (plh) = get_free_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  PAGE_HEADER_FREE_LIST = 0;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += sizeof (mc_allocator_globals);
#endif

  init_lookup_table ();
}
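
/* The expected call order at startup (the actual call sites live in
   the XEmacs boot code, outside this file): init_mc_allocator must run
   before the first mc_alloc, and syms_of_mc_alloc (below) once the
   Lisp subr machinery is up. */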



/*--- lisp function for statistics -------------------------------------*/

#ifdef MEMORY_USAGE_STATS
DEFUN ("mc-alloc-memory-usage", Fmc_alloc_memory_usage, 0, 0, 0, /*
Return statistics about the mc-alloc memory usage.  See diagnose.el.
*/
       ())
{
  Lisp_Object free_plhs = Qnil;
  Lisp_Object used_plhs = Qnil;
  Lisp_Object heap_sects = Qnil;
  EMACS_INT used_size = 0;
  EMACS_INT real_size = 0;
  EMACS_INT i;

  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (FREE_HEAP_PAGES (i)) > 0)
      free_plhs =
        acons (make_int (PLH_SIZE (FREE_HEAP_PAGES (i))),
               list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES (i)))),
               free_plhs);

  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (USED_HEAP_PAGES (i)) > 0)
      used_plhs =
        acons (make_int (PLH_SIZE (USED_HEAP_PAGES (i))),
               list5 (make_int (PLH_PAGE_COUNT (USED_HEAP_PAGES (i))),
                      make_int (PLH_USED_CELLS (USED_HEAP_PAGES (i))),
                      make_int (PLH_USED_SPACE (USED_HEAP_PAGES (i))),
                      make_int (PLH_TOTAL_CELLS (USED_HEAP_PAGES (i))),
                      make_int (PLH_TOTAL_SPACE (USED_HEAP_PAGES (i)))),
               used_plhs);

  for (i = 0; i < N_HEAP_SECTIONS; i++)
    {
      used_size += HEAP_SECTION (i).n_pages * PAGE_SIZE;
      real_size +=
        malloced_storage_size (0, HEAP_SECTION (i).real_size, 0);
    }

  heap_sects =
    list3 (make_int (N_HEAP_SECTIONS),
           make_int (used_size),
           make_int (real_size));

  return Fcons (make_int (PAGE_SIZE),
                list5 (heap_sects,
                       Fnreverse (used_plhs),
                       Fnreverse (free_plhs),
                       make_int (sizeof (mc_allocator_globals)),
                       make_int (MC_MALLOCED_BYTES)));
}
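
/* For reference, the shape of the return value as assembled above:
     (PAGE-SIZE
      (N-HEAP-SECTIONS USED-SIZE REAL-SIZE)              ; heap_sects
      ((CELL-SIZE PAGE-COUNT USED-CELLS USED-SPACE
        TOTAL-CELLS TOTAL-SPACE) ...)                    ; used_plhs
      ((CELL-SIZE PAGE-COUNT) ...)                       ; free_plhs
      SIZEOF-MC-ALLOCATOR-GLOBALS
      MC-MALLOCED-BYTES) */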
#endif /* MEMORY_USAGE_STATS */

void
syms_of_mc_alloc (void)
{
#ifdef MEMORY_USAGE_STATS
  DEFSUBR (Fmc_alloc_memory_usage);
#endif /* MEMORY_USAGE_STATS */
}


/*--- incremental garbage collector ----------------------------------*/

/* Access the dirty bit of a page header. */
void
set_dirty_bit (page_header *ph, unsigned int value)
{
  PH_DIRTY_BIT (ph) = value;
}

void
set_dirty_bit_for_address (void *ptr, unsigned int value)
{
  set_dirty_bit (get_page_header (ptr), value);
}

unsigned int
get_dirty_bit (page_header *ph)
{
  return PH_DIRTY_BIT (ph);
}

unsigned int
get_dirty_bit_for_address (void *ptr)
{
  return get_dirty_bit (get_page_header (ptr));
}


/* Access the protection bit of a page header. */
void
set_protection_bit (page_header *ph, unsigned int value)
{
  PH_PROTECTION_BIT (ph) = value;
}

void
set_protection_bit_for_address (void *ptr, unsigned int value)
{
  set_protection_bit (get_page_header (ptr), value);
}

unsigned int
get_protection_bit (page_header *ph)
{
  return PH_PROTECTION_BIT (ph);
}

unsigned int
get_protection_bit_for_address (void *ptr)
{
  return get_protection_bit (get_page_header (ptr));
}


/* Returns the start of the page of the object pointed to by ptr. */
void *
get_page_start (void *ptr)
{
  return PH_HEAP_SPACE (get_page_header (ptr));
}

/* Make PAGE_SIZE globally available. */
EMACS_INT
mc_get_page_size (void)
{
  return PAGE_SIZE;
}

/* Is the fault at ptr on a protected page? */
EMACS_INT
fault_on_protected_page (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph
          && PH_HEAP_SPACE (ph)
          && (PH_HEAP_SPACE (ph) <= ptr)
          && ((void *) ((EMACS_INT) PH_HEAP_SPACE (ph)
                        + PH_N_PAGES (ph) * PAGE_SIZE) > ptr)
          && (PH_PROTECTION_BIT (ph) == 1));
}


/* Protect the heap page of the given page header ph if black objects
   are on the page.  Returns the number of processed pages. */
static EMACS_INT
protect_heap_page (page_header *ph)
{
  if (PH_BLACK_BIT (ph))
    {
      void *heap_space = PH_HEAP_SPACE (ph);
      EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
      vdb_protect ((void *) heap_space, heap_space_size);
      PH_PROTECTION_BIT (ph) = 1;
      return 1;
    }
  return 0;
}

/* Protect all heap pages with black objects.  Returns the number of
   processed pages. */
EMACS_INT
protect_heap_pages (void)
{
  return visit_all_used_page_headers (protect_heap_page);
}


/* Remove the protection (if any) of the heap page of the given page
   header ph.  Returns the number of processed pages. */
static EMACS_INT
unprotect_heap_page (page_header *ph)
{
  if (PH_PROTECTION_BIT (ph))
    {
      void *heap_space = PH_HEAP_SPACE (ph);
      EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
      vdb_unprotect (heap_space, heap_space_size);
      PH_PROTECTION_BIT (ph) = 0;
      return 1;
    }
  return 0;
}

/* Remove the protection of all heap pages which are protected.
   Returns the number of processed pages. */
EMACS_INT
unprotect_heap_pages (void)
{
  return visit_all_used_page_headers (unprotect_heap_page);
}

/* Remove the protection of the page of ptr and mark the page dirty. */
void
unprotect_page_and_mark_dirty (void *ptr)
{
  page_header *ph = get_page_header (ptr);
  unprotect_heap_page (ph);
  PH_DIRTY_BIT (ph) = 1;
}
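
#if 0
/* Illustrative sketch, not compiled: how a page-fault handler would
   use the predicates above to implement the write barrier.
   `example_handle_fault' is hypothetical; the real handler lives in
   the vdb implementation, outside this file. */
static int
example_handle_fault (void *fault_address)
{
  if (fault_on_protected_page (fault_address))
    {
      /* Open the page and remember that it was written to, so its
         objects can be repushed onto the mark stack later. */
      unprotect_page_and_mark_dirty (fault_address);
      return 1;   /* handled; the faulting write is retried */
    }
  return 0;       /* not ours; let the default handling run */
}
#endif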

/* Repush all black objects on the (dirty) page of ptr onto the mark
   stack.  Returns the number of repushed objects. */
int
repush_all_objects_on_page (void *ptr)
{
  int repushed_objects = 0;
  page_header *ph = get_page_header (ptr);
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == BLACK)
        {
          repushed_objects++;
          gc_write_barrier
            (wrap_pointer_1 ((heap_space + (heap_space_step * mark_bit))));
        }
    }
  PH_BLACK_BIT (ph) = 0;
  PH_DIRTY_BIT (ph) = 0;
  return repushed_objects;
}

/* Mark the object black if it is currently grey.  This first checks
   whether the object is really allocated on the mc-heap; if it is, it
   can be marked black, if it is not, it cannot be marked at all.
   Returns 1 if the object is on the mc-heap, 0 otherwise. */
EMACS_INT
maybe_mark_black (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  unsigned int bit = 0;

  if (ph && PH_PLH (ph) && PH_ON_USED_LIST_P (ph))
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
      if (bit == GREY)
        {
          if (!PH_BLACK_BIT (ph))
            PH_BLACK_BIT (ph) = 1;
          SET_BIT (ph, get_mark_bit_index (ptr, ph), BLACK);
        }
      return 1;
    }
  return 0;
}

/* Only for debugging; not used anywhere in the sources. */
EMACS_INT
object_on_heap_p (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph && PH_ON_USED_LIST_P (ph));
}