2720
|
1 /* New size-based allocator for XEmacs.
|
|
2 Copyright (C) 2005 Marcus Crestani.
|
|
3
|
|
4 This file is part of XEmacs.
|
|
5
|
|
6 XEmacs is free software; you can redistribute it and/or modify it
|
|
7 under the terms of the GNU General Public License as published by the
|
|
8 Free Software Foundation; either version 2, or (at your option) any
|
|
9 later version.
|
|
10
|
|
11 XEmacs is distributed in the hope that it will be useful, but WITHOUT
|
|
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
14 for more details.
|
|
15
|
|
16 You should have received a copy of the GNU General Public License
|
|
17 along with XEmacs; see the file COPYING. If not, write to
|
|
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
|
19 Boston, MA 02111-1307, USA. */
|
|
20
|
|
21 /* Synched up with: Not in FSF. */
|
|
22
|
|
23 #include <config.h>
|
|
24 #include "lisp.h"
|
|
25 #include "mc-alloc.h"
|
|
26
|
|
27
|
|
28 /*--- configurable values ----------------------------------------------*/
|
|
29
|
|
30 /* Valid page sizes are powers of 2. */
|
|
31 #undef PAGE_SIZE /* for FreeBSD */
|
|
32 #define PAGE_SIZE 2048
|
|
33
|
|
34
|
|
35 /* Definition of size classes */
|
|
36
|
|
37 /* Heap used list constants: In the used heap, it is important to
|
|
38 quickly find a free spot for a new object. Therefore the size
|
|
39 classes of the used heap are defined by the size of the cells on
|
|
40 the pages. The size classes should match common object sizes, to
|
|
41 avoid wasting memory. */
|
|
42
|
|
43 /* Minimum object size in bytes. */
|
|
44 #define USED_LIST_MIN_OBJECT_SIZE 8
|
|
45
|
|
46 /* The step size by which the size classes increase (up to upper
|
|
47 threshold). This many bytes are mapped to a single used list: */
|
|
48 #define USED_LIST_LIN_STEP 4
|
|
49
|
|
/* The upper threshold should always be set to PAGE_SIZE/2, because if
   an object is larger than PAGE_SIZE/2 there is no room for any other
   object on this page.  Objects this big are kept in the page list of
   the multiple pages, since a quick search for free spots is not
   needed for this kind of pages (because there are no free spots).
   PAGE_SIZE_DIV_2 defines the maximum size of a used space list. */
|
|
56 #define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2
|
|
57
|
|
58
|
|
59 /* Unmanaged memory used list constants: Like in the used heap, it is
|
|
60 important to quickly find a free spot for a new object. Therefore
|
|
61 the size classes of the unmanaged heap are defined by the size of
|
|
62 the cells on the pages. The size classes should match common object
|
|
63 sizes, to avoid wasting memory. */
|
|
64 /* Minimum object size in bytes. */
|
|
65 #define UNMANAGED_LIST_MIN_OBJECT_SIZE 8
|
|
66 /* The step size by which the size classes increase (up to upper
|
|
67 threshold). This many bytes are mapped to a single unmanaged list: */
|
|
68 #define UNMANAGED_LIST_LIN_STEP 4
|
|
/* The upper threshold should always be set to PAGE_SIZE/2, because if
   an object is larger than PAGE_SIZE/2 there is no room for any other
   object on this page.  Objects this big are kept in the page list of
   the multiple pages, since a quick search for free spots is not
   needed for this kind of pages (because there are no free spots).
   PAGE_SIZE_DIV_2 defines the maximum size of an unmanaged space list. */
|
|
75 #define UNMANAGED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2
|
|
76
|
|
77
|
|
78 /* Heap free list constants: In the unused heap, the size of
|
|
79 consecutive memory tips the scales. A page is smallest entity which
|
|
80 is asked for. Therefore, the size classes of the unused heap are
|
|
81 defined by the number of consecutive pages. */
|
|
82 /* Sizes up to this many pages each have their own free list. */
|
|
83 #define FREE_LIST_LOWER_THRESHOLD 32
|
|
84 /* The step size by which the size classes increase (up to upper
|
|
85 threshold). FREE_LIST_LIN_STEP number of sizes are mapped to a
|
|
86 single free list for sizes between FREE_LIST_LOWER_THRESHOLD and
|
|
87 FREE_LIST_UPPER_THRESHOLD. */
|
|
88 #define FREE_LIST_LIN_STEP 8
|
|
89 /* Sizes of at least this many pages are mapped to a single free
|
|
90 list. Blocks of memory larger than this number are all kept in a
|
|
91 single list, which makes searching this list slow. But objects that
|
|
92 big are really seldom. */
|
|
93 #define FREE_LIST_UPPER_THRESHOLD 256
|
|
94
|
|
95
|
|
96 /* Maximum number of separately added heap sections. */
|
|
97 #if BITS_PER_EMACS_INT > 32
|
|
98 # define MAX_HEAP_SECTS 2048
|
|
99 #else
|
|
100 # define MAX_HEAP_SECTS 768
|
|
101 #endif
|
|
102
|
|
103
|
|
104 /* Heap growth constants. Heap increases by any number between the
|
|
105 boundaries (unit is PAGE_SIZE). */
|
|
106 #define MIN_HEAP_INCREASE 32
|
|
107 #define MAX_HEAP_INCREASE 256 /* not used */
|
|
108
|
|
109 /* Every heap growth is calculated like this:
|
|
110 needed_pages + ( HEAP_SIZE / ( PAGE_SIZE * HEAP_GROWTH_DIVISOR )).
|
|
111 So the growth of the heap is influenced by the current size of the
|
|
112 heap, but kept between MIN_HEAP_INCREASE and MAX_HEAP_INCREASE
|
|
113 boundaries.
|
|
114 This reduces the number of heap sectors, the larger the heap grows
|
|
115 the larger are the newly allocated chunks. */
|
|
116 #define HEAP_GROWTH_DIVISOR 3
|
|
117
|
|
118
|
|
119 /* Zero memory before putting on free lists. */
|
|
120 #define ZERO_MEM 1
|
|
121
|
|
122
|
|
123
|
|
124
|
|
125 /*--- calculations done by macros --------------------------------------*/
|
|
126
|
|
127 #ifndef CHAR_BIT /* should be included by limits.h */
|
|
128 # define CHAR_BIT BITS_PER_CHAR
|
|
129 #endif
|
|
130
|
|
131 #if PAGE_SIZE == 512
|
|
132 # define CPP_LOG_PAGE_SIZE 9
|
|
133 #endif
|
|
134 #if PAGE_SIZE == 1024
|
|
135 # define CPP_LOG_PAGE_SIZE 10
|
|
136 #endif
|
|
137 #if PAGE_SIZE == 2048
|
|
138 # define CPP_LOG_PAGE_SIZE 11
|
|
139 #endif
|
|
140 #if PAGE_SIZE == 4096
|
|
141 # define CPP_LOG_PAGE_SIZE 12
|
|
142 #endif
|
|
143 #if PAGE_SIZE == 8192
|
|
144 # define CPP_LOG_PAGE_SIZE 13
|
|
145 #endif
|
|
146 #if PAGE_SIZE == 16384
|
|
147 # define CPP_LOG_PAGE_SIZE 14
|
|
148 #endif
|
|
149 #ifndef CPP_LOG_PAGE_SIZE
|
|
150 --> fix PAGE_SIZE
|
|
151 #endif
|
|
152 #undef PAGE_SIZE
|
|
153 #define CPP_PAGE_SIZE (1 << CPP_LOG_PAGE_SIZE)
|
|
154 #define LOG_PAGE_SIZE ((EMACS_INT) CPP_LOG_PAGE_SIZE)
|
|
155 #define PAGE_SIZE ((EMACS_INT) CPP_PAGE_SIZE)
|
|
156 #define PAGE_SIZE_DIV_2 (PAGE_SIZE >> 1)
|
|
157
|
|
158
|
|
159 /* NOT USED ANYMORE */
|
|
160 #ifdef USE_EXPONENTIAL_USED_LIST_GROWTH
|
|
161 /* used heap list logarithms */
|
|
162 #if USED_LIST_LOWER_THRESHOLD == 8
|
|
163 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 3
|
|
164 #endif
|
|
165 #if USED_LIST_LOWER_THRESHOLD == 16
|
|
166 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 4
|
|
167 #endif
|
|
168 #if USED_LIST_LOWER_THRESHOLD == 32
|
|
169 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 5
|
|
170 #endif
|
|
171 #if USED_LIST_LOWER_THRESHOLD == 64
|
|
172 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 6
|
|
173 #endif
|
|
174 #if USED_LIST_LOWER_THRESHOLD == 128
|
|
175 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 7
|
|
176 #endif
|
|
177 #if USED_LIST_LOWER_THRESHOLD == 256
|
|
178 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 8
|
|
179 #endif
|
|
180 #ifndef CPP_LOG_USED_LIST_LOWER_THRESHOLD
|
|
181 --> fix USED_LIST_LOWER_THRESHOLD
|
|
182 #endif
|
|
183 #define LOG_USED_LIST_LOWER_THRESHOLD CPP_LOG_USED_LIST_LOWER_THRESHOLD
|
|
184 #endif /* USE_EXPONENTIAL_USED_LIST_GROWTH */
|
|
185
|
|
186 /* used heap list count */
|
|
187 #define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \
|
|
188 - USED_LIST_MIN_OBJECT_SIZE) \
|
|
189 / USED_LIST_LIN_STEP) + 1 ) + 1
|
|
190
|
|
191 /* unmanaged memory list count */
|
|
192 #define N_UNMANAGED_PAGE_LISTS (((UNMANAGED_LIST_UPPER_THRESHOLD \
|
|
193 - UNMANAGED_LIST_MIN_OBJECT_SIZE) \
|
|
194 / UNMANAGED_LIST_LIN_STEP) + 1 ) + 1
|
|
195
|
|
196 /* NOT USED ANYMORE */
|
|
197 #ifdef USE_EXPONENTIAL_USED_LIST_GROWTH
|
|
198 #define N_USED_PAGE_LISTS_LIN (((USED_LIST_LOWER_THRESHOLD \
|
|
199 - USED_LIST_MIN_OBJECT_SIZE) \
|
|
200 / USED_LIST_LIN_STEP) + 1 )
|
|
201 #define N_USED_PAGE_LISTS_EXP \
|
|
202 (LOG_PAGE_SIZE - LOG_USED_LIST_LOWER_THRESHOLD)
|
|
203
|
|
204 #define N_USED_PAGE_LISTS \
|
|
205 (N_USED_PAGE_LISTS_LIN + N_USED_PAGE_LISTS_EXP + 1)
|
|
206 #endif /* USE_EXPONENTIAL_USED_LIST_GROWTH */
|
|
207
|
|
208 /* free heap list count */
|
|
209 #define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \
|
|
210 - FREE_LIST_LOWER_THRESHOLD) \
|
|
211 / FREE_LIST_LIN_STEP) \
|
|
212 + FREE_LIST_LOWER_THRESHOLD)
|
|
213
|
|
214
|
|
215 /* Constants for heap address to page header mapping. */
|
|
216 #define LOG_LEVEL2_SIZE 10
|
|
217 #define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE)
|
|
218 #if BITS_PER_EMACS_INT > 32
|
|
219 # define USE_HASH_TABLE 1
|
|
220 # define LOG_LEVEL1_SIZE 11
|
|
221 #else
|
|
222 # define LOG_LEVEL1_SIZE \
|
|
223 (BITS_PER_EMACS_INT - LOG_LEVEL2_SIZE - LOG_PAGE_SIZE)
|
|
224 #endif
|
|
225 #define LEVEL1_SIZE (1 << LOG_LEVEL1_SIZE)
|
|
226
|
|
227 #ifdef USE_HASH_TABLE
|
|
228 # define HASH(hi) ((hi) & (LEVEL1_SIZE - 1))
|
|
229 # define L1_INDEX(p) HASH ((EMACS_INT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
|
|
230 #else
|
|
231 # define L1_INDEX(p) ((EMACS_INT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
|
|
232 #endif
|
|
233 #define L2_INDEX(p) (((EMACS_INT) p >> LOG_PAGE_SIZE) & (LEVEL2_SIZE - 1))
|
|
234
|
|
235
|
|
236
|
|
237
|
|
238 /*--- structs and typedefs ---------------------------------------------*/
|
|
239
|
|
/* Links the free lists (mark_bit_free_list, page_header_free_list,
   cell free list).  A free_link is overlaid onto the free cell
   itself, so every free cell must be at least sizeof (free_link)
   bytes (alloc_bit_array enforces this for mark-bit blocks). */
typedef struct free_link
{
  struct lrecord_header lheader;  /* NOTE(review): presumably lets a free
                                     cell masquerade as a lrecord -- confirm
                                     against the sweep code */
  struct free_link *next_free;    /* next entry on the free list */
} free_link;
|
|
247
|
|
248
|
|
/* Header for pages.  They are kept in a doubly linked list. */
typedef struct page_header
{
  struct page_header *next;      /* next page_header */
  struct page_header *prev;      /* previous page_header */
  /* Field plh holds a pointer to the header of the page list this
     page currently belongs to. */
  struct page_list_header *plh;  /* page list header */
  free_link *free_list;          /* links free cells on page */
  EMACS_INT n_pages;             /* number of pages */
  EMACS_INT cell_size;           /* size of cells on page */
  EMACS_INT cells_on_page;       /* total number of cells on page */
  EMACS_INT cells_used;          /* number of used cells on page */
  /* If the number of objects on the page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area and the field mark_bits holds the pointer to this area.
     If the number of objects is smaller than BITS_PER_EMACS_INT,
     the mark bits are held in the mark_bits word directly, without
     an additional indirection. */
  char *mark_bits;               /* pointer to mark bits */
  void *heap_space;              /* pointer to heap, where objects
                                    are stored */
} page_header;
|
|
270
|
|
271
|
|
/* Different list types.  Every page_list_header is tagged with one of
   these so macros like PH_ON_FREE_LIST_P can tell what kind of list a
   page is currently on. */
enum list_type_enum {
  USED_LIST,       /* pages holding live, GC-managed objects */
  UNMANAGED_LIST,  /* pages holding unmanaged (non-GC) allocations */
  FREE_LIST        /* pages available for reuse */
};
|
|
278
|
|
279
|
|
/* Header for page lists.  Field list_type holds the type of the list. */
typedef struct page_list_header
{
  enum list_type_enum list_type; /* the type of the list */
  /* Size holds the size of one cell (in bytes) in a used heap list, or the
     size of the heap sector (in number of pages). */
  size_t size;                   /* size of one cell / heap sector */
  page_header *first;            /* first of page_header list */
  page_header *last;             /* last of page_header list */
  /* If the number of objects on a page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, which is linked into this free list when not in use.  If
     the number of objects is smaller than BITS_PER_EMACS_INT, the
     mark bits are held in the mark-bits word directly, without an
     additional indirection. */
  free_link *mark_bit_free_list;

#ifdef MEMORY_USAGE_STATS
  EMACS_INT page_count;          /* number of pages in list */
  EMACS_INT used_cells;          /* number of objects in list */
  EMACS_INT used_space;          /* used space */
  EMACS_INT total_cells;         /* number of available cells */
  EMACS_INT total_space;         /* available space */
#endif
} page_list_header;
|
|
305
|
|
306
|
|
/* The heap sectors are stored with their real start pointer and their
   real size, not aligned to PAGE_SIZE.  Both are needed for freeing
   heap sectors back to the OS. */
typedef struct heap_sect {
  void *real_start;     /* real start pointer (NOT aligned) */
  size_t real_size;     /* NOT a multiple of PAGE_SIZE */
  void *start;          /* aligned start pointer */
  EMACS_INT n_pages;    /* length in pages (aligned region) */
} heap_sect;
|
|
315
|
|
316
|
|
/* 2nd tree level for mapping of heap addresses to page headers.
   One entry per page within the address range covered by the key. */
typedef struct level_2_lookup_tree {
  page_header *index[LEVEL2_SIZE];  /* link to page header */
  EMACS_INT key;                    /* high order address bits */
#ifdef USE_HASH_TABLE
  /* On 64-bit builds the first level is a hash table, so entries
     whose keys collide are chained through hash_link. */
  struct level_2_lookup_tree *hash_link;  /* hash chain link */
#endif
} level_2_lookup_tree;
|
|
325
|
|
326
|
|
327
|
|
328 /*--- global variable definitions --------------------------------------*/
|
|
329
|
|
/* All global allocator variables are kept in this struct. */
typedef struct mc_allocator_globals_type {

  /* Total heap size in bytes. */
  EMACS_INT heap_size;

  /* List of all separately allocated chunks of heap. */
  heap_sect heap_sections[MAX_HEAP_SECTS];
  EMACS_INT n_heap_sections;

  /* Holds all allocated pages, each object size class in its separate list,
     to guarantee fast allocation on partially filled pages. */
  page_list_header used_heap_pages[N_USED_PAGE_LISTS];

  /* Holds all unmanaged pages. */
  page_list_header unmanaged_heap_pages[N_UNMANAGED_PAGE_LISTS];

  /* Holds all free pages in the heap.  N multiples of PAGE_SIZE are
     kept on the Nth free list.  Contiguous pages are coalesced. */
  page_list_header free_heap_pages[N_FREE_PAGE_LISTS];

  /* First level of the heap-address-to-page-header lookup table. */
  level_2_lookup_tree *ptr_lookup_table[LEVEL1_SIZE];

  /* Free list of recycled page_header structures. */
  free_link *page_header_free_list;

#ifdef MEMORY_USAGE_STATS
  /* Total bytes obtained from the underlying malloc. */
  EMACS_INT malloced_bytes;
#endif
} mc_allocator_globals_type;

mc_allocator_globals_type mc_allocator_globals;
|
|
363
|
|
364
|
|
365
|
|
366
|
|
367 /*--- macro accessors --------------------------------------------------*/
|
|
368
|
|
369 #define USED_HEAP_PAGES(i) \
|
|
370 ((page_list_header*) &mc_allocator_globals.used_heap_pages[i])
|
|
371
|
|
372 #define UNMANAGED_HEAP_PAGES(i) \
|
|
373 ((page_list_header*) &mc_allocator_globals.unmanaged_heap_pages[i])
|
|
374
|
|
375 #define FREE_HEAP_PAGES(i) \
|
|
376 ((page_list_header*) &mc_allocator_globals.free_heap_pages[i])
|
|
377
|
|
378 #define PLH(plh) plh
|
|
379 # define PLH_LIST_TYPE(plh) PLH (plh)->list_type
|
|
380 # define PLH_SIZE(plh) PLH (plh)->size
|
|
381 # define PLH_FIRST(plh) PLH (plh)->first
|
|
382 # define PLH_LAST(plh) PLH (plh)->last
|
|
383 # define PLH_MARK_BIT_FREE_LIST(plh) PLH (plh)->mark_bit_free_list
|
|
384 #ifdef MEMORY_USAGE_STATS
|
|
385 # define PLH_PAGE_COUNT(plh) PLH (plh)->page_count
|
|
386 # define PLH_USED_CELLS(plh) PLH (plh)->used_cells
|
|
387 # define PLH_USED_SPACE(plh) PLH (plh)->used_space
|
|
388 # define PLH_TOTAL_CELLS(plh) PLH (plh)->total_cells
|
|
389 # define PLH_TOTAL_SPACE(plh) PLH (plh)->total_space
|
|
390 #endif
|
|
391
|
|
392 #define PH(ph) ph
|
|
393 # define PH_NEXT(ph) PH (ph)->next
|
|
394 # define PH_PREV(ph) PH (ph)->prev
|
|
395 # define PH_PLH(ph) PH (ph)->plh
|
|
396 # define PH_FREE_LIST(ph) PH (ph)->free_list
|
|
397 # define PH_N_PAGES(ph) PH (ph)->n_pages
|
|
398 # define PH_CELL_SIZE(ph) PH (ph)->cell_size
|
|
399 # define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page
|
|
400 # define PH_CELLS_USED(ph) PH (ph)->cells_used
|
|
401 # define PH_MARK_BITS(ph) PH (ph)->mark_bits
|
|
402 # define PH_HEAP_SPACE(ph) PH (ph)->heap_space
|
|
403 #define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph))
|
|
404 #define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph))
|
|
405
|
|
406 #define HEAP_SIZE mc_allocator_globals.heap_size
|
|
407
|
|
408 #ifdef MEMORY_USAGE_STATS
|
|
409 # define MC_MALLOCED_BYTES mc_allocator_globals.malloced_bytes
|
|
410 #endif
|
|
411
|
|
412 #define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index]
|
|
413 #define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections
|
|
414
|
|
415 #define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list
|
|
416
|
|
417 #define NEXT_FREE(free_list) ((free_link*) free_list)->next_free
|
|
418 #define FREE_LIST(free_list) (free_link*) (free_list)
|
|
419
|
|
420 #define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i]
|
|
421 #define LEVEL2(l2, i) l2->index[i]
|
|
422 # define LEVEL2_KEY(l2) l2->key
|
|
423 #ifdef USE_HASH_TABLE
|
|
424 # define LEVEL2_HASH_LINK(l2) l2->hash_link
|
|
425 #endif
|
|
426
|
|
427 #if ZERO_MEM
|
|
428 # define ZERO_HEAP_SPACE(ph) \
|
|
429 memset (PH_HEAP_SPACE (ph), '\0', PH_N_PAGES (ph) * PAGE_SIZE)
|
|
430 # define ZERO_PAGE_HEADER(ph) memset (ph, '\0', sizeof (page_header))
|
|
431 #endif
|
|
432
|
|
433 #define div_PAGE_SIZE(x) (x >> LOG_PAGE_SIZE)
|
|
434 #define mult_PAGE_SIZE(x) (x << LOG_PAGE_SIZE)
|
|
435
|
|
436 #define BYTES_TO_PAGES(bytes) (div_PAGE_SIZE ((bytes + (PAGE_SIZE - 1))))
|
|
437
|
|
438 #define PAGE_SIZE_ALIGNMENT(address) \
|
|
439 (void *) ((((EMACS_INT) (address)) + PAGE_SIZE) & ~(PAGE_SIZE - 1))
|
|
440
|
|
441 #define PH_ON_FREE_LIST_P(ph) \
|
|
442 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST))
|
|
443
|
|
444 #define PH_ON_USED_LIST_P(ph) \
|
|
445 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST))
|
|
446
|
|
447 #define PH_ON_UNMANAGED_LIST_P(ph) \
|
|
448 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == UNMANAGED_LIST))
|
|
449
|
|
450
|
|
451
|
|
452
|
|
453 /************************************************************************/
|
|
454 /* MC Allocator */
|
|
455 /************************************************************************/
|
|
456
|
|
457
|
|
458 /* ###TODO### */
|
|
459 #if 1
|
|
460 # define ALLOC_MB_UNMANAGED 1
|
|
461 #endif
|
|
462
|
|
463
|
|
464 /*--- misc functions ---------------------------------------------------*/
|
|
465
|
|
466 /* moved here from alloc.c */
|
|
467 #ifdef ERROR_CHECK_GC
|
|
468 static void
|
|
469 deadbeef_memory (void *ptr, Bytecount size)
|
|
470 {
|
|
471 UINT_32_BIT *ptr4 = (UINT_32_BIT *) ptr;
|
|
472 Bytecount beefs = size >> 2;
|
|
473
|
|
474 /* In practice, size will always be a multiple of four. */
|
|
475 while (beefs--)
|
|
476 (*ptr4++) = 0xDEADBEEF; /* -559038737 base 10 */
|
|
477 }
|
|
478 #endif /* ERROR_CHECK_GC */
|
|
479
|
|
480 /* Visits all pages (page_headers) hooked into the used heap pages
|
|
481 list and executes f with the current page header as
|
|
482 argument. Needed for sweep. */
|
|
483 static void
|
|
484 visit_all_used_page_headers (void (*f) (page_header *ph))
|
|
485 {
|
|
486 int i;
|
|
487 for (i = 0; i < N_USED_PAGE_LISTS; i++)
|
|
488 if (PLH_FIRST (USED_HEAP_PAGES (i)))
|
|
489 {
|
|
490 page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i));
|
|
491 while (PH_NEXT (ph))
|
|
492 {
|
|
493 page_header *next = PH_NEXT (ph); /* in case f removes the page */
|
|
494 f (ph);
|
|
495 ph = next;
|
|
496 }
|
|
497 f (ph);
|
|
498 }
|
|
499 }
|
|
500
|
|
501
|
|
502
|
|
503
|
|
504 /*--- mapping of heap addresses to page headers and mark bits ----------*/
|
|
505
|
|
506 /* Sets a heap pointer and page header pair into the lookup table. */
|
|
507 static void
|
|
508 set_lookup_table (void *ptr, page_header *ph)
|
|
509 {
|
|
510 int l1_index = L1_INDEX (ptr);
|
|
511 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
|
|
512 #ifdef USE_HASH_TABLE
|
|
513 while ((l2) && (LEVEL2_KEY (l2) != l1_index))
|
|
514 l2 = LEVEL2_HASH_LINK (l2);
|
|
515 #endif
|
|
516 if (!l2)
|
|
517 {
|
|
518 l2 = (level_2_lookup_tree*)
|
|
519 xmalloc_and_zero (sizeof (level_2_lookup_tree));
|
|
520 #ifdef MEMORY_USAGE_STATS
|
|
521 MC_MALLOCED_BYTES +=
|
|
522 malloced_storage_size (0, sizeof (level_2_lookup_tree), 0);
|
|
523 #endif
|
|
524 memset (l2, 0, sizeof (level_2_lookup_tree));
|
|
525 #ifdef USE_HASH_TABLE
|
|
526 LEVEL2_HASH_LINK (l2) = PTR_LOOKUP_TABLE (l1_index);
|
|
527 #endif
|
|
528 PTR_LOOKUP_TABLE (l1_index) = l2;
|
|
529 LEVEL2_KEY (l2) = l1_index;
|
|
530 }
|
|
531 LEVEL2 (l2, L2_INDEX (ptr)) = ph;
|
|
532 }
|
|
533
|
|
534
|
|
535 #ifdef UNSET_LOOKUP_TABLE
|
|
536 /* Set the lookup table to 0 for given heap address. */
|
|
537 static void
|
|
538 unset_lookup_table (void *ptr)
|
|
539 {
|
|
540 int l1_index = L1_INDEX (ptr);
|
|
541 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
|
|
542 #ifdef USE_HASH_TABLE
|
|
543 while ((l2) && (LEVEL2_KEY (l2) != l1_index))
|
|
544 l2 = LEVEL2_HASH_LINK (l2);
|
|
545 #endif
|
|
546 if (l2) {
|
|
547 LEVEL2 (l2, L2_INDEX (ptr)) = 0;
|
|
548 }
|
|
549 }
|
|
550 #endif
|
|
551
|
|
552 /* Returns the page header of a given heap address, or 0 if not in table.
|
|
553 For internal use, no error checking. */
|
|
554 static page_header *
|
|
555 get_page_header_internal (void *ptr)
|
|
556 {
|
|
557 int l1_index = L1_INDEX (ptr);
|
|
558 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
|
|
559 #ifdef USE_HASH_TABLE
|
|
560 while ((l2) && (LEVEL2_KEY (l2) != l1_index))
|
|
561 l2 = LEVEL2_HASH_LINK (l2);
|
|
562 #endif
|
|
563 if (!l2)
|
|
564 return 0;
|
|
565 return LEVEL2 (l2, L2_INDEX (ptr));
|
|
566 }
|
|
567
|
|
/* Returns the page header of a given heap address, or 0 if not in table.
   Asserting variant: callers must only pass addresses that are
   registered in the lookup table. */
static page_header *
get_page_header (void *ptr)
{
  int l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
  /* The level-1 slot (or chain head) must exist for a registered
     address. */
  assert (l2);
#ifdef USE_HASH_TABLE
  /* NOTE(review): if the chain contains no entry keyed to l1_index,
     l2 ends up NULL here and the LEVEL2 access below dereferences it
     before any assert fires -- verify callers always pass addresses
     previously registered via set_lookup_table. */
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  assert (LEVEL2 (l2, L2_INDEX (ptr)));
  return LEVEL2 (l2, L2_INDEX (ptr));
}
|
|
582
|
|
583
|
|
584 /* Returns the mark bit index of a given heap address. */
|
|
585 static EMACS_INT
|
|
586 get_mark_bit_index (void *ptr, page_header *ph)
|
|
587 {
|
|
588 EMACS_INT cell_size = PH_CELL_SIZE (ph);
|
|
589 if (cell_size)
|
|
590 return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size);
|
|
591 else /* only one object on page */
|
|
592 return 0;
|
|
593 }
|
|
594
|
|
595
|
|
596 /* Adds addresses of pages to lookup table. */
|
|
597 static void
|
|
598 add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages)
|
|
599 {
|
|
600 char *p = (char*) PH_HEAP_SPACE (ph);
|
|
601 EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages);
|
|
602 for (p = (char*) PH_HEAP_SPACE (ph);
|
|
603 (EMACS_INT) p < end_of_section; p += PAGE_SIZE)
|
|
604 set_lookup_table (p, ph);
|
|
605 }
|
|
606
|
|
607
|
|
608 /* Initializes lookup table. */
|
|
609 static void
|
|
610 init_lookup_table (void)
|
|
611 {
|
|
612 int i;
|
|
613 for (i = 0; i < LEVEL1_SIZE; i++)
|
|
614 PTR_LOOKUP_TABLE (i) = 0;
|
|
615 }
|
|
616
|
|
617
|
|
618
|
|
619
|
|
620 /*--- mark bits --------------------------------------------------------*/
|
|
621
|
|
622 /* Number of mark bits: minimum 1, maximum 8. */
|
|
623 #define N_MARK_BITS 1
|
|
624
|
|
625 /*--- bit operations --- */
|
|
626
|
|
627 /* Allocates a bit array of length bits. */
|
|
628 static char *
|
|
629 alloc_bit_array(size_t bits)
|
|
630 {
|
|
631 #ifdef ALLOC_MB_UNMANAGED
|
|
632 size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof(char);
|
|
633 if (size < sizeof (free_link)) size = sizeof (free_link);
|
|
634 return (char *) mc_alloc_unmanaged (size);
|
|
635 #else /* not ALLOC_MB_UNMANAGED */
|
|
636 size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof(char);
|
|
637 char *bit_array;
|
|
638 if (size < sizeof (free_link)) size = sizeof (free_link);
|
|
639 bit_array = (char*) xmalloc_and_zero (size);
|
|
640 #ifdef MEMORY_USAGE_STATS
|
|
641 MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0);
|
|
642 #endif
|
|
643 return bit_array;
|
|
644 #endif /* not ALLOC_MB_UNMANAGED */
|
|
645 }
|
|
646
|
|
647
|
|
648 /* Returns the bit value at pos. */
|
|
649 static EMACS_INT
|
|
650 get_bit (char *bit_array, EMACS_INT pos)
|
|
651 {
|
|
652 #if N_MARK_BITS > 1
|
|
653 EMACS_INT result = 0;
|
|
654 EMACS_INT i;
|
|
655 #endif
|
|
656 bit_array += pos / CHAR_BIT;
|
|
657 #if N_MARK_BITS > 1
|
|
658 for (i = 0; i < N_MARK_BITS; i++)
|
|
659 result |= (*bit_array & (1 << ((pos + i) % CHAR_BIT)));
|
|
660 return result >> pos;
|
|
661 #else
|
|
662 return (*bit_array & (1 << (pos % CHAR_BIT))) != 0;
|
|
663 #endif
|
|
664 }
|
|
665
|
|
666
|
|
667 /* Bit_Arrays bit at pos to val. */
|
|
668 static void
|
|
669 set_bit(char *bit_array, EMACS_INT pos, EMACS_INT val)
|
|
670 {
|
|
671 #if N_MARK_BITS > 1
|
|
672 EMACS_INT result = 0;
|
|
673 EMACS_INT i;
|
|
674 #endif
|
|
675 bit_array += pos / CHAR_BIT;
|
|
676 #if N_MARK_BITS > 1
|
|
677 for (i = 0; i < N_MARK_BITS; i++)
|
|
678 if ((val >> i) & 1)
|
|
679 *bit_array |= 1 << ((pos + i) % CHAR_BIT);
|
|
680 else
|
|
681 *bit_array &= ~(1 << ((pos + i) % CHAR_BIT));
|
|
682 #else
|
|
683 if (val)
|
|
684 *bit_array |= 1 << (pos % CHAR_BIT);
|
|
685 else
|
|
686 *bit_array &= ~(1 << (pos % CHAR_BIT));
|
|
687 #endif
|
|
688 }
|
|
689
|
|
690
|
|
691 /*--- mark bit functions ---*/
|
|
692 #define USE_PNTR_MARK_BITS(ph) (PH_CELLS_ON_PAGE (ph) > BITS_PER_EMACS_INT)
|
|
693 #define USE_WORD_MARK_BITS(ph) (PH_CELLS_ON_PAGE (ph) <= BITS_PER_EMACS_INT)
|
|
694
|
|
695 #define GET_BIT_WORD(b, p) get_bit ((char*) &b, p)
|
|
696 #define GET_BIT_PNTR(b, p) get_bit (b, p)
|
|
697
|
|
698 #define SET_BIT_WORD(b, p, v) set_bit ((char*) &b, p, v)
|
|
699 #define SET_BIT_PNTR(b, p, v) set_bit (b, p, v)
|
|
700
|
|
701 #define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0
|
|
702 #define ZERO_MARK_BITS_PNTR(ph) \
|
|
703 do { \
|
|
704 memset (PH_MARK_BITS (ph), '\0', \
|
|
705 (PH_CELLS_ON_PAGE (ph) + CHAR_BIT - 1) \
|
|
706 / CHAR_BIT * sizeof(char)); \
|
|
707 } while (0)
|
|
708
|
|
709 #define GET_BIT(bit, ph, p) \
|
|
710 do { \
|
|
711 if (USE_PNTR_MARK_BITS (ph)) \
|
|
712 bit = GET_BIT_PNTR (PH_MARK_BITS (ph), p); \
|
|
713 else \
|
|
714 bit = GET_BIT_WORD (PH_MARK_BITS (ph), p); \
|
|
715 } while (0)
|
|
716
|
|
717 #define SET_BIT(ph, p, v) \
|
|
718 do { \
|
|
719 if (USE_PNTR_MARK_BITS (ph)) \
|
|
720 SET_BIT_PNTR (PH_MARK_BITS (ph), p, v); \
|
|
721 else \
|
|
722 SET_BIT_WORD (PH_MARK_BITS (ph), p, v); \
|
|
723 } while (0)
|
|
724
|
|
725 #define ZERO_MARK_BITS(ph) \
|
|
726 do { \
|
|
727 if (USE_PNTR_MARK_BITS (ph)) \
|
|
728 ZERO_MARK_BITS_PNTR (ph); \
|
|
729 else \
|
|
730 ZERO_MARK_BITS_WORD (ph); \
|
|
731 } while (0)
|
|
732
|
|
733
|
|
734 /* Allocates mark-bit space either from a free list or from the OS
|
|
735 for the given page header. */
|
|
736 static char *
|
|
737 alloc_mark_bits (page_header *ph)
|
|
738 {
|
|
739 char *result;
|
|
740 if (PH_MARK_BIT_FREE_LIST (ph) == 0)
|
|
741 result = (char*) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
|
|
742 else
|
|
743 {
|
|
744 result = (char*) PH_MARK_BIT_FREE_LIST (ph);
|
|
745 PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result);
|
|
746 }
|
|
747 return result;
|
|
748 }
|
|
749
|
|
750
|
|
751 /* Frees by maintaining a free list. */
|
|
752 static void
|
|
753 free_mark_bits (page_header *ph)
|
|
754 {
|
|
755 #ifdef ALLOC_MB_UNMANAGED
|
|
756 if (PH_MARK_BITS (ph))
|
|
757 mc_free (PH_MARK_BITS (ph));
|
|
758 #else /* not ALLOC_MB_UNMANAGED */
|
|
759 if (PH_MARK_BITS (ph)) {
|
|
760 NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph);
|
|
761 PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph));
|
|
762 }
|
|
763 #endif /* not ALLOC_MB_UNMANAGED */
|
|
764 }
|
|
765
|
|
766
|
|
767 /* Installs mark bits and zeros bits. */
|
|
768 static void
|
|
769 install_mark_bits (page_header *ph)
|
|
770 {
|
|
771 if (USE_PNTR_MARK_BITS (ph))
|
|
772 {
|
|
773 PH_MARK_BITS (ph) = alloc_mark_bits (ph);
|
|
774 ZERO_MARK_BITS_PNTR (ph);
|
|
775 }
|
|
776 else
|
|
777 ZERO_MARK_BITS_WORD (ph);
|
|
778 }
|
|
779
|
|
780
|
|
781 /* Cleans and frees the mark bits of the given page_header. */
|
|
782 static void
|
|
783 remove_mark_bits (page_header *ph)
|
|
784 {
|
|
785 if (USE_PNTR_MARK_BITS (ph))
|
|
786 free_mark_bits (ph);
|
|
787 }
|
|
788
|
|
789
|
|
/* Zeros all mark bits in the given page header, regardless of
   whether they are stored in-word or out-of-line. */
static void
zero_mark_bits (page_header *ph)
{
  ZERO_MARK_BITS (ph);
}
|
|
796
|
|
797
|
|
798 /* Returns mark bit for given heap pointer. */
|
|
799 EMACS_INT
|
|
800 get_mark_bit (void *ptr)
|
|
801 {
|
|
802 EMACS_INT bit = 0;
|
|
803 page_header *ph = get_page_header (ptr);
|
|
804 gc_checking_assert (ph && PH_ON_USED_LIST_P (ph));
|
|
805 if (ph)
|
|
806 {
|
|
807 GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
|
|
808 }
|
|
809 return bit;
|
|
810 }
|
|
811
|
|
812
|
|
813 /* Sets mark bit for given heap pointer. */
|
|
814 void
|
|
815 set_mark_bit (void *ptr, EMACS_INT value)
|
|
816 {
|
|
817 page_header *ph = get_page_header (ptr);
|
|
818 assert (ph && PH_ON_USED_LIST_P (ph));
|
|
819 if (ph)
|
|
820 {
|
|
821 SET_BIT (ph, get_mark_bit_index (ptr, ph), value);
|
|
822 }
|
|
823 }
|
|
824
|
|
825
|
|
826
|
|
827
|
|
828 /*--- page header functions --------------------------------------------*/
|
|
829
|
|
830 /* Allocates a page header either from a free list or from the OS. */
|
|
831 static page_header *
|
|
832 alloc_page_header (void)
|
|
833 {
|
|
834 page_header *result;
|
|
835 if (PAGE_HEADER_FREE_LIST == 0)
|
|
836 {
|
|
837 result =
|
|
838 (page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
|
|
839 #ifdef MEMORY_USAGE_STATS
|
|
840 MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
|
|
841 #endif
|
|
842
|
|
843 }
|
|
844 else
|
|
845 {
|
|
846 result = (page_header*) PAGE_HEADER_FREE_LIST;
|
|
847 PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
|
|
848 }
|
|
849 return result;
|
|
850 }
|
|
851
|
|
852
|
|
/* Frees the given page header by pushing it onto the page header
   free list for later reuse by alloc_page_header. */
static void
free_page_header (page_header *ph)
{
#if ZERO_MEM
  /* Scrub the header before recycling; must happen before the
     free-list link below is written into it. */
  ZERO_PAGE_HEADER (ph);
#endif
  NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
  PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
}
|
|
863
|
|
864
|
|
865 /* Adds given page header to given page list header's list. */
|
|
866 static void
|
|
867 add_page_header_to_plh (page_header *ph, page_list_header *plh)
|
|
868 {
|
|
869 /* insert at the front of the list */
|
|
870 PH_PREV (ph) = 0;
|
|
871 PH_NEXT (ph) = PLH_FIRST (plh);
|
|
872 PH_PLH (ph) = plh;
|
|
873 /* if list is not empty, set prev in the first element */
|
|
874 if (PLH_FIRST (plh))
|
|
875 PH_PREV (PLH_FIRST (plh)) = ph;
|
|
876 /* one element in list is first and last at the same time */
|
|
877 PLH_FIRST (plh) = ph;
|
|
878 if (!PLH_LAST (plh))
|
|
879 PLH_LAST (plh) = ph;
|
|
880
|
|
881 #ifdef MEMORY_USAGE_STATS
|
|
882 /* bump page count */
|
|
883 PLH_PAGE_COUNT (plh)++;
|
|
884 #endif
|
|
885
|
|
886 }
|
|
887
|
|
888
|
|
/* Removes given page header from given page list header's list.
   Fixes up head/tail pointers and the neighbors' links; the removed
   page's own PH_NEXT/PH_PREV are left untouched. */
static void
remove_page_header_from_plh (page_header *ph, page_list_header *plh)
{
  /* unhook from head/tail if ph is first/last */
  if (PLH_FIRST (plh) == ph)
    PLH_FIRST (plh) = PH_NEXT (ph);
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  /* splice neighbors together */
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);

#ifdef MEMORY_USAGE_STATS
  /* decrease page count */
  PLH_PAGE_COUNT (plh)--;
#endif
}
|
|
907
|
|
908
|
|
/* Moves a page header to the front of its page header list.
   This is used during sweep: Pages with some alive objects are moved to
   the front.  This makes allocation faster, all pages with free slots
   can be found at the front of the list. */
static void
move_page_header_to_front (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);
  /* is page already first? */
  if (ph == PLH_FIRST (plh)) return;
  /* remove from list; ph is not first here, so PLH_FIRST needs no fix */
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
  /* insert at the front; the old first element always exists because
     ph was not first, so PH_NEXT (ph) is non-null after this */
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PREV (ph) = 0;
  PH_PREV (PH_NEXT (ph)) = ph;
  PLH_FIRST (plh) = ph;
}
|
|
932
|
|
933
|
|
934
|
|
935
|
|
936 /*--- page list functions ----------------------------------------------*/
|
|
937
|
|
938 /* Returns the index of the used heap list according to given size. */
|
|
939 static int
|
|
940 get_used_list_index (size_t size)
|
|
941 {
|
|
942 if (size <= USED_LIST_MIN_OBJECT_SIZE)
|
|
943 return 0;
|
|
944 if (size <= USED_LIST_UPPER_THRESHOLD)
|
|
945 return ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
|
|
946 / USED_LIST_LIN_STEP) + 1;
|
|
947 return N_USED_PAGE_LISTS - 1;
|
|
948 }
|
|
949
|
|
950
|
|
951 /* Returns the size of the used heap list according to given index. */
|
|
952 static size_t
|
|
953 get_used_list_size_value (int used_index)
|
|
954 {
|
|
955 if (used_index < N_USED_PAGE_LISTS - 1)
|
|
956 return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE;
|
|
957 return 0;
|
|
958 }
|
|
959
|
|
960
|
|
961 /* Returns the index of the used heap list according to given size. */
|
|
962 static int
|
|
963 get_unmanaged_list_index (size_t size)
|
|
964 {
|
|
965 if (size <= UNMANAGED_LIST_MIN_OBJECT_SIZE)
|
|
966 return 0;
|
|
967 if (size <= UNMANAGED_LIST_UPPER_THRESHOLD)
|
|
968 return ((size - UNMANAGED_LIST_MIN_OBJECT_SIZE - 1)
|
|
969 / UNMANAGED_LIST_LIN_STEP) + 1;
|
|
970 return N_UNMANAGED_PAGE_LISTS - 1;
|
|
971 }
|
|
972
|
|
973
|
|
974 /* Returns the size of the unmanaged heap list according to given index. */
|
|
975 static size_t
|
|
976 get_unmanaged_list_size_value (int unmanaged_index)
|
|
977 {
|
|
978 if (unmanaged_index < N_UNMANAGED_PAGE_LISTS - 1)
|
|
979 return (unmanaged_index * UNMANAGED_LIST_LIN_STEP)
|
|
980 + UNMANAGED_LIST_MIN_OBJECT_SIZE;
|
|
981 return 0;
|
|
982 }
|
|
983
|
|
984
|
|
/* Returns the index of the free heap list according to given size in
   pages: one list per page count up to FREE_LIST_LOWER_THRESHOLD, a
   linear mapping above that, and a single final list for the largest
   runs. */
static int
get_free_list_index (EMACS_INT n_pages)
{
  if (n_pages == 0)
    return 0;
  /* small runs: one dedicated list per page count */
  if (n_pages <= FREE_LIST_LOWER_THRESHOLD)
    return n_pages - 1;
  /* very large runs all share the last list */
  if (n_pages >= FREE_LIST_UPPER_THRESHOLD - 1)
    return N_FREE_PAGE_LISTS - 1;
  /* linear region: one list per FREE_LIST_LIN_STEP pages */
  return ((n_pages - FREE_LIST_LOWER_THRESHOLD - 1)
	  / FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;

}
|
|
999
|
|
1000
|
|
/* Returns the size in number of pages of the given free list at index.
   Inverse of get_free_list_index for the dedicated and linear
   regions. */
static size_t
get_free_list_size_value (int free_index)
{
  if (free_index < FREE_LIST_LOWER_THRESHOLD)
    return free_index + 1;
  /* NOTE(review): free_index comes from list iteration and is always
     < N_FREE_PAGE_LISTS, so this branch looks unreachable; the final
     list's size ends up computed by the linear formula below --
     confirm against the init_mc_allocator callers before relying on
     this value for the last list. */
  if (free_index >= N_FREE_PAGE_LISTS)
    return FREE_LIST_UPPER_THRESHOLD;
  return ((free_index + 1 - FREE_LIST_LOWER_THRESHOLD)
	  * FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}
|
|
1012
|
|
1013
|
|
#ifdef MEMORY_USAGE_STATS
/* Returns the number of bytes actually consumed by an object of
   CLAIMED_SIZE bytes: the cell size of its size class, or whole pages
   for oversized objects.  If STATS is non-null, accumulates the
   requested size and the size-class overhead into it. */
Bytecount
mc_alloced_storage_size (Bytecount claimed_size, struct overhead_stats *stats)
{
  size_t used_size =
    get_used_list_size_value (get_used_list_index (claimed_size));
  /* size 0 means the final, variable-size list: such objects occupy
     whole pages */
  if (used_size == 0)
    used_size = mult_PAGE_SIZE (BYTES_TO_PAGES (claimed_size));

  if (stats)
    {
      stats->was_requested += claimed_size;
      stats->malloc_overhead += used_size - claimed_size;
    }

  return used_size;
}
#endif /* MEMORY_USAGE_STATS */
|
|
1032
|
|
1033
|
|
1034
|
|
1035 /*--- free heap functions ----------------------------------------------*/
|
|
1036
|
|
/* Frees a heap section, if the heap section is completely free,
   i.e. if PH covers exactly one heap section.  Returns 1 if a section
   was freed, else 0.  After the matching section is released, the
   remaining sections are shifted down one slot to keep the
   HEAP_SECTION array compact. */
static EMACS_INT
free_heap_section (page_header *ph)
{
  int i;
  int removed = 0;
  for (i = 0; i < N_HEAP_SECTIONS; i++)
    if (!removed)
      {
	/* ph must span the whole section, start to n_pages */
	if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
	    && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
	  {
	    xfree_1 (HEAP_SECTION(i).real_start);
#ifdef MEMORY_USAGE_STATS
	    MC_MALLOCED_BYTES
	      -= malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
#endif

	    HEAP_SIZE -= PH_N_PAGES (ph) * PAGE_SIZE;

	    removed = 1;
	  }
      }
    else
      {
	/* compact: move each later section one slot down */
	HEAP_SECTION(i-1).real_start = HEAP_SECTION(i).real_start;
	HEAP_SECTION(i-1).real_size = HEAP_SECTION(i).real_size;
	HEAP_SECTION(i-1).start = HEAP_SECTION(i).start;
	HEAP_SECTION(i-1).n_pages = HEAP_SECTION(i).n_pages;
      }

  N_HEAP_SECTIONS = N_HEAP_SECTIONS - removed;

  return removed;
}
|
|
1072
|
|
/* Removes page from free list and clears its back pointer; a zero
   PH_PLH marks the page as belonging to no list. */
static void
remove_page_from_free_list (page_header *ph)
{
  remove_page_header_from_plh (ph, PH_PLH (ph));
  PH_PLH (ph) = 0;
}
|
|
1080
|
|
1081
|
|
/* Adds page to the free list whose size class matches the page run's
   length. */
static void
add_page_to_free_list (page_header *ph)
{
  PH_PLH (ph) = FREE_HEAP_PAGES (get_free_list_index (PH_N_PAGES (ph)));
  add_page_header_to_plh (ph, PH_PLH (ph));
}
|
|
1089
|
|
1090
|
|
/* Merges two adjacent page runs into the first one and returns it.
   The caller must ensure SECOND_PH immediately follows FIRST_PH in
   memory and that neither is on a free list. */
static page_header *
merge_pages (page_header *first_ph, page_header *second_ph)
{
  /* merge */
  PH_N_PAGES (first_ph) += PH_N_PAGES (second_ph);
  /* clean up left over page header */
  free_page_header (second_ph);
  /* update lookup table: all pages of the run now map to first_ph */
  add_pages_to_lookup_table (first_ph, PH_N_PAGES (first_ph));

  return first_ph;
}
|
|
1104
|
|
1105
|
|
/* Checks if pages adjacent to PH are free, merges them, and either
   frees the containing heap section (when the merged run covers it
   completely) or adds the merged page run to the free list. */
static void
merge_into_free_list (page_header *ph)
{
  /* check if you can coalesce adjacent pages: look up the page just
     before and the page just after this run.  The lookups may yield 0
     for addresses outside the heap -- presumably PH_ON_FREE_LIST_P
     copes with 0; confirm in mc-alloc.h. */
  page_header *prev_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
				       - PAGE_SIZE));
  page_header *succ_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
				       + (PH_N_PAGES (ph) * PAGE_SIZE)));
  if (PH_ON_FREE_LIST_P (prev_ph))
    {
      remove_page_from_free_list (prev_ph);
      ph = merge_pages (prev_ph, ph);
    }
  if (PH_ON_FREE_LIST_P (succ_ph))
    {
      remove_page_from_free_list (succ_ph);
      ph = merge_pages (ph, succ_ph);
    }
  /* try to free heap_section, if the section is complete */
  if (!free_heap_section (ph))
    /* else add merged page to free list */
    add_page_to_free_list (ph);
}
|
|
1133
|
|
1134
|
|
/* Cuts given page header after n_pages, returns the first (cut) part, and
   puts the rest on the free list.  The caller must guarantee
   PH_N_PAGES (ph) >= n_pages. */
static page_header *
split_page (page_header *ph, EMACS_INT n_pages)
{
  page_header *new_ph;
  EMACS_INT rem_pages = PH_N_PAGES (ph) - n_pages;

  /* remove the page from the free list if already hooked in */
  if (PH_PLH (ph))
    remove_page_from_free_list (ph);
  /* set new number of pages */
  PH_N_PAGES (ph) = n_pages;
  /* add new page to lookup table */
  add_pages_to_lookup_table (ph, n_pages);

  if (rem_pages)
    {
      /* build new page with the remainder */
      new_ph = alloc_page_header ();
      PH_N_PAGES (new_ph) = rem_pages;
      /* remainder starts right behind the cut-off part */
      PH_HEAP_SPACE (new_ph) =
	(void*) ((EMACS_INT) (PH_HEAP_SPACE (ph)) + (n_pages * PAGE_SIZE));
      /* add new page to lookup table */
      add_pages_to_lookup_table (new_ph, rem_pages);
      /* hook the rest into free list */
      add_page_to_free_list (new_ph);
    }
  return ph;
}
|
|
1165
|
|
1166
|
|
/* Expands the heap by at least the given number of pages: grows
   proportionally to the current heap size (HEAP_GROWTH_DIVISOR) with a
   floor of MIN_HEAP_INCREASE, registers the new memory as a heap
   section, and returns a page header for exactly NEEDED_PAGES, putting
   the remainder on the free list.  Aborts if MAX_HEAP_SECTS is
   exceeded. */
static page_header *
expand_heap (EMACS_INT needed_pages)
{
  page_header *ph;
  EMACS_INT n_pages;
  size_t real_size;
  void *real_start;

  /* determine number of pages the heap should grow */
  n_pages = needed_pages + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR));
  if (n_pages < MIN_HEAP_INCREASE)
    n_pages = MIN_HEAP_INCREASE;

  /* get the real values; one extra page so the start can be aligned
     to a PAGE_SIZE boundary below */
  real_size = (n_pages * PAGE_SIZE) + PAGE_SIZE;
  real_start = xmalloc_and_zero (real_size);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, real_size, 0);
#endif

  /* maintain heap section count */
  if (N_HEAP_SECTIONS >= MAX_HEAP_SECTS)
    {
      stderr_out ("Increase number of MAX_HEAP_SECTS");
      ABORT ();
    }
  HEAP_SECTION(N_HEAP_SECTIONS).real_start = real_start;
  HEAP_SECTION(N_HEAP_SECTIONS).real_size = real_size;
  HEAP_SECTION(N_HEAP_SECTIONS).start = PAGE_SIZE_ALIGNMENT (real_start);
  HEAP_SECTION(N_HEAP_SECTIONS).n_pages = n_pages;
  N_HEAP_SECTIONS ++;

  /* get page header */
  ph = alloc_page_header ();

  /* setup page header */
  PH_N_PAGES (ph) = n_pages;
  PH_HEAP_SPACE (ph) = PAGE_SIZE_ALIGNMENT (real_start);
  assert (((EMACS_INT) (PH_HEAP_SPACE (ph)) % PAGE_SIZE) == 0);
  HEAP_SIZE += n_pages * PAGE_SIZE;

  /* this was also done by allocate_lisp_storage */
  if (need_to_check_c_alloca)
    xemacs_c_alloca (0);

  /* return needed size, put rest on free list */
  return split_page (ph, needed_pages);
}
|
|
1216
|
|
1217
|
|
1218
|
|
1219
|
|
1220 /*--- used heap functions ----------------------------------------------*/
|
|
/* Installs the initial cell free list on a freshly installed page:
   threads every cell on the page into a singly linked list and hooks
   that list into the page header.  Also records every cell's page
   header in the lookup table. */
static void
install_cell_free_list (page_header *ph)
{
  char *p;
  int i;
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  /* write initial free list; for large cells the loop below simply
     degenerates to the single trailing cell */
  p = (char *) PH_HEAP_SPACE (ph);
  for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
    {
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (p));
      MARK_LRECORD_AS_FREE (p);
#endif
      /* each cell points to its successor */
      NEXT_FREE (p) = FREE_LIST (p + cell_size);
      set_lookup_table (p, ph);
      p += cell_size;
    }
  /* last cell terminates the list */
#ifdef ERROR_CHECK_GC
  assert (!LRECORD_FREE_P (p));
  MARK_LRECORD_AS_FREE (p);
#endif
  NEXT_FREE (p) = 0;
  set_lookup_table (p, ph);

  /* hook free list into header */
  PH_FREE_LIST (ph) = FREE_LIST (PH_HEAP_SPACE (ph));
}
|
|
1250
|
|
1251
|
|
/* Cleans the object space of the given page_header and drops its cell
   free list. */
static void
remove_cell_free_list (page_header *ph)
{
#if ZERO_MEM
  ZERO_HEAP_SPACE (ph);
#endif
  PH_FREE_LIST (ph) = 0;
}
|
|
1261
|
|
1262
|
|
/* Installs a new page and hooks it into given page_list_header:
   determines the cell size (the list's size class, or SIZE itself for
   the variable-size list), initializes the cell free list, and -- for
   managed pages -- installs mark bits.  Returns PH. */
static page_header *
install_page_in_used_list (page_header *ph, page_list_header *plh,
			   size_t size, int managed)
{
  /* add to list */
  add_page_header_to_plh (ph, plh);

  /* determine cell size: PLH_SIZE is 0 for the variable-size list, in
     which case the whole page run holds one object of SIZE bytes */
  if (PLH_SIZE (plh))
    PH_CELL_SIZE (ph) = PLH_SIZE (plh);
  else
    PH_CELL_SIZE (ph) = size;
  PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph);

  /* init cell count */
  PH_CELLS_USED (ph) = 0;

  /* install mark bits and initialize cell free list */
  if (managed)
    install_mark_bits (ph);

  install_cell_free_list (ph);

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph);
#endif

  return ph;
}
|
|
1294
|
|
1295
|
|
/* Cleans and frees a page, identified by the given page_header:
   removes it from its used list, resets its bookkeeping fields, and
   merges it back into the free heap. */
static void
remove_page_from_used_list (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
#endif

  /* clean up mark bits and cell free list; only managed (used-list)
     pages carry mark bits */
  remove_cell_free_list (ph);
  if (PH_ON_USED_LIST_P (ph))
    remove_mark_bits (ph);

  /* clean up page header */
  PH_CELL_SIZE (ph) = 0;
  PH_CELLS_ON_PAGE (ph) = 0;
  PH_CELLS_USED (ph) = 0;

  /* remove from used list */
  remove_page_header_from_plh (ph, plh);

  /* move to free list */
  merge_into_free_list (ph);
}
|
|
1323
|
|
1324
|
|
1325
|
|
1326
|
|
1327 /*--- allocation -------------------------------------------------------*/
|
|
1328
|
|
/* Allocates from cell free list on already allocated pages: returns a
   page of PLH with at least one free cell, or 0 if none is found.
   Only the first two pages are inspected -- the sweep keeps pages with
   free slots at the front, so a full first page is rotated to the back
   and the second page promoted instead of scanning the whole list. */
static page_header *
allocate_cell (page_list_header *plh)
{
  page_header *ph = PLH_FIRST (plh);
  if (ph)
    {
      if (PH_FREE_LIST (ph))
	/* elements free on first page */
	return ph;
      else if ((PH_NEXT (ph))
	       && (PH_FREE_LIST (PH_NEXT (ph))))
	/* elements free on second page */
	{
	  page_header *temp = PH_NEXT (ph);
	  /* move full page (first page) to end of list */
	  PH_NEXT (PLH_LAST (plh)) = ph;
	  PH_PREV (ph) = PLH_LAST (plh);
	  PLH_LAST (plh) = ph;
	  PH_NEXT (ph) = 0;
	  /* install second page as first page */
	  ph = temp;
	  PH_PREV (ph) = 0;
	  PLH_FIRST (plh) = ph;
	  return ph;
	}
    }
  /* no free cell within reach; caller must allocate a new page */
  return 0;
}
|
|
1358
|
|
1359
|
|
1360 /* Finds a page which has at least the needed number of pages.
|
|
1361 Algorithm: FIRST FIT. */
|
|
1362 static page_header *
|
|
1363 find_free_page_first_fit (EMACS_INT needed_pages, page_header *ph)
|
|
1364 {
|
|
1365 while (ph)
|
|
1366 {
|
|
1367 if (PH_N_PAGES (ph) >= needed_pages)
|
|
1368 return ph;
|
|
1369 ph = PH_NEXT (ph);
|
|
1370 }
|
|
1371 return 0;
|
|
1372 }
|
|
1373
|
|
1374
|
|
/* Allocates a page run from the free lists, starting at the smallest
   size class that can satisfy NEEDED_PAGES and working upward.  An
   oversized run is split and the remainder returned to the free heap.
   Returns 0 when the free lists cannot satisfy the request. */
static page_header *
allocate_page_from_free_list (EMACS_INT needed_pages)
{
  page_header *ph = 0;
  int i;
  for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
    if ((ph = find_free_page_first_fit (needed_pages,
					PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
      {
	if (PH_N_PAGES (ph) > needed_pages)
	  /* too big: keep needed_pages, free the rest */
	  return split_page (ph, needed_pages);
	else
	  {
	    /* exact fit */
	    remove_page_from_free_list (ph);
	    return ph;
	  }
      }
  return 0;
}
|
|
1395
|
|
1396
|
|
/* Allocates a new page for PLH, either from the free list or by
   expanding the heap, then installs it in the used list.  MANAGED
   selects whether mark bits are installed. */
static page_header *
allocate_new_page (page_list_header *plh, size_t size, int managed)
{
  EMACS_INT needed_pages = BYTES_TO_PAGES (size);
  /* first check free list */
  page_header *result = allocate_page_from_free_list (needed_pages);
  if (!result)
    /* expand heap */
    result = expand_heap (needed_pages);
  install_page_in_used_list (result, plh, size, managed);
  return result;
}
|
|
1410
|
|
1411
|
|
/* Selects the correct size class, tries to allocate a cell of this size
   from the free list, if this fails, a new page is allocated.  Returns
   zeroed memory; managed cells are additionally pre-marked as free
   lrecords.  Returns 0 for SIZE == 0. */
static void *
mc_alloc_1 (size_t size, int managed)
{
  page_list_header *plh = 0;
  page_header *ph = 0;
  void *result = 0;

  /* pick the managed or unmanaged size-class list */
  if (managed)
    plh = USED_HEAP_PAGES (get_used_list_index (size));
  else
    plh = UNMANAGED_HEAP_PAGES (get_unmanaged_list_index (size));

  if (size == 0)
    return 0;
  /* objects of half a page or more always get fresh pages; smaller
     ones first try the cell free lists of existing pages */
  if (size < PAGE_SIZE_DIV_2)
    /* first check any free cells */
    ph = allocate_cell (plh);
  if (!ph)
    /* allocate a new page */
    ph = allocate_new_page (plh, size, managed);

  /* return first element of free list and remove it from the list */
  result = (void*) PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) =
    NEXT_FREE (PH_FREE_LIST (ph));

  memset (result, '\0', size);
  if (managed)
    MARK_LRECORD_AS_FREE (result);

  /* bump used cells counter */
  PH_CELLS_USED (ph)++;

#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (plh)++;
  /* NOTE(review): managed accounting uses the requested size,
     unmanaged the size class -- looks intentional but asymmetric;
     confirm against diagnose.el consumers. */
  if (managed)
    PLH_USED_SPACE (plh) += size;
  else
    PLH_USED_SPACE (plh) += PLH_SIZE (plh);
#endif

  return result;
}
|
|
1457
|
|
/* Allocates SIZE bytes of managed (garbage-collected) storage. */
void *
mc_alloc (size_t size)
{
  return mc_alloc_1 (size, 1);
}
|
|
1463
|
|
/* Allocates SIZE bytes of unmanaged storage; the caller must free it
   explicitly with mc_free. */
void *
mc_alloc_unmanaged (size_t size)
{
  return mc_alloc_1 (size, 0);
}
|
|
1469
|
|
1470
|
|
1471
|
|
1472
|
|
1473 /*--- sweep & free & finalize-------------------------------------------*/
|
|
1474
|
|
/* Frees a heap pointer: updates usage statistics, poisons the cell
   when error checking is on, and pushes the cell onto its page's free
   list. */
static void
remove_cell (void *ptr, page_header *ph)
{
#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (PH_PLH (ph))--;
  /* managed cells are lrecords, so their real size is recoverable;
     unmanaged cells are accounted at their size class */
  if (PH_ON_USED_LIST_P (ph))
    PLH_USED_SPACE (PH_PLH (ph)) -=
      detagged_lisp_object_size ((const struct lrecord_header *) ptr);
  else
    PLH_USED_SPACE (PH_PLH (ph)) -= PH_CELL_SIZE (ph);
#endif
  if (PH_ON_USED_LIST_P (ph))
    {
#ifdef MC_ALLOC_TYPE_STATS
      dec_lrecord_stats (PH_CELL_SIZE (ph),
			 (const struct lrecord_header *) ptr);
#endif /* MC_ALLOC_TYPE_STATS */
#ifdef ERROR_CHECK_GC
      /* catch double frees, then poison the cell to expose
	 use-after-free */
      assert (!LRECORD_FREE_P (ptr));
      deadbeef_memory (ptr, PH_CELL_SIZE (ph));
      MARK_LRECORD_AS_FREE (ptr);
#endif
    }

  /* hooks cell into free list */
  NEXT_FREE (ptr) = PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) = FREE_LIST (ptr);
  /* decrease cells used */
  PH_CELLS_USED (ph)--;
}
|
|
1506
|
|
1507
|
|
1508 /* Mark free list marks all free list entries. */
|
|
1509 static void
|
|
1510 mark_free_list (page_header *ph)
|
|
1511 {
|
|
1512 free_link *fl = PH_FREE_LIST (ph);
|
|
1513 while (fl)
|
|
1514 {
|
|
1515 SET_BIT (ph, get_mark_bit_index (fl, ph), 1);
|
|
1516 fl = NEXT_FREE (fl);
|
|
1517 }
|
|
1518 }
|
|
1519
|
|
/* Finalize a page.  You have to tell mc-alloc how to call your
   object's finalizer.  Therefore, you have to define the macro
   MC_ALLOC_CALL_FINALIZER(ptr).  This macro should do nothing else
   then test if there is a finalizer and call it on the given
   argument, which is the heap address of the object.
   Free cells are marked first so that only allocated (unmarked)
   cells are finalized. */
static void
finalize_page (page_header *ph)
{
  EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  int bit = 0;

  /* protect free cells: set their mark bits */
  mark_free_list (ph);

  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit);
      if (!bit)
	{
	  /* unmarked => allocated object: run its finalizer */
	  EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
	  MC_ALLOC_CALL_FINALIZER ((void *) ptr);
	}
    }
}
|
|
1546
|
|
1547
|
|
/* Finalize a page for disksave.  XEmacs calls this routine before it
   dumps the heap image.  You have to tell mc-alloc how to call your
   object's finalizer for disksave.  Therefore, you have to define the
   macro MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr).  This macro should
   do nothing else then test if there is a finalizer and call it on
   the given argument, which is the heap address of the object.
   Note: unlike finalize_page, every cell is visited, free or not --
   presumably the disksave finalizer macro tolerates free cells;
   confirm where the macro is defined. */
static void
finalize_page_for_disksave (page_header *ph)
{
  EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);

  mark_free_list (ph);

  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
      MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
    }
}
|
|
1570
|
|
1571
|
|
/* Finalizes the heap: runs finalizers over every used page. */
void
mc_finalize (void)
{
  visit_all_used_page_headers (finalize_page);
}
|
|
1578
|
|
1579
|
|
/* Finalizes the heap for disksave, called before dumping the heap
   image. */
void
mc_finalize_for_disksave (void)
{
  visit_all_used_page_headers (finalize_page_for_disksave);
}
|
|
1586
|
|
1587
|
|
/* Sweeps a page: all the non-marked cells are freed.  If the page is empty
   in the end, it is removed.  If some cells are free, it is moved to the
   front of its page header list.  Full pages stay where they are. */
static void
sweep_page (page_header *ph)
{
  char *heap_space = (char *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  int bit = 0;

  /* pre-mark free cells so they are not "freed" a second time */
  mark_free_list (ph);

  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit);
      if (!bit)
	{
	  /* unmarked allocated cell: garbage, reclaim it */
	  remove_cell (heap_space + (heap_space_step * mark_bit), ph);
	}
    }
  /* reset mark bits for the next collection */
  zero_mark_bits (ph);
  if (PH_CELLS_USED (ph) == 0)
    remove_page_from_used_list (ph);
  else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
    move_page_header_to_front (ph);
}
|
|
1616
|
|
1617
|
|
/* Sweeps the heap: reclaims all unmarked cells on every used page. */
void
mc_sweep (void)
{
  visit_all_used_page_headers (sweep_page);
}
|
|
1624
|
|
1625
|
|
/* Frees the cell pointed to by ptr.  Empty pages are returned to the
   free heap; partially used pages move to the front of their list so
   the free slot is found quickly. */
void
mc_free (void *ptr)
{
  page_header *ph = get_page_header (ptr);
  /* freeing into a page already on the free heap is a double free */
  assert (!PH_ON_FREE_LIST_P (ph));

  remove_cell (ptr, ph);

  if (PH_CELLS_USED (ph) == 0)
    remove_page_from_used_list (ph);
  else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
    move_page_header_to_front (ph);
}
|
|
1640
|
|
1641
|
|
/* Changes the size of the cell pointed to by ptr.
   Returns the new address of the new cell with new size.  Follows the
   usual realloc contract: a null PTR degenerates to an allocation, a
   zero SIZE to a free. */
void *
mc_realloc_1 (void *ptr, size_t size, int managed)
{
  void *new_cell;
  size_t old_size;

  if (!ptr)
    return mc_alloc_1 (size, managed);

  if (!size)
    {
      mc_free (ptr);
      return 0;
    }

  /* allocate the new cell, copy over the smaller of old and new
     size, and release the old cell */
  new_cell = mc_alloc_1 (size, managed);
  old_size = PH_CELL_SIZE (get_page_header (ptr));
  memcpy (new_cell, ptr, size < old_size ? size : old_size);
  mc_free (ptr);
  return new_cell;
}
|
|
1669
|
|
/* Reallocates managed storage; see mc_realloc_1. */
void *
mc_realloc (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 1);
}
|
|
1675
|
|
/* Reallocates unmanaged storage; see mc_realloc_1. */
void *
mc_realloc_unmanaged (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 0);
}
|
|
1681
|
|
1682
|
|
1683
|
|
1684
|
|
1685 /*--- initialization ---------------------------------------------------*/
|
|
1686
|
|
/* Call once at the very beginning.  Initializes every used, unmanaged,
   and free page list header to an empty list with its size-class
   value, resets the page header free list, seeds the malloc byte
   counter, and initializes the page lookup table. */
void
init_mc_allocator (void)
{
  int i;

  /* managed (garbage-collected) size classes */
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    {
      page_list_header *plh = USED_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = USED_LIST;
      PLH_SIZE (plh) = get_used_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

  /* unmanaged (explicitly freed) size classes */
  for (i = 0; i < N_UNMANAGED_PAGE_LISTS; i++)
    {
      page_list_header *plh = UNMANAGED_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = UNMANAGED_LIST;
      PLH_SIZE (plh) = get_unmanaged_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

  /* free-heap lists, classed by run length in pages */
  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    {
      page_list_header *plh = FREE_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = FREE_LIST;
      PLH_SIZE (plh) = get_free_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

  PAGE_HEADER_FREE_LIST = 0;

#ifdef MEMORY_USAGE_STATS
  /* the allocator's own globals count as malloced overhead */
  MC_MALLOCED_BYTES = sizeof (mc_allocator_globals);
#endif

  init_lookup_table ();
}
|
|
1752
|
|
1753
|
|
1754
|
|
1755
|
|
1756 /*--- lisp function for statistics -------------------------------------*/
|
|
1757
|
|
#ifdef MEMORY_USAGE_STATS
DEFUN ("mc-alloc-memory-usage", Fmc_alloc_memory_usage, 0, 0, 0, /*
Returns stats about the mc-alloc memory usage. See diagnose.el.
*/
       ())
{
  Lisp_Object free_plhs = Qnil;
  Lisp_Object used_plhs = Qnil;
  Lisp_Object unmanaged_plhs = Qnil;
  Lisp_Object heap_sects = Qnil;
  int used_size = 0;
  int real_size = 0;

  int i;

  /* free lists: (size . (page-count)) for each non-empty list */
  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)) > 0)
      free_plhs =
	acons (make_int (PLH_SIZE (FREE_HEAP_PAGES(i))),
	       list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)))),
	       free_plhs);

  /* unmanaged lists: (size . (pages used-cells used-space total-cells
     total-space)) */
  for (i = 0; i < N_UNMANAGED_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (UNMANAGED_HEAP_PAGES(i)) > 0)
      unmanaged_plhs =
	acons (make_int (PLH_SIZE (UNMANAGED_HEAP_PAGES(i))),
	       list5 (make_int (PLH_PAGE_COUNT (UNMANAGED_HEAP_PAGES(i))),
		      make_int (PLH_USED_CELLS (UNMANAGED_HEAP_PAGES(i))),
		      make_int (PLH_USED_SPACE (UNMANAGED_HEAP_PAGES(i))),
		      make_int (PLH_TOTAL_CELLS (UNMANAGED_HEAP_PAGES(i))),
		      make_int (PLH_TOTAL_SPACE (UNMANAGED_HEAP_PAGES(i)))),
	       unmanaged_plhs);

  /* managed (used) lists: same shape as unmanaged */
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (USED_HEAP_PAGES(i)) > 0)
      used_plhs =
	acons (make_int (PLH_SIZE (USED_HEAP_PAGES(i))),
	       list5 (make_int (PLH_PAGE_COUNT (USED_HEAP_PAGES(i))),
		      make_int (PLH_USED_CELLS (USED_HEAP_PAGES(i))),
		      make_int (PLH_USED_SPACE (USED_HEAP_PAGES(i))),
		      make_int (PLH_TOTAL_CELLS (USED_HEAP_PAGES(i))),
		      make_int (PLH_TOTAL_SPACE (USED_HEAP_PAGES(i)))),
	       used_plhs);

  /* total heap size: page-aligned usable size vs. real malloced size */
  for (i = 0; i < N_HEAP_SECTIONS; i++) {
    used_size += HEAP_SECTION(i).n_pages * PAGE_SIZE;
    real_size +=
      malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
  }

  heap_sects =
    list3 (make_int (N_HEAP_SECTIONS),
	   make_int (used_size),
	   make_int (real_size));

  /* lists were built by consing, so reverse to ascending size order */
  return Fcons (make_int (PAGE_SIZE),
		list6 (heap_sects,
		       Fnreverse (used_plhs),
		       Fnreverse (unmanaged_plhs),
		       Fnreverse (free_plhs),
		       make_int (sizeof (mc_allocator_globals)),
		       make_int (MC_MALLOCED_BYTES)));
}
#endif /* MEMORY_USAGE_STATS */
|
|
1822
|
|
/* Registers the allocator's Lisp-visible functions. */
void
syms_of_mc_alloc (void)
{
#ifdef MEMORY_USAGE_STATS
  DEFSUBR (Fmc_alloc_memory_usage);
#endif /* MEMORY_USAGE_STATS */
}
|