comparison src/mc-alloc.c @ 3092:141c2920ea48

[xemacs-hg @ 2005-11-25 01:41:31 by crestani] Incremental Garbage Collector
author crestani
date Fri, 25 Nov 2005 01:42:08 +0000
parents ec5f23ea6d2e
children 26a547441418
comparison
equal deleted inserted replaced
3091:c22d8984148c 3092:141c2920ea48
19 Boston, MA 02111-1307, USA. */ 19 Boston, MA 02111-1307, USA. */
20 20
21 /* Synched up with: Not in FSF. */ 21 /* Synched up with: Not in FSF. */
22 22
23 #include <config.h> 23 #include <config.h>
24
24 #include "lisp.h" 25 #include "lisp.h"
25 #include "mc-alloc.h" 26 #include "mc-alloc.h"
27 #include "getpagesize.h"
28
29
30 #if 0
31 # define USE_MARK_BITS_FREE_LIST 1
32 #endif
33 #if 1
34 # define BLOCKTYPE_ALLOC_PAGE_HEADER 1
35 #endif
36
37 /* Memory protection needs the real system-dependent pagesize. */
38 #ifndef WIN32_NATIVE
39 #include <unistd.h> /* for getpagesize () */
40 #endif
41 #if defined (HAVE_GETPAGESIZE)
42 # define SYS_PAGE_SIZE getpagesize ()
43 #elif defined (_SC_PAGESIZE)
44 # define SYS_PAGE_SIZE sysconf (_SC_PAGESIZE)
45 #elif defined (_SC_PAGE_SIZE)
46 # define SYS_PAGE_SIZE sysconf (_SC_PAGE_SIZE)
47 #elif defined(get_page_size)
48 # define SYS_PAGE_SIZE get_page_size ()
49 #elif defined(PAGESIZE)
50 # define SYS_PAGE_SIZE PAGESIZE
51 #elif defined(PAGE_SIZE)
52 # define SYS_PAGE_SIZE PAGE_SIZE
53 #else
54 /* Valid page sizes are powers of 2. */
55 # define SYS_PAGE_SIZE 4096
56 #endif
26 57
27 58
28 /*--- configurable values ----------------------------------------------*/ 59 /*--- configurable values ----------------------------------------------*/
29
30 /* Valid page sizes are powers of 2. */
31 #undef PAGE_SIZE /* for FreeBSD */
32 #define PAGE_SIZE 2048
33
34 60
35 /* Definition of size classes */ 61 /* Definition of size classes */
36 62
37 /* Heap used list constants: In the used heap, it is important to 63 /* Heap used list constants: In the used heap, it is important to
38 quickly find a free spot for a new object. Therefore the size 64 quickly find a free spot for a new object. Therefore the size
39 classes of the used heap are defined by the size of the cells on 65 classes of the used heap are defined by the size of the cells on
40 the pages. The size classes should match common object sizes, to 66 the pages. The size classes should match common object sizes, to
41 avoid wasting memory. */ 67 avoid wasting memory. */
42 68
43 /* Minimum object size in bytes. */ 69 /* Minimum object size in bytes. */
44 #define USED_LIST_MIN_OBJECT_SIZE 8 70 #if BITS_PER_EMACS_INT > 32
71 # define USED_LIST_MIN_OBJECT_SIZE 16
72 #else
73 # define USED_LIST_MIN_OBJECT_SIZE 8
74 #endif
45 75
46 /* The step size by which the size classes increase (up to upper 76 /* The step size by which the size classes increase (up to upper
47 threshold). This many bytes are mapped to a single used list: */ 77 threshold). This many bytes are mapped to a single used list: */
48 #define USED_LIST_LIN_STEP 4 78 #if BITS_PER_EMACS_INT > 32
79 # define USED_LIST_LIN_STEP 8
80 #else
81 # define USED_LIST_LIN_STEP 4
82 #endif
49 83
50 /* The upper threshold should always be set to PAGE_SIZE/2, because if 84 /* The upper threshold should always be set to PAGE_SIZE/2, because if
51 a object is larger than PAGE_SIZE/2 there is no room for any other 85 a object is larger than PAGE_SIZE/2 there is no room for any other
52 object on this page. Objects this big are kept in the page list of 86 object on this page. Objects this big are kept in the page list of
53 the multiple pages, since a quick search for free spots is not 87 the multiple pages, since a quick search for free spots is not
54 needed for this kind of pages (because there are no free spots). 88 needed for this kind of pages (because there are no free spots).
55 PAGE_SIZES_DIV_2 defines maximum size of a used space list. */ 89 PAGE_SIZES_DIV_2 defines maximum size of a used space list. */
56 #define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2 90 #define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2
57
58
59 /* Unmanaged memory used list constants: Like in the used heap, it is
60 important to quickly find a free spot for a new object. Therefore
61 the size classes of the unmanaged heap are defined by the size of
62 the cells on the pages. The size classes should match common object
63 sizes, to avoid wasting memory. */
64 /* Minimum object size in bytes. */
65 #define UNMANAGED_LIST_MIN_OBJECT_SIZE 8
66 /* The step size by which the size classes increase (up to upper
67 threshold). This many bytes are mapped to a single unmanaged list: */
68 #define UNMANAGED_LIST_LIN_STEP 4
69 /* The upper threshold should always be set to PAGE_SIZE/2, because if
70 a object is larger than PAGE_SIZE/2 there is no room for any other
71 object on this page. Objects this big are kept in the page list of
72 the multiple pages, since a quick search for free spots is not
73 needed for this kind of pages (because there are no free spots).
74 PAGE_SIZES defines maximum size of a unmanaged space list. */
75 #define UNMANAGED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2
76 91
77 92
78 /* Heap free list constants: In the unused heap, the size of 93 /* Heap free list constants: In the unused heap, the size of
79 consecutive memory tips the scales. A page is smallest entity which 94 consecutive memory tips the scales. A page is smallest entity which
80 is asked for. Therefore, the size classes of the unused heap are 95 is asked for. Therefore, the size classes of the unused heap are
91 single list, which makes searching this list slow. But objects that 106 single list, which makes searching this list slow. But objects that
92 big are really seldom. */ 107 big are really seldom. */
93 #define FREE_LIST_UPPER_THRESHOLD 256 108 #define FREE_LIST_UPPER_THRESHOLD 256
94 109
95 110
111 /* used heap list count */
112 #define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \
113 - USED_LIST_MIN_OBJECT_SIZE) \
114 / USED_LIST_LIN_STEP) + 1 ) + 1
115
116 /* free heap list count */
117 #define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \
118 - FREE_LIST_LOWER_THRESHOLD) \
119 / FREE_LIST_LIN_STEP) \
120 + FREE_LIST_LOWER_THRESHOLD)
121
122
96 /* Maximum number of separately added heap sections. */ 123 /* Maximum number of separately added heap sections. */
97 #if BITS_PER_EMACS_INT > 32 124 #if BITS_PER_EMACS_INT > 32
98 # define MAX_HEAP_SECTS 2048 125 # define MAX_HEAP_SECTS 2048
99 #else 126 #else
100 # define MAX_HEAP_SECTS 768 127 # define MAX_HEAP_SECTS 768
101 #endif 128 #endif
102 129
103 130
104 /* Heap growth constants. Heap increases by any number between the 131 /* Heap growth constants. Heap increases by any number between the
105 boundaries (unit is PAGE_SIZE). */ 132 boundaries (unit is PAGE_SIZE). */
106 #define MIN_HEAP_INCREASE 32 133 #define MIN_HEAP_INCREASE 256
107 #define MAX_HEAP_INCREASE 256 /* not used */ 134 #define MAX_HEAP_INCREASE 256 /* not used */
108 135
109 /* Every heap growth is calculated like this: 136 /* Every heap growth is calculated like this:
110 needed_pages + ( HEAP_SIZE / ( PAGE_SIZE * HEAP_GROWTH_DIVISOR )). 137 needed_pages + ( HEAP_SIZE / ( PAGE_SIZE * HEAP_GROWTH_DIVISOR )).
111 So the growth of the heap is influenced by the current size of the 138 So the growth of the heap is influenced by the current size of the
118 145
119 /* Zero memory before putting on free lists. */ 146 /* Zero memory before putting on free lists. */
120 #define ZERO_MEM 1 147 #define ZERO_MEM 1
121 148
122 149
123
124
125 /*--- calculations done by macros --------------------------------------*/
126
127 #ifndef CHAR_BIT /* should be included by limits.h */ 150 #ifndef CHAR_BIT /* should be included by limits.h */
128 # define CHAR_BIT BITS_PER_CHAR 151 # define CHAR_BIT BITS_PER_CHAR
129 #endif 152 #endif
130 153
131 #if PAGE_SIZE == 512 154
132 # define CPP_LOG_PAGE_SIZE 9 155
133 #endif 156 /*--- values depending on PAGE_SIZE ------------------------------------*/
134 #if PAGE_SIZE == 1024 157
135 # define CPP_LOG_PAGE_SIZE 10 158 /* initialized in init_mc_allocator () */
136 #endif 159 static EMACS_INT log_page_size;
137 #if PAGE_SIZE == 2048 160 static EMACS_INT page_size_div_2;
138 # define CPP_LOG_PAGE_SIZE 11 161
139 #endif
140 #if PAGE_SIZE == 4096
141 # define CPP_LOG_PAGE_SIZE 12
142 #endif
143 #if PAGE_SIZE == 8192
144 # define CPP_LOG_PAGE_SIZE 13
145 #endif
146 #if PAGE_SIZE == 16384
147 # define CPP_LOG_PAGE_SIZE 14
148 #endif
149 #ifndef CPP_LOG_PAGE_SIZE
150 --> fix PAGE_SIZE
151 #endif
152 #undef PAGE_SIZE 162 #undef PAGE_SIZE
153 #define CPP_PAGE_SIZE (1 << CPP_LOG_PAGE_SIZE) 163 #define PAGE_SIZE SYS_PAGE_SIZE
154 #define LOG_PAGE_SIZE ((EMACS_INT) CPP_LOG_PAGE_SIZE) 164 #define LOG_PAGE_SIZE log_page_size
155 #define PAGE_SIZE ((EMACS_INT) CPP_PAGE_SIZE) 165 #define PAGE_SIZE_DIV_2 page_size_div_2
156 #define PAGE_SIZE_DIV_2 (PAGE_SIZE >> 1)
157
158
159 /* NOT USED ANYMORE */
160 #ifdef USE_EXPONENTIAL_USED_LIST_GROWTH
161 /* used heap list logarithms */
162 #if USED_LIST_LOWER_THRESHOLD == 8
163 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 3
164 #endif
165 #if USED_LIST_LOWER_THRESHOLD == 16
166 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 4
167 #endif
168 #if USED_LIST_LOWER_THRESHOLD == 32
169 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 5
170 #endif
171 #if USED_LIST_LOWER_THRESHOLD == 64
172 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 6
173 #endif
174 #if USED_LIST_LOWER_THRESHOLD == 128
175 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 7
176 #endif
177 #if USED_LIST_LOWER_THRESHOLD == 256
178 # define CPP_LOG_USED_LIST_LOWER_THRESHOLD 8
179 #endif
180 #ifndef CPP_LOG_USED_LIST_LOWER_THRESHOLD
181 --> fix USED_LIST_LOWER_THRESHOLD
182 #endif
183 #define LOG_USED_LIST_LOWER_THRESHOLD CPP_LOG_USED_LIST_LOWER_THRESHOLD
184 #endif /* USE_EXPONENTIAL_USED_LIST_GROWTH */
185
186 /* used heap list count */
187 #define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \
188 - USED_LIST_MIN_OBJECT_SIZE) \
189 / USED_LIST_LIN_STEP) + 1 ) + 1
190
191 /* unmanaged memory list count */
192 #define N_UNMANAGED_PAGE_LISTS (((UNMANAGED_LIST_UPPER_THRESHOLD \
193 - UNMANAGED_LIST_MIN_OBJECT_SIZE) \
194 / UNMANAGED_LIST_LIN_STEP) + 1 ) + 1
195
196 /* NOT USED ANYMORE */
197 #ifdef USE_EXPONENTIAL_USED_LIST_GROWTH
198 #define N_USED_PAGE_LISTS_LIN (((USED_LIST_LOWER_THRESHOLD \
199 - USED_LIST_MIN_OBJECT_SIZE) \
200 / USED_LIST_LIN_STEP) + 1 )
201 #define N_USED_PAGE_LISTS_EXP \
202 (LOG_PAGE_SIZE - LOG_USED_LIST_LOWER_THRESHOLD)
203
204 #define N_USED_PAGE_LISTS \
205 (N_USED_PAGE_LISTS_LIN + N_USED_PAGE_LISTS_EXP + 1)
206 #endif /* USE_EXPONENTIAL_USED_LIST_GROWTH */
207
208 /* free heap list count */
209 #define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \
210 - FREE_LIST_LOWER_THRESHOLD) \
211 / FREE_LIST_LIN_STEP) \
212 + FREE_LIST_LOWER_THRESHOLD)
213 166
214 167
215 /* Constants for heap address to page header mapping. */ 168 /* Constants for heap address to page header mapping. */
216 #define LOG_LEVEL2_SIZE 10 169 #define LOG_LEVEL2_SIZE 10
217 #define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE) 170 #define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE)
235 188
236 189
237 190
238 /*--- structs and typedefs ---------------------------------------------*/ 191 /*--- structs and typedefs ---------------------------------------------*/
239 192
240 /* Links the free lists (mark_bit_free_list, page_header_free_list, 193 /* Links the free lists (mark_bit_free_list and cell free list). */
241 cell free list). */
242 typedef struct free_link 194 typedef struct free_link
243 { 195 {
244 struct lrecord_header lheader; 196 struct lrecord_header lheader;
245 struct free_link *next_free; 197 struct free_link *next_free;
246 } free_link; 198 } free_link;
247 199
248 200
249 /* Header for pages. They are hold in a doubly linked list. */ 201 /* Header for pages. They are held in a doubly linked list. */
250 typedef struct page_header 202 typedef struct page_header
251 { 203 {
252 struct page_header *next; /* next page_header */ 204 struct page_header *next; /* next page_header */
253 struct page_header *prev; /* previous page_header */ 205 struct page_header *prev; /* previous page_header */
254 /* Field plh holds pointer to the according header of the page list.*/ 206 /* Field plh holds pointer to the according header of the page list.*/
261 /* If the number of objects on page is bigger than BITS_PER_EMACS_INT, 213 /* If the number of objects on page is bigger than BITS_PER_EMACS_INT,
262 the mark bits are put in an extra memory area. Then the field 214 the mark bits are put in an extra memory area. Then the field
263 mark_bits holds the pointer to this area. Is the number of 215 mark_bits holds the pointer to this area. Is the number of
264 objects smaller than BITS_PER_EMACS_INT, the mark bits are held in the 216 objects smaller than BITS_PER_EMACS_INT, the mark bits are held in the
265 mark_bit EMACS_INT directly, without an additional indirection. */ 217 mark_bit EMACS_INT directly, without an additional indirection. */
266 char *mark_bits; /* pointer to mark bits */ 218 unsigned int black_bit:1; /* objects on page are black */
219 unsigned int dirty_bit:1; /* page is dirty */
220 unsigned int protection_bit:1; /* page is write protected */
221 unsigned int array_bit:1; /* page holds arrays */
222 Rawbyte *mark_bits; /* pointer to mark bits */
267 void *heap_space; /* pointer to heap, where objects 223 void *heap_space; /* pointer to heap, where objects
268 are stored */ 224 are stored */
269 } page_header; 225 } page_header;
270 226
271 227
272 /* Different list types. */ 228 /* Different list types. */
273 enum list_type_enum { 229 enum list_type_enum {
274 USED_LIST, 230 USED_LIST,
275 UNMANAGED_LIST,
276 FREE_LIST 231 FREE_LIST
277 }; 232 };
278 233
279 234
280 /* Header for page lists. Field list_type holds the type of the list. */ 235 /* Header for page lists. Field list_type holds the type of the list. */
337 heap_sect heap_sections[MAX_HEAP_SECTS]; 292 heap_sect heap_sections[MAX_HEAP_SECTS];
338 EMACS_INT n_heap_sections; 293 EMACS_INT n_heap_sections;
339 294
340 /* Holds all allocated pages, each object size class in its separate list, 295 /* Holds all allocated pages, each object size class in its separate list,
341 to guarantee fast allocation on partially filled pages. */ 296 to guarantee fast allocation on partially filled pages. */
342 page_list_header used_heap_pages[N_USED_PAGE_LISTS]; 297 page_list_header *used_heap_pages;
343
344 /* Holds all unmanaged pages. */
345 page_list_header unmanaged_heap_pages[N_UNMANAGED_PAGE_LISTS];
346 298
347 /* Holds all free pages in the heap. N multiples of PAGE_SIZE are 299 /* Holds all free pages in the heap. N multiples of PAGE_SIZE are
348 kept on the Nth free list. Contiguos pages are coalesced. */ 300 kept on the Nth free list. Contiguos pages are coalesced. */
349 page_list_header free_heap_pages[N_FREE_PAGE_LISTS]; 301 page_list_header free_heap_pages[N_FREE_PAGE_LISTS];
350 302
351 /* ptr lookup table */ 303 /* ptr lookup table */
352 level_2_lookup_tree *ptr_lookup_table[LEVEL1_SIZE]; 304 level_2_lookup_tree **ptr_lookup_table;
353 305
306 #ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
354 /* page header free list */ 307 /* page header free list */
355 free_link *page_header_free_list; 308 free_link *page_header_free_list;
309 #endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
356 310
357 #ifdef MEMORY_USAGE_STATS 311 #ifdef MEMORY_USAGE_STATS
358 EMACS_INT malloced_bytes; 312 EMACS_INT malloced_bytes;
359 #endif 313 #endif
360 } mc_allocator_globals_type; 314 } mc_allocator_globals_type;
366 320
367 /*--- macro accessors --------------------------------------------------*/ 321 /*--- macro accessors --------------------------------------------------*/
368 322
369 #define USED_HEAP_PAGES(i) \ 323 #define USED_HEAP_PAGES(i) \
370 ((page_list_header*) &mc_allocator_globals.used_heap_pages[i]) 324 ((page_list_header*) &mc_allocator_globals.used_heap_pages[i])
371
372 #define UNMANAGED_HEAP_PAGES(i) \
373 ((page_list_header*) &mc_allocator_globals.unmanaged_heap_pages[i])
374 325
375 #define FREE_HEAP_PAGES(i) \ 326 #define FREE_HEAP_PAGES(i) \
376 ((page_list_header*) &mc_allocator_globals.free_heap_pages[i]) 327 ((page_list_header*) &mc_allocator_globals.free_heap_pages[i])
377 328
378 #define PLH(plh) plh 329 #define PLH(plh) plh
396 # define PH_FREE_LIST(ph) PH (ph)->free_list 347 # define PH_FREE_LIST(ph) PH (ph)->free_list
397 # define PH_N_PAGES(ph) PH (ph)->n_pages 348 # define PH_N_PAGES(ph) PH (ph)->n_pages
398 # define PH_CELL_SIZE(ph) PH (ph)->cell_size 349 # define PH_CELL_SIZE(ph) PH (ph)->cell_size
399 # define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page 350 # define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page
400 # define PH_CELLS_USED(ph) PH (ph)->cells_used 351 # define PH_CELLS_USED(ph) PH (ph)->cells_used
352 # define PH_BLACK_BIT(ph) PH (ph)->black_bit
353 # define PH_DIRTY_BIT(ph) PH (ph)->dirty_bit
354 # define PH_PROTECTION_BIT(ph) PH (ph)->protection_bit
355 # define PH_ARRAY_BIT(ph) PH (ph)->array_bit
401 # define PH_MARK_BITS(ph) PH (ph)->mark_bits 356 # define PH_MARK_BITS(ph) PH (ph)->mark_bits
402 # define PH_HEAP_SPACE(ph) PH (ph)->heap_space 357 # define PH_HEAP_SPACE(ph) PH (ph)->heap_space
403 #define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph)) 358 #define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph))
404 #define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph)) 359 #define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph))
405 360
410 #endif 365 #endif
411 366
412 #define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index] 367 #define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index]
413 #define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections 368 #define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections
414 369
370 #ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
415 #define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list 371 #define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list
372 #endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
416 373
417 #define NEXT_FREE(free_list) ((free_link*) free_list)->next_free 374 #define NEXT_FREE(free_list) ((free_link*) free_list)->next_free
418 #define FREE_LIST(free_list) (free_link*) (free_list) 375 #define FREE_LIST(free_list) (free_link*) (free_list)
419 376
420 #define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i] 377 #define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i]
442 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST)) 399 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST))
443 400
444 #define PH_ON_USED_LIST_P(ph) \ 401 #define PH_ON_USED_LIST_P(ph) \
445 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST)) 402 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST))
446 403
447 #define PH_ON_UNMANAGED_LIST_P(ph) \ 404
448 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == UNMANAGED_LIST)) 405 /* Number of mark bits: minimum 1, maximum 8. */
449 406 #ifdef NEW_GC
407 #define N_MARK_BITS 2
408 #else /* not NEW_GC */
409 #define N_MARK_BITS 1
410 #endif /* not NEW_GC */
450 411
451 412
452 413
453 /************************************************************************/ 414 /************************************************************************/
454 /* MC Allocator */ 415 /* MC Allocator */
455 /************************************************************************/ 416 /************************************************************************/
456 417
457 418
458 /* ###TODO### */
459 #if 1
460 # define ALLOC_MB_UNMANAGED 1
461 #endif
462
463
464 /*--- misc functions ---------------------------------------------------*/ 419 /*--- misc functions ---------------------------------------------------*/
465 420
466 /* moved here from alloc.c */ 421 /* moved here from alloc.c */
467 #ifdef ERROR_CHECK_GC 422 #ifdef ERROR_CHECK_GC
468 static void 423 static void
481 list and executes f with the current page header as 436 list and executes f with the current page header as
482 argument. Needed for sweep. */ 437 argument. Needed for sweep. */
483 static void 438 static void
484 visit_all_used_page_headers (void (*f) (page_header *ph)) 439 visit_all_used_page_headers (void (*f) (page_header *ph))
485 { 440 {
486 int i; 441 EMACS_INT i;
487 for (i = 0; i < N_USED_PAGE_LISTS; i++) 442 for (i = 0; i < N_USED_PAGE_LISTS; i++)
488 if (PLH_FIRST (USED_HEAP_PAGES (i))) 443 if (PLH_FIRST (USED_HEAP_PAGES (i)))
489 { 444 {
490 page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i)); 445 page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i));
491 while (PH_NEXT (ph)) 446 while (PH_NEXT (ph))
505 460
506 /* Sets a heap pointer and page header pair into the lookup table. */ 461 /* Sets a heap pointer and page header pair into the lookup table. */
507 static void 462 static void
508 set_lookup_table (void *ptr, page_header *ph) 463 set_lookup_table (void *ptr, page_header *ph)
509 { 464 {
510 int l1_index = L1_INDEX (ptr); 465 EMACS_INT l1_index = L1_INDEX (ptr);
511 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); 466 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
512 #ifdef USE_HASH_TABLE 467 #ifdef USE_HASH_TABLE
513 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) 468 while ((l2) && (LEVEL2_KEY (l2) != l1_index))
514 l2 = LEVEL2_HASH_LINK (l2); 469 l2 = LEVEL2_HASH_LINK (l2);
515 #endif 470 #endif
535 #ifdef UNSET_LOOKUP_TABLE 490 #ifdef UNSET_LOOKUP_TABLE
536 /* Set the lookup table to 0 for given heap address. */ 491 /* Set the lookup table to 0 for given heap address. */
537 static void 492 static void
538 unset_lookup_table (void *ptr) 493 unset_lookup_table (void *ptr)
539 { 494 {
540 int l1_index = L1_INDEX (ptr); 495 EMACS_INT l1_index = L1_INDEX (ptr);
541 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); 496 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
542 #ifdef USE_HASH_TABLE 497 #ifdef USE_HASH_TABLE
543 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) 498 while ((l2) && (LEVEL2_KEY (l2) != l1_index))
544 l2 = LEVEL2_HASH_LINK (l2); 499 l2 = LEVEL2_HASH_LINK (l2);
545 #endif 500 #endif
552 /* Returns the page header of a given heap address, or 0 if not in table. 507 /* Returns the page header of a given heap address, or 0 if not in table.
553 For internal use, no error checking. */ 508 For internal use, no error checking. */
554 static page_header * 509 static page_header *
555 get_page_header_internal (void *ptr) 510 get_page_header_internal (void *ptr)
556 { 511 {
557 int l1_index = L1_INDEX (ptr); 512 EMACS_INT l1_index = L1_INDEX (ptr);
558 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); 513 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
559 #ifdef USE_HASH_TABLE 514 #ifdef USE_HASH_TABLE
560 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) 515 while ((l2) && (LEVEL2_KEY (l2) != l1_index))
561 l2 = LEVEL2_HASH_LINK (l2); 516 l2 = LEVEL2_HASH_LINK (l2);
562 #endif 517 #endif
567 522
568 /* Returns the page header of a given heap address, or 0 if not in table. */ 523 /* Returns the page header of a given heap address, or 0 if not in table. */
569 static page_header * 524 static page_header *
570 get_page_header (void *ptr) 525 get_page_header (void *ptr)
571 { 526 {
572 int l1_index = L1_INDEX (ptr); 527 EMACS_INT l1_index = L1_INDEX (ptr);
573 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index); 528 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
574 assert (l2); 529 assert (l2);
575 #ifdef USE_HASH_TABLE 530 #ifdef USE_HASH_TABLE
576 while ((l2) && (LEVEL2_KEY (l2) != l1_index)) 531 while ((l2) && (LEVEL2_KEY (l2) != l1_index))
577 l2 = LEVEL2_HASH_LINK (l2); 532 l2 = LEVEL2_HASH_LINK (l2);
578 #endif 533 #endif
579 assert (LEVEL2 (l2, L2_INDEX (ptr))); 534 assert (LEVEL2 (l2, L2_INDEX (ptr)));
580 return LEVEL2 (l2, L2_INDEX (ptr)); 535 return LEVEL2 (l2, L2_INDEX (ptr));
581 } 536 }
582 537
583
584 /* Returns the mark bit index of a given heap address. */ 538 /* Returns the mark bit index of a given heap address. */
585 static EMACS_INT 539 static EMACS_INT
586 get_mark_bit_index (void *ptr, page_header *ph) 540 get_mark_bit_index (void *ptr, page_header *ph)
587 { 541 {
588 EMACS_INT cell_size = PH_CELL_SIZE (ph); 542 EMACS_INT cell_size = PH_CELL_SIZE (ph);
589 if (cell_size) 543 if (cell_size)
590 return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size); 544 return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size)
545 * N_MARK_BITS;
591 else /* only one object on page */ 546 else /* only one object on page */
592 return 0; 547 return 0;
593 } 548 }
594 549
595 550
596 /* Adds addresses of pages to lookup table. */ 551 /* Adds addresses of pages to lookup table. */
597 static void 552 static void
598 add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages) 553 add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages)
599 { 554 {
600 char *p = (char*) PH_HEAP_SPACE (ph); 555 Rawbyte *p = (Rawbyte *) PH_HEAP_SPACE (ph);
601 EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages); 556 EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages);
602 for (p = (char*) PH_HEAP_SPACE (ph); 557 for (p = (Rawbyte *) PH_HEAP_SPACE (ph);
603 (EMACS_INT) p < end_of_section; p += PAGE_SIZE) 558 (EMACS_INT) p < end_of_section; p += PAGE_SIZE)
604 set_lookup_table (p, ph); 559 set_lookup_table (p, ph);
605 } 560 }
606 561
607 562
608 /* Initializes lookup table. */ 563 /* Initializes lookup table. */
609 static void 564 static void
610 init_lookup_table (void) 565 init_lookup_table (void)
611 { 566 {
612 int i; 567 EMACS_INT i;
613 for (i = 0; i < LEVEL1_SIZE; i++) 568 for (i = 0; i < LEVEL1_SIZE; i++)
614 PTR_LOOKUP_TABLE (i) = 0; 569 PTR_LOOKUP_TABLE (i) = 0;
615 } 570 }
616 571
617 572
618 573
619 574
620 /*--- mark bits --------------------------------------------------------*/ 575 /*--- mark bits --------------------------------------------------------*/
621 576
622 /* Number of mark bits: minimum 1, maximum 8. */
623 #define N_MARK_BITS 1
624
625 /*--- bit operations --- */ 577 /*--- bit operations --- */
626 578
627 /* Allocates a bit array of length bits. */ 579 /* Allocates a bit array of length bits. */
628 static char * 580 static Rawbyte *
629 alloc_bit_array(size_t bits) 581 alloc_bit_array(size_t bits)
630 { 582 {
631 #ifdef ALLOC_MB_UNMANAGED 583 Rawbyte *bit_array;
632 size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof(char); 584 #ifdef USE_MARK_BITS_FREE_LIST
585 size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte);
586 #else /* not USE_MARK_BITS_FREE_LIST */
587 size_t size =
588 ALIGN_FOR_TYPE (((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte),
589 Rawbyte *);
590 #endif /* not USE_MARK_BITS_FREE_LIST */
633 if (size < sizeof (free_link)) size = sizeof (free_link); 591 if (size < sizeof (free_link)) size = sizeof (free_link);
634 return (char *) mc_alloc_unmanaged (size);
635 #else /* not ALLOC_MB_UNMANAGED */
636 size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof(char);
637 char *bit_array;
638 if (size < sizeof (free_link)) size = sizeof (free_link);
639 bit_array = (char*) xmalloc_and_zero (size);
640 #ifdef MEMORY_USAGE_STATS 592 #ifdef MEMORY_USAGE_STATS
641 MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0); 593 MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0);
642 #endif 594 #endif
595 bit_array = (Rawbyte *) xmalloc_and_zero (size);
643 return bit_array; 596 return bit_array;
644 #endif /* not ALLOC_MB_UNMANAGED */
645 } 597 }
646 598
647 599
648 /* Returns the bit value at pos. */ 600 /* Returns the bit value at pos. */
649 static EMACS_INT 601 static EMACS_INT
650 get_bit (char *bit_array, EMACS_INT pos) 602 get_bit (Rawbyte *bit_array, EMACS_INT pos)
651 { 603 {
652 #if N_MARK_BITS > 1 604 #if N_MARK_BITS > 1
653 EMACS_INT result = 0; 605 EMACS_INT result = 0;
654 EMACS_INT i; 606 EMACS_INT i;
655 #endif 607 #endif
656 bit_array += pos / CHAR_BIT; 608 bit_array += pos / CHAR_BIT;
657 #if N_MARK_BITS > 1 609 #if N_MARK_BITS > 1
658 for (i = 0; i < N_MARK_BITS; i++) 610 for (i = 0; i < N_MARK_BITS; i++)
659 result |= (*bit_array & (1 << ((pos + i) % CHAR_BIT))); 611 result |= ((*bit_array & (1 << ((pos + i) % CHAR_BIT))) != 0) << i;
660 return result >> pos; 612 return result;
661 #else 613 #else
662 return (*bit_array & (1 << (pos % CHAR_BIT))) != 0; 614 return (*bit_array & (1 << (pos % CHAR_BIT))) != 0;
663 #endif 615 #endif
664 } 616 }
665 617
666 618
667 /* Bit_Arrays bit at pos to val. */ 619 /* Bit_Arrays bit at pos to val. */
668 static void 620 static void
669 set_bit(char *bit_array, EMACS_INT pos, EMACS_INT val) 621 set_bit (Rawbyte *bit_array, EMACS_INT pos, EMACS_INT val)
670 { 622 {
671 #if N_MARK_BITS > 1 623 #if N_MARK_BITS > 1
672 EMACS_INT result = 0;
673 EMACS_INT i; 624 EMACS_INT i;
674 #endif 625 #endif
675 bit_array += pos / CHAR_BIT; 626 bit_array += pos / CHAR_BIT;
676 #if N_MARK_BITS > 1 627 #if N_MARK_BITS > 1
677 for (i = 0; i < N_MARK_BITS; i++) 628 for (i = 0; i < N_MARK_BITS; i++)
687 #endif 638 #endif
688 } 639 }
689 640
690 641
691 /*--- mark bit functions ---*/ 642 /*--- mark bit functions ---*/
692 #define USE_PNTR_MARK_BITS(ph) (PH_CELLS_ON_PAGE (ph) > BITS_PER_EMACS_INT) 643 #define USE_PNTR_MARK_BITS(ph) \
693 #define USE_WORD_MARK_BITS(ph) (PH_CELLS_ON_PAGE (ph) <= BITS_PER_EMACS_INT) 644 ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) > BITS_PER_EMACS_INT)
694 645 #define USE_WORD_MARK_BITS(ph) \
695 #define GET_BIT_WORD(b, p) get_bit ((char*) &b, p) 646 ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) <= BITS_PER_EMACS_INT)
647
648 #define GET_BIT_WORD(b, p) get_bit ((Rawbyte *) &b, p)
696 #define GET_BIT_PNTR(b, p) get_bit (b, p) 649 #define GET_BIT_PNTR(b, p) get_bit (b, p)
697 650
698 #define SET_BIT_WORD(b, p, v) set_bit ((char*) &b, p, v) 651 #define SET_BIT_WORD(b, p, v) set_bit ((Rawbyte *) &b, p, v)
699 #define SET_BIT_PNTR(b, p, v) set_bit (b, p, v) 652 #define SET_BIT_PNTR(b, p, v) set_bit (b, p, v)
700 653
701 #define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0 654 #define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0
702 #define ZERO_MARK_BITS_PNTR(ph) \ 655 #define ZERO_MARK_BITS_PNTR(ph) \
703 do { \ 656 do { \
704 memset (PH_MARK_BITS (ph), '\0', \ 657 memset (PH_MARK_BITS (ph), '\0', \
705 (PH_CELLS_ON_PAGE (ph) + CHAR_BIT - 1) \ 658 ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) \
706 / CHAR_BIT * sizeof(char)); \ 659 + CHAR_BIT - 1) / CHAR_BIT * sizeof (Rawbyte)); \
707 } while (0) 660 } while (0)
708 661
709 #define GET_BIT(bit, ph, p) \ 662 #define GET_BIT(bit, ph, p) \
710 do { \ 663 do { \
711 if (USE_PNTR_MARK_BITS (ph)) \ 664 if (USE_PNTR_MARK_BITS (ph)) \
731 } while (0) 684 } while (0)
732 685
733 686
734 /* Allocates mark-bit space either from a free list or from the OS 687 /* Allocates mark-bit space either from a free list or from the OS
735 for the given page header. */ 688 for the given page header. */
736 static char * 689 static Rawbyte *
737 alloc_mark_bits (page_header *ph) 690 alloc_mark_bits (page_header *ph)
738 { 691 {
739 char *result; 692 Rawbyte *result;
693 #ifdef USE_MARK_BITS_FREE_LIST
740 if (PH_MARK_BIT_FREE_LIST (ph) == 0) 694 if (PH_MARK_BIT_FREE_LIST (ph) == 0)
741 result = (char*) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS); 695 result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
742 else 696 else
743 { 697 {
744 result = (char*) PH_MARK_BIT_FREE_LIST (ph); 698 result = (Rawbyte *) PH_MARK_BIT_FREE_LIST (ph);
745 PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result); 699 PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result);
746 } 700 }
701 #else /* not USE_MARK_BITS_FREE_LIST */
702 result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
703 #endif /* not USE_MARK_BITS_FREE_LIST */
747 return result; 704 return result;
748 } 705 }
749 706
750 707
751 /* Frees by maintaining a free list. */ 708 /* Frees by maintaining a free list. */
752 static void 709 static void
753 free_mark_bits (page_header *ph) 710 free_mark_bits (page_header *ph)
754 { 711 {
755 #ifdef ALLOC_MB_UNMANAGED 712 #ifdef USE_MARK_BITS_FREE_LIST
713 NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph);
714 PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph));
715 #else /* not USE_MARK_BITS_FREE_LIST */
756 if (PH_MARK_BITS (ph)) 716 if (PH_MARK_BITS (ph))
757 mc_free (PH_MARK_BITS (ph)); 717 free (PH_MARK_BITS (ph));
758 #else /* not ALLOC_MB_UNMANAGED */ 718 #endif /* not USE_MARK_BITS_FREE_LIST */
759 if (PH_MARK_BITS (ph)) {
760 NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph);
761 PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph));
762 }
763 #endif /* not ALLOC_MB_UNMANAGED */
764 } 719 }
765 720
766 721
767 /* Installs mark bits and zeros bits. */ 722 /* Installs mark bits and zeros bits. */
768 static void 723 static void
816 { 771 {
817 page_header *ph = get_page_header (ptr); 772 page_header *ph = get_page_header (ptr);
818 assert (ph && PH_ON_USED_LIST_P (ph)); 773 assert (ph && PH_ON_USED_LIST_P (ph));
819 if (ph) 774 if (ph)
820 { 775 {
776 #ifdef NEW_GC
777 if (value == BLACK)
778 if (!PH_BLACK_BIT (ph))
779 PH_BLACK_BIT (ph) = 1;
780 #endif /* NEW_GC */
821 SET_BIT (ph, get_mark_bit_index (ptr, ph), value); 781 SET_BIT (ph, get_mark_bit_index (ptr, ph), value);
822 } 782 }
823 } 783 }
824 784
825 785
826 786
827 787
828 /*--- page header functions --------------------------------------------*/ 788 /*--- page header functions --------------------------------------------*/
789
790 #ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
791 #include "blocktype.h"
792
793 struct page_header_blocktype
794 {
795 Blocktype_declare (page_header);
796 } *the_page_header_blocktype;
797 #endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */
829 798
830 /* Allocates a page header either from a free list or from the OS. */ 799 /* Allocates a page header either from a free list or from the OS. */
831 static page_header * 800 static page_header *
832 alloc_page_header (void) 801 alloc_page_header (void)
833 { 802 {
803 #ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
804 page_header *result;
805 #ifdef MEMORY_USAGE_STATS
806 MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
807 #endif
808 result = Blocktype_alloc (the_page_header_blocktype);
809 ZERO_PAGE_HEADER (result);
810 return result;
811 #else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
834 page_header *result; 812 page_header *result;
835 if (PAGE_HEADER_FREE_LIST == 0) 813 if (PAGE_HEADER_FREE_LIST == 0)
836 { 814 {
837 result = 815 result =
838 (page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header))); 816 (page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
839 #ifdef MEMORY_USAGE_STATS 817 #ifdef MEMORY_USAGE_STATS
840 MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0); 818 MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
841 #endif 819 #endif
842
843 } 820 }
844 else 821 else
845 { 822 {
846 result = (page_header*) PAGE_HEADER_FREE_LIST; 823 result = (page_header*) PAGE_HEADER_FREE_LIST;
847 PAGE_HEADER_FREE_LIST = NEXT_FREE (result); 824 PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
848 } 825 }
849 return result; 826 return result;
827 #endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
850 } 828 }
851 829
852 830
853 /* Frees given page header by maintaining a free list. */ 831 /* Frees given page header by maintaining a free list. */
854 static void 832 static void
855 free_page_header (page_header *ph) 833 free_page_header (page_header *ph)
856 { 834 {
835 #ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
836 Blocktype_free (the_page_header_blocktype, ph);
837 #else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
857 #if ZERO_MEM 838 #if ZERO_MEM
858 ZERO_PAGE_HEADER (ph); 839 ZERO_PAGE_HEADER (ph);
859 #endif 840 #endif
860 NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST; 841 NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
861 PAGE_HEADER_FREE_LIST = FREE_LIST (ph); 842 PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
843 #endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
862 } 844 }
863 845
864 846
865 /* Adds given page header to given page list header's list. */ 847 /* Adds given page header to given page list header's list. */
866 static void 848 static void
938 /* Returns the index of the used heap list according to given size. */ 920 /* Returns the index of the used heap list according to given size. */
939 static int 921 static int
940 get_used_list_index (size_t size) 922 get_used_list_index (size_t size)
941 { 923 {
942 if (size <= USED_LIST_MIN_OBJECT_SIZE) 924 if (size <= USED_LIST_MIN_OBJECT_SIZE)
943 return 0; 925 {
944 if (size <= USED_LIST_UPPER_THRESHOLD) 926 // printf ("size %d -> index %d\n", size, 0);
945 return ((size - USED_LIST_MIN_OBJECT_SIZE - 1) 927 return 0;
946 / USED_LIST_LIN_STEP) + 1; 928 }
929 if (size <= (size_t) USED_LIST_UPPER_THRESHOLD)
930 {
931 // printf ("size %d -> index %d\n", size,
932 // ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
933 // / USED_LIST_LIN_STEP) + 1);
934 return ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
935 / USED_LIST_LIN_STEP) + 1;
936 }
937 // printf ("size %d -> index %d\n", size, N_USED_PAGE_LISTS - 1);
947 return N_USED_PAGE_LISTS - 1; 938 return N_USED_PAGE_LISTS - 1;
948 } 939 }
949
950 940
951 /* Returns the size of the used heap list according to given index. */ 941 /* Returns the size of the used heap list according to given index. */
952 static size_t 942 static size_t
953 get_used_list_size_value (int used_index) 943 get_used_list_size_value (int used_index)
954 { 944 {
956 return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE; 946 return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE;
957 return 0; 947 return 0;
958 } 948 }
959 949
960 950
961 /* Returns the index of the used heap list according to given size. */
962 static int
963 get_unmanaged_list_index (size_t size)
964 {
965 if (size <= UNMANAGED_LIST_MIN_OBJECT_SIZE)
966 return 0;
967 if (size <= UNMANAGED_LIST_UPPER_THRESHOLD)
968 return ((size - UNMANAGED_LIST_MIN_OBJECT_SIZE - 1)
969 / UNMANAGED_LIST_LIN_STEP) + 1;
970 return N_UNMANAGED_PAGE_LISTS - 1;
971 }
972
973
974 /* Returns the size of the unmanaged heap list according to given index. */
975 static size_t
976 get_unmanaged_list_size_value (int unmanaged_index)
977 {
978 if (unmanaged_index < N_UNMANAGED_PAGE_LISTS - 1)
979 return (unmanaged_index * UNMANAGED_LIST_LIN_STEP)
980 + UNMANAGED_LIST_MIN_OBJECT_SIZE;
981 return 0;
982 }
983
984
985 /* Returns the index of the free heap list according to given size. */ 951 /* Returns the index of the free heap list according to given size. */
986 static int 952 static EMACS_INT
987 get_free_list_index (EMACS_INT n_pages) 953 get_free_list_index (EMACS_INT n_pages)
988 { 954 {
989 if (n_pages == 0) 955 if (n_pages == 0)
990 return 0; 956 return 0;
991 if (n_pages <= FREE_LIST_LOWER_THRESHOLD) 957 if (n_pages <= FREE_LIST_LOWER_THRESHOLD)
998 } 964 }
999 965
1000 966
1001 /* Returns the size in number of pages of the given free list at index. */ 967 /* Returns the size in number of pages of the given free list at index. */
1002 static size_t 968 static size_t
1003 get_free_list_size_value (int free_index) 969 get_free_list_size_value (EMACS_INT free_index)
1004 { 970 {
1005 if (free_index < FREE_LIST_LOWER_THRESHOLD) 971 if (free_index < FREE_LIST_LOWER_THRESHOLD)
1006 return free_index + 1; 972 return free_index + 1;
1007 if (free_index >= N_FREE_PAGE_LISTS) 973 if (free_index >= N_FREE_PAGE_LISTS)
1008 return FREE_LIST_UPPER_THRESHOLD; 974 return FREE_LIST_UPPER_THRESHOLD;
1036 1002
1037 /* Frees a heap section, if the heap_section is completly free */ 1003 /* Frees a heap section, if the heap_section is completly free */
1038 static EMACS_INT 1004 static EMACS_INT
1039 free_heap_section (page_header *ph) 1005 free_heap_section (page_header *ph)
1040 { 1006 {
1041 int i; 1007 EMACS_INT i;
1042 int removed = 0; 1008 EMACS_INT removed = 0;
1043 for (i = 0; i < N_HEAP_SECTIONS; i++) 1009 for (i = 0; i < N_HEAP_SECTIONS; i++)
1044 if (!removed) 1010 if (!removed)
1045 { 1011 {
1046 if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start) 1012 if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
1047 && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages)) 1013 && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
1218 1184
1219 1185
1220 /*--- used heap functions ----------------------------------------------*/ 1186 /*--- used heap functions ----------------------------------------------*/
1221 /* Installs initial free list. */ 1187 /* Installs initial free list. */
1222 static void 1188 static void
1223 install_cell_free_list (page_header *ph) 1189 install_cell_free_list (page_header *ph, EMACS_INT elemcount)
1224 { 1190 {
1225 char *p; 1191 Rawbyte *p;
1226 int i; 1192 EMACS_INT i;
1227 EMACS_INT cell_size = PH_CELL_SIZE (ph); 1193 EMACS_INT cell_size = PH_CELL_SIZE (ph);
1228 /* write initial free list if cell_size is < PAGE_SIZE */ 1194 /* write initial free list if cell_size is < PAGE_SIZE */
1229 p = (char *) PH_HEAP_SPACE (ph); 1195 p = (Rawbyte *) PH_HEAP_SPACE (ph);
1230 for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++) 1196 for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
1231 { 1197 {
1232 #ifdef ERROR_CHECK_GC 1198 #ifdef ERROR_CHECK_GC
1233 assert (!LRECORD_FREE_P (p)); 1199 assert (!LRECORD_FREE_P (p));
1234 MARK_LRECORD_AS_FREE (p); 1200 MARK_LRECORD_AS_FREE (p);
1235 #endif 1201 #endif
1236 NEXT_FREE (p) = FREE_LIST (p + cell_size); 1202 if (elemcount == 1)
1203 NEXT_FREE (p) = FREE_LIST (p + cell_size);
1237 set_lookup_table (p, ph); 1204 set_lookup_table (p, ph);
1238 p += cell_size; 1205 p += cell_size;
1239 } 1206 }
1240 #ifdef ERROR_CHECK_GC 1207 #ifdef ERROR_CHECK_GC
1241 assert (!LRECORD_FREE_P (p)); 1208 assert (!LRECORD_FREE_P (p));
1242 MARK_LRECORD_AS_FREE (p); 1209 MARK_LRECORD_AS_FREE (p);
1243 #endif 1210 #endif
1261 1228
1262 1229
1263 /* Installs a new page and hooks it into given page_list_header. */ 1230 /* Installs a new page and hooks it into given page_list_header. */
1264 static page_header * 1231 static page_header *
1265 install_page_in_used_list (page_header *ph, page_list_header *plh, 1232 install_page_in_used_list (page_header *ph, page_list_header *plh,
1266 size_t size, int managed) 1233 size_t size, EMACS_INT elemcount)
1267 { 1234 {
1268 /* add to list */ 1235 /* add to list */
1269 add_page_header_to_plh (ph, plh); 1236 add_page_header_to_plh (ph, plh);
1270 1237
1271 /* determine cell size */ 1238 /* determine cell size */
1272 if (PLH_SIZE (plh)) 1239 if (PLH_SIZE (plh))
1273 PH_CELL_SIZE (ph) = PLH_SIZE (plh); 1240 PH_CELL_SIZE (ph) = PLH_SIZE (plh);
1274 else 1241 else
1275 PH_CELL_SIZE (ph) = size; 1242 PH_CELL_SIZE (ph) = size;
1276 PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph); 1243 if (elemcount == 1)
1244 PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph);
1245 else
1246 {
1247 PH_CELLS_ON_PAGE (ph) = elemcount;
1248 PH_ARRAY_BIT (ph) = 1;
1249 }
1277 1250
1278 /* init cell count */ 1251 /* init cell count */
1279 PH_CELLS_USED (ph) = 0; 1252 PH_CELLS_USED (ph) = 0;
1280 1253
1281 /* install mark bits and initialize cell free list */ 1254 /* install mark bits and initialize cell free list */
1282 if (managed) 1255 install_mark_bits (ph);
1283 install_mark_bits (ph); 1256
1284 1257 install_cell_free_list (ph, elemcount);
1285 install_cell_free_list (ph);
1286 1258
1287 #ifdef MEMORY_USAGE_STATS 1259 #ifdef MEMORY_USAGE_STATS
1288 PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph); 1260 PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph);
1289 PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph); 1261 PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph);
1290 #endif 1262 #endif
1296 /* Cleans and frees a page, identified by the given page_header. */ 1268 /* Cleans and frees a page, identified by the given page_header. */
1297 static void 1269 static void
1298 remove_page_from_used_list (page_header *ph) 1270 remove_page_from_used_list (page_header *ph)
1299 { 1271 {
1300 page_list_header *plh = PH_PLH (ph); 1272 page_list_header *plh = PH_PLH (ph);
1273
1274 #ifdef NEW_GC
1275 if (gc_in_progress && PH_PROTECTION_BIT (ph)) ABORT();
1276 /* cleanup: remove memory protection, zero page_header bits. */
1277 #endif /* not NEW_GC */
1301 1278
1302 #ifdef MEMORY_USAGE_STATS 1279 #ifdef MEMORY_USAGE_STATS
1303 PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph); 1280 PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
1304 PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph); 1281 PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
1305 #endif 1282 #endif
1375 /* Allocates a page from the free list. */ 1352 /* Allocates a page from the free list. */
1376 static page_header * 1353 static page_header *
1377 allocate_page_from_free_list (EMACS_INT needed_pages) 1354 allocate_page_from_free_list (EMACS_INT needed_pages)
1378 { 1355 {
1379 page_header *ph = 0; 1356 page_header *ph = 0;
1380 int i; 1357 EMACS_INT i;
1381 for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++) 1358 for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
1382 if ((ph = find_free_page_first_fit (needed_pages, 1359 if ((ph = find_free_page_first_fit (needed_pages,
1383 PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0) 1360 PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
1384 { 1361 {
1385 if (PH_N_PAGES (ph) > needed_pages) 1362 if (PH_N_PAGES (ph) > needed_pages)
1394 } 1371 }
1395 1372
1396 1373
1397 /* Allocates a new page, either from free list or by expanding the heap. */ 1374 /* Allocates a new page, either from free list or by expanding the heap. */
1398 static page_header * 1375 static page_header *
1399 allocate_new_page (page_list_header *plh, size_t size, int managed) 1376 allocate_new_page (page_list_header *plh, size_t size, EMACS_INT elemcount)
1400 { 1377 {
1401 EMACS_INT needed_pages = BYTES_TO_PAGES (size); 1378 EMACS_INT needed_pages = BYTES_TO_PAGES (size * elemcount);
1402 /* first check free list */ 1379 /* first check free list */
1403 page_header *result = allocate_page_from_free_list (needed_pages); 1380 page_header *result = allocate_page_from_free_list (needed_pages);
1404 if (!result) 1381 if (!result)
1405 /* expand heap */ 1382 /* expand heap */
1406 result = expand_heap (needed_pages); 1383 result = expand_heap (needed_pages);
1407 install_page_in_used_list (result, plh, size, managed); 1384 install_page_in_used_list (result, plh, size, elemcount);
1408 return result; 1385 return result;
1409 } 1386 }
1410 1387
1411 1388
1412 /* Selects the correct size class, tries to allocate a cell of this size 1389 /* Selects the correct size class, tries to allocate a cell of this size
1413 from the free list, if this fails, a new page is allocated. */ 1390 from the free list, if this fails, a new page is allocated. */
1414 static void * 1391 static void *
1415 mc_alloc_1 (size_t size, int managed) 1392 mc_alloc_1 (size_t size, EMACS_INT elemcount)
1416 { 1393 {
1417 page_list_header *plh = 0; 1394 page_list_header *plh = 0;
1418 page_header *ph = 0; 1395 page_header *ph = 0;
1419 void *result = 0; 1396 void *result = 0;
1420 1397
1421 if (managed) 1398 plh = USED_HEAP_PAGES (get_used_list_index (size));
1422 plh = USED_HEAP_PAGES (get_used_list_index (size));
1423 else
1424 plh = UNMANAGED_HEAP_PAGES (get_unmanaged_list_index (size));
1425 1399
1426 if (size == 0) 1400 if (size == 0)
1427 return 0; 1401 return 0;
1428 if (size < PAGE_SIZE_DIV_2) 1402 if ((elemcount == 1) && (size < (size_t) PAGE_SIZE_DIV_2))
1429 /* first check any free cells */ 1403 /* first check any free cells */
1430 ph = allocate_cell (plh); 1404 ph = allocate_cell (plh);
1431 if (!ph) 1405 if (!ph)
1432 /* allocate a new page */ 1406 /* allocate a new page */
1433 ph = allocate_new_page (plh, size, managed); 1407 ph = allocate_new_page (plh, size, elemcount);
1434 1408
1435 /* return first element of free list and remove it from the list */ 1409 /* return first element of free list and remove it from the list */
1436 result = (void*) PH_FREE_LIST (ph); 1410 result = (void*) PH_FREE_LIST (ph);
1437 PH_FREE_LIST (ph) = 1411 PH_FREE_LIST (ph) =
1438 NEXT_FREE (PH_FREE_LIST (ph)); 1412 NEXT_FREE (PH_FREE_LIST (ph));
1439 1413
1440 memset (result, '\0', size); 1414 memset (result, '\0', (size * elemcount));
1441 if (managed) 1415 MARK_LRECORD_AS_FREE (result);
1442 MARK_LRECORD_AS_FREE (result);
1443 1416
1444 /* bump used cells counter */ 1417 /* bump used cells counter */
1445 PH_CELLS_USED (ph)++; 1418 PH_CELLS_USED (ph) += elemcount;
1446 1419
1447 #ifdef MEMORY_USAGE_STATS 1420 #ifdef MEMORY_USAGE_STATS
1448 PLH_USED_CELLS (plh)++; 1421 PLH_USED_CELLS (plh) += elemcount;
1449 if (managed) 1422 PLH_USED_SPACE (plh) += size * elemcount;
1450 PLH_USED_SPACE (plh) += size;
1451 else
1452 PLH_USED_SPACE (plh) += PLH_SIZE (plh);
1453 #endif 1423 #endif
1454 1424
1455 return result; 1425 return result;
1426 }
1427
1428 /* Array allocation. */
1429 void *
1430 mc_alloc_array (size_t size, EMACS_INT elemcount)
1431 {
1432 return mc_alloc_1 (size, elemcount);
1456 } 1433 }
1457 1434
1458 void * 1435 void *
1459 mc_alloc (size_t size) 1436 mc_alloc (size_t size)
1460 { 1437 {
1461 return mc_alloc_1 (size, 1); 1438 return mc_alloc_1 (size, 1);
1462 } 1439 }
1463
1464 void *
1465 mc_alloc_unmanaged (size_t size)
1466 {
1467 return mc_alloc_1 (size, 0);
1468 }
1469
1470 1440
1471 1441
1472 1442
1473 /*--- sweep & free & finalize-------------------------------------------*/ 1443 /*--- sweep & free & finalize-------------------------------------------*/
1474 1444
1510 mark_free_list (page_header *ph) 1480 mark_free_list (page_header *ph)
1511 { 1481 {
1512 free_link *fl = PH_FREE_LIST (ph); 1482 free_link *fl = PH_FREE_LIST (ph);
1513 while (fl) 1483 while (fl)
1514 { 1484 {
1485 #ifdef NEW_GC
1486 SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK);
1487 #else /* not NEW_GC */
1515 SET_BIT (ph, get_mark_bit_index (fl, ph), 1); 1488 SET_BIT (ph, get_mark_bit_index (fl, ph), 1);
1489 #endif /* not NEW_GC */
1516 fl = NEXT_FREE (fl); 1490 fl = NEXT_FREE (fl);
1517 } 1491 }
1518 } 1492 }
1519 1493
1520 /* Finalize a page. You have to tell mc-alloc how to call your 1494 /* Finalize a page. You have to tell mc-alloc how to call your
1527 { 1501 {
1528 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph); 1502 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
1529 EMACS_INT heap_space_step = PH_CELL_SIZE (ph); 1503 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
1530 EMACS_INT mark_bit = 0; 1504 EMACS_INT mark_bit = 0;
1531 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph); 1505 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
1532 int bit = 0; 1506 unsigned int bit = 0;
1533 1507
1534 mark_free_list (ph); 1508 mark_free_list (ph);
1535 1509
1510 #ifdef NEW_GC
1511 /* ARRAY_BIT_HACK */
1512 if (PH_ARRAY_BIT (ph))
1513 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
1514 {
1515 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
1516 if (bit)
1517 {
1518 return;
1519 }
1520 }
1521 #endif /* NEW_GC */
1522
1536 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++) 1523 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
1537 { 1524 {
1538 GET_BIT (bit, ph, mark_bit); 1525 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
1539 if (!bit) 1526 #ifdef NEW_GC
1527 if (bit == WHITE)
1528 #else /* not NEW_GC */
1529 if (bit == 0)
1530 #endif /* not NEW_GC */
1540 { 1531 {
1541 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit)); 1532 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
1542 MC_ALLOC_CALL_FINALIZER ((void *) ptr); 1533 MC_ALLOC_CALL_FINALIZER ((void *) ptr);
1543 } 1534 }
1544 } 1535 }
1557 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph); 1548 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
1558 EMACS_INT heap_space_step = PH_CELL_SIZE (ph); 1549 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
1559 EMACS_INT mark_bit = 0; 1550 EMACS_INT mark_bit = 0;
1560 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph); 1551 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
1561 1552
1562 mark_free_list (ph);
1563
1564 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++) 1553 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
1565 { 1554 {
1566 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit)); 1555 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
1567 MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr); 1556 MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
1568 } 1557 }
1589 in the end, it is removed. If some cells are free, it is moved to the 1578 in the end, it is removed. If some cells are free, it is moved to the
1590 front of its page header list. Full pages stay where they are. */ 1579 front of its page header list. Full pages stay where they are. */
1591 static void 1580 static void
1592 sweep_page (page_header *ph) 1581 sweep_page (page_header *ph)
1593 { 1582 {
1594 char *heap_space = (char *) PH_HEAP_SPACE (ph); 1583 Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
1595 EMACS_INT heap_space_step = PH_CELL_SIZE (ph); 1584 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
1596 EMACS_INT mark_bit = 0; 1585 EMACS_INT mark_bit = 0;
1597 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph); 1586 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
1598 int bit = 0; 1587 unsigned int bit = 0;
1599 1588
1600 mark_free_list (ph); 1589 mark_free_list (ph);
1601 1590
1591 #ifdef NEW_GC
1592 /* ARRAY_BIT_HACK */
1593 if (PH_ARRAY_BIT (ph))
1594 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
1595 {
1596 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
1597 if (bit)
1598 {
1599 zero_mark_bits (ph);
1600 PH_BLACK_BIT (ph) = 0;
1601 return;
1602 }
1603 }
1604 #endif /* NEW_GC */
1605
1602 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++) 1606 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
1603 { 1607 {
1604 GET_BIT (bit, ph, mark_bit); 1608 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
1605 if (!bit) 1609 #ifdef NEW_GC
1610 if (bit == WHITE)
1611 #else /* not NEW_GC */
1612 if (bit == 0)
1613 #endif /* not NEW_GC */
1606 { 1614 {
1615 #ifdef NEW_GC
1616 GC_STAT_FREED;
1617 #endif /* NEW_GC */
1607 remove_cell (heap_space + (heap_space_step * mark_bit), ph); 1618 remove_cell (heap_space + (heap_space_step * mark_bit), ph);
1608 } 1619 }
1609 } 1620 }
1610 zero_mark_bits (ph); 1621 zero_mark_bits (ph);
1622 PH_BLACK_BIT (ph) = 0;
1611 if (PH_CELLS_USED (ph) == 0) 1623 if (PH_CELLS_USED (ph) == 0)
1612 remove_page_from_used_list (ph); 1624 remove_page_from_used_list (ph);
1613 else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph)) 1625 else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
1614 move_page_header_to_front (ph); 1626 move_page_header_to_front (ph);
1615 } 1627 }
1625 1637
1626 /* Frees the cell pointed to by ptr. */ 1638 /* Frees the cell pointed to by ptr. */
1627 void 1639 void
1628 mc_free (void *ptr) 1640 mc_free (void *ptr)
1629 { 1641 {
1630 page_header *ph = get_page_header (ptr); 1642 page_header *ph;
1631 assert (!PH_ON_FREE_LIST_P (ph)); 1643
1632 1644 #ifdef NEW_GC
1645 /* Do not allow manual freeing while a gc is running. Data is going
1646 to be freed next gc cycle. */
1647 if (write_barrier_enabled || gc_in_progress)
1648 return;
1649 #endif /* NEW_GC */
1650
1651 ph = get_page_header (ptr);
1652 assert (ph);
1653 assert (PH_PLH (ph));
1654 assert (PLH_LIST_TYPE (PH_PLH (ph)) != FREE_LIST);
1655
1656 #ifdef NEW_GC
1657 if (PH_ON_USED_LIST_P (ph))
1658 SET_BIT (ph, get_mark_bit_index (ptr, ph), WHITE);
1659 #endif /* NEW_GC */
1633 remove_cell (ptr, ph); 1660 remove_cell (ptr, ph);
1634 1661
1635 if (PH_CELLS_USED (ph) == 0) 1662 if (PH_CELLS_USED (ph) == 0)
1636 remove_page_from_used_list (ph); 1663 remove_page_from_used_list (ph);
1637 else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph)) 1664 else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
1640 1667
1641 1668
1642 /* Changes the size of the cell pointed to by ptr. 1669 /* Changes the size of the cell pointed to by ptr.
1643 Returns the new address of the new cell with new size. */ 1670 Returns the new address of the new cell with new size. */
1644 void * 1671 void *
1645 mc_realloc_1 (void *ptr, size_t size, int managed) 1672 mc_realloc_1 (void *ptr, size_t size, int elemcount)
1646 { 1673 {
1647 if (ptr) 1674 if (ptr)
1648 { 1675 {
1649 if (size) 1676 if (size * elemcount)
1650 { 1677 {
1651 void *result = mc_alloc_1 (size, managed); 1678 void *result = mc_alloc_1 (size, elemcount);
1652 size_t from_size = PH_CELL_SIZE (get_page_header (ptr)); 1679 size_t from_size = PH_CELL_SIZE (get_page_header (ptr));
1653 size_t cpy_size = size; 1680 size_t cpy_size = size * elemcount;
1654 if (size > from_size) 1681 if (cpy_size > from_size)
1655 cpy_size = from_size; 1682 cpy_size = from_size;
1656 memcpy (result, ptr, cpy_size); 1683 memcpy (result, ptr, cpy_size);
1657 mc_free (ptr); 1684 #ifdef ALLOC_TYPE_STATS
1685 inc_lrecord_stats (size, (struct lrecord_header *) result);
1686 #endif /* not ALLOC_TYPE_STATS */
1687 /* mc_free (ptr); not needed, will be collected next gc */
1658 return result; 1688 return result;
1659 } 1689 }
1660 else 1690 else
1661 { 1691 {
1662 mc_free (ptr); 1692 /* mc_free (ptr); not needed, will be collected next gc */
1663 return 0; 1693 return 0;
1664 } 1694 }
1665 } 1695 }
1666 else 1696 else
1667 return mc_alloc_1 (size, managed); 1697 return mc_alloc_1 (size, elemcount);
1668 } 1698 }
1669 1699
1670 void * 1700 void *
1671 mc_realloc (void *ptr, size_t size) 1701 mc_realloc (void *ptr, size_t size)
1672 { 1702 {
1673 return mc_realloc_1 (ptr, size, 1); 1703 return mc_realloc_1 (ptr, size, 1);
1674 } 1704 }
1675 1705
1676 void * 1706 void *
1677 mc_realloc_unmanaged (void *ptr, size_t size) 1707 mc_realloc_array (void *ptr, size_t size, EMACS_INT elemcount)
1678 { 1708 {
1679 return mc_realloc_1 (ptr, size, 0); 1709 return mc_realloc_1 (ptr, size, elemcount);
1680 } 1710 }
1681
1682 1711
1683 1712
1684 1713
1685 /*--- initialization ---------------------------------------------------*/ 1714 /*--- initialization ---------------------------------------------------*/
1686 1715
1687 /* Call once at the very beginning. */ 1716 /* Call once at the very beginning. */
1688 void 1717 void
1689 init_mc_allocator (void) 1718 init_mc_allocator (void)
1690 { 1719 {
1691 int i; 1720 EMACS_INT i;
1692 1721
1693 memset (&mc_allocator_globals, '\0', sizeof (mc_allocator_globals_type)); 1722 #ifdef MEMORY_USAGE_STATS
1723 MC_MALLOCED_BYTES = 0;
1724 #endif
1725
1726 /* init of pagesize dependent values */
1727 switch (SYS_PAGE_SIZE)
1728 {
1729 case 512: log_page_size = 9; break;
1730 case 1024: log_page_size = 10; break;
1731 case 2048: log_page_size = 11; break;
1732 case 4096: log_page_size = 12; break;
1733 case 8192: log_page_size = 13; break;
1734 case 16384: log_page_size = 14; break;
1735 default: ABORT ();
1736 }
1737
1738 page_size_div_2 = (EMACS_INT) SYS_PAGE_SIZE >> 1;
1739
1740 mc_allocator_globals.used_heap_pages =
1741 (page_list_header *) xmalloc_and_zero ((N_USED_PAGE_LISTS + 1)
1742 * sizeof (page_list_header));
1743 #ifdef MEMORY_USAGE_STATS
1744 MC_MALLOCED_BYTES += (N_USED_PAGE_LISTS + 1) * sizeof (page_list_header);
1745 #endif
1746
1747 mc_allocator_globals.ptr_lookup_table =
1748 (level_2_lookup_tree **)
1749 xmalloc_and_zero ((LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *));
1750 #ifdef MEMORY_USAGE_STATS
1751 MC_MALLOCED_BYTES += (LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *);
1752 #endif
1753
1754 #ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
1755 the_page_header_blocktype = Blocktype_new (struct page_header_blocktype);
1756 #endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */
1694 1757
1695 for (i = 0; i < N_USED_PAGE_LISTS; i++) 1758 for (i = 0; i < N_USED_PAGE_LISTS; i++)
1696 { 1759 {
1697 page_list_header *plh = USED_HEAP_PAGES (i); 1760 page_list_header *plh = USED_HEAP_PAGES (i);
1698 PLH_LIST_TYPE (plh) = USED_LIST; 1761 PLH_LIST_TYPE (plh) = USED_LIST;
1707 PLH_TOTAL_CELLS (plh) = 0; 1770 PLH_TOTAL_CELLS (plh) = 0;
1708 PLH_TOTAL_SPACE (plh) = 0; 1771 PLH_TOTAL_SPACE (plh) = 0;
1709 #endif 1772 #endif
1710 } 1773 }
1711 1774
1712 for (i = 0; i < N_UNMANAGED_PAGE_LISTS; i++)
1713 {
1714 page_list_header *plh = UNMANAGED_HEAP_PAGES (i);
1715 PLH_LIST_TYPE (plh) = UNMANAGED_LIST;
1716 PLH_SIZE (plh) = get_unmanaged_list_size_value (i);
1717 PLH_FIRST (plh) = 0;
1718 PLH_LAST (plh) = 0;
1719 PLH_MARK_BIT_FREE_LIST (plh) = 0;
1720 #ifdef MEMORY_USAGE_STATS
1721 PLH_PAGE_COUNT (plh) = 0;
1722 PLH_USED_CELLS (plh) = 0;
1723 PLH_USED_SPACE (plh) = 0;
1724 PLH_TOTAL_CELLS (plh) = 0;
1725 PLH_TOTAL_SPACE (plh) = 0;
1726 #endif
1727 }
1728
1729 for (i = 0; i < N_FREE_PAGE_LISTS; i++) 1775 for (i = 0; i < N_FREE_PAGE_LISTS; i++)
1730 { 1776 {
1731 page_list_header *plh = FREE_HEAP_PAGES (i); 1777 page_list_header *plh = FREE_HEAP_PAGES (i);
1732 PLH_LIST_TYPE (plh) = FREE_LIST; 1778 PLH_LIST_TYPE (plh) = FREE_LIST;
1733 PLH_SIZE (plh) = get_free_list_size_value (i); 1779 PLH_SIZE (plh) = get_free_list_size_value (i);
1741 PLH_TOTAL_CELLS (plh) = 0; 1787 PLH_TOTAL_CELLS (plh) = 0;
1742 PLH_TOTAL_SPACE (plh) = 0; 1788 PLH_TOTAL_SPACE (plh) = 0;
1743 #endif 1789 #endif
1744 } 1790 }
1745 1791
1792 #ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
1746 PAGE_HEADER_FREE_LIST = 0; 1793 PAGE_HEADER_FREE_LIST = 0;
1747 1794 #endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
1748 #ifdef MEMORY_USAGE_STATS 1795
1749 MC_MALLOCED_BYTES = sizeof (mc_allocator_globals); 1796 #ifdef MEMORY_USAGE_STATS
1797 MC_MALLOCED_BYTES += sizeof (mc_allocator_globals);
1750 #endif 1798 #endif
1751 1799
1752 init_lookup_table (); 1800 init_lookup_table ();
1753 } 1801 }
1754 1802
1763 */ 1811 */
1764 ()) 1812 ())
1765 { 1813 {
1766 Lisp_Object free_plhs = Qnil; 1814 Lisp_Object free_plhs = Qnil;
1767 Lisp_Object used_plhs = Qnil; 1815 Lisp_Object used_plhs = Qnil;
1768 Lisp_Object unmanaged_plhs = Qnil;
1769 Lisp_Object heap_sects = Qnil; 1816 Lisp_Object heap_sects = Qnil;
1770 int used_size = 0; 1817 EMACS_INT used_size = 0;
1771 int real_size = 0; 1818 EMACS_INT real_size = 0;
1772 1819
1773 int i; 1820 EMACS_INT i;
1774 1821
1775 for (i = 0; i < N_FREE_PAGE_LISTS; i++) 1822 for (i = 0; i < N_FREE_PAGE_LISTS; i++)
1776 if (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)) > 0) 1823 if (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)) > 0)
1777 free_plhs = 1824 free_plhs =
1778 acons (make_int (PLH_SIZE (FREE_HEAP_PAGES(i))), 1825 acons (make_int (PLH_SIZE (FREE_HEAP_PAGES(i))),
1779 list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)))), 1826 list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)))),
1780 free_plhs); 1827 free_plhs);
1781
1782 for (i = 0; i < N_UNMANAGED_PAGE_LISTS; i++)
1783 if (PLH_PAGE_COUNT (UNMANAGED_HEAP_PAGES(i)) > 0)
1784 unmanaged_plhs =
1785 acons (make_int (PLH_SIZE (UNMANAGED_HEAP_PAGES(i))),
1786 list5 (make_int (PLH_PAGE_COUNT (UNMANAGED_HEAP_PAGES(i))),
1787 make_int (PLH_USED_CELLS (UNMANAGED_HEAP_PAGES(i))),
1788 make_int (PLH_USED_SPACE (UNMANAGED_HEAP_PAGES(i))),
1789 make_int (PLH_TOTAL_CELLS (UNMANAGED_HEAP_PAGES(i))),
1790 make_int (PLH_TOTAL_SPACE (UNMANAGED_HEAP_PAGES(i)))),
1791 unmanaged_plhs);
1792 1828
1793 for (i = 0; i < N_USED_PAGE_LISTS; i++) 1829 for (i = 0; i < N_USED_PAGE_LISTS; i++)
1794 if (PLH_PAGE_COUNT (USED_HEAP_PAGES(i)) > 0) 1830 if (PLH_PAGE_COUNT (USED_HEAP_PAGES(i)) > 0)
1795 used_plhs = 1831 used_plhs =
1796 acons (make_int (PLH_SIZE (USED_HEAP_PAGES(i))), 1832 acons (make_int (PLH_SIZE (USED_HEAP_PAGES(i))),
1811 list3 (make_int (N_HEAP_SECTIONS), 1847 list3 (make_int (N_HEAP_SECTIONS),
1812 make_int (used_size), 1848 make_int (used_size),
1813 make_int (real_size)); 1849 make_int (real_size));
1814 1850
1815 return Fcons (make_int (PAGE_SIZE), 1851 return Fcons (make_int (PAGE_SIZE),
1816 list6 (heap_sects, 1852 list5 (heap_sects,
1817 Fnreverse (used_plhs), 1853 Fnreverse (used_plhs),
1818 Fnreverse (unmanaged_plhs),
1819 Fnreverse (free_plhs), 1854 Fnreverse (free_plhs),
1820 make_int (sizeof (mc_allocator_globals)), 1855 make_int (sizeof (mc_allocator_globals)),
1821 make_int (MC_MALLOCED_BYTES))); 1856 make_int (MC_MALLOCED_BYTES)));
1822 } 1857 }
1823 #endif /* MEMORY_USAGE_STATS */ 1858 #endif /* MEMORY_USAGE_STATS */
1827 { 1862 {
1828 #ifdef MEMORY_USAGE_STATS 1863 #ifdef MEMORY_USAGE_STATS
1829 DEFSUBR (Fmc_alloc_memory_usage); 1864 DEFSUBR (Fmc_alloc_memory_usage);
1830 #endif /* MEMORY_USAGE_STATS */ 1865 #endif /* MEMORY_USAGE_STATS */
1831 } 1866 }
1867
1868
1869 #ifdef NEW_GC
1870 /*--- incremental garbage collector ----------------------------------*/
1871
/* access dirty bit of page header */

/* Set the dirty bit of page header PH to VALUE (0 or 1).  The dirty
   bit is set when a write fault occurs on a protected page (see
   unprotect_page_and_mark_dirty) and cleared once the page's objects
   have been repushed (see repush_all_objects_on_page). */
void
set_dirty_bit (page_header *ph, unsigned int value)
{
  PH_DIRTY_BIT (ph) = value;
}
1878
/* Look up the page header of the page containing PTR and set its
   dirty bit to VALUE.  PTR must point into the mc-heap (get_page_header
   does no validity check). */
void
set_dirty_bit_for_address (void *ptr, unsigned int value)
{
  set_dirty_bit (get_page_header (ptr), value);
}
1884
/* Return the dirty bit of page header PH. */
unsigned int
get_dirty_bit (page_header *ph)
{
  return PH_DIRTY_BIT (ph);
}
1890
/* Return the dirty bit of the page containing PTR.  PTR must point
   into the mc-heap (get_page_header does no validity check). */
unsigned int
get_dirty_bit_for_address (void *ptr)
{
  return get_dirty_bit (get_page_header (ptr));
}
1896
1897
/* access protection bit of page header */

/* Set the protection bit of page header PH to VALUE (0 or 1).  The
   bit records whether the page's heap space is currently
   write-protected (see protect_heap_page / unprotect_heap_page). */
void
set_protection_bit (page_header *ph, unsigned int value)
{
  PH_PROTECTION_BIT (ph) = value;
}
1904
/* Look up the page header of the page containing PTR and set its
   protection bit to VALUE.  PTR must point into the mc-heap
   (get_page_header does no validity check). */
void
set_protection_bit_for_address (void *ptr, unsigned int value)
{
  set_protection_bit (get_page_header (ptr), value);
}
1910
/* Return the protection bit of page header PH. */
unsigned int
get_protection_bit (page_header *ph)
{
  return PH_PROTECTION_BIT (ph);
}
1916
/* Return the protection bit of the page containing PTR.  PTR must
   point into the mc-heap (get_page_header does no validity check). */
unsigned int
get_protection_bit_for_address (void *ptr)
{
  return get_protection_bit (get_page_header (ptr));
}
1922
1923
/* Returns the start of the page of the object pointed to by ptr.
   PTR must point into the mc-heap (get_page_header does no validity
   check). */
void *
get_page_start (void *ptr)
{
  return PH_HEAP_SPACE (get_page_header (ptr));
}
1930
1931 /* Make PAGE_SIZE globally available. */
1932 EMACS_INT
1933 mc_get_page_size ()
1934 {
1935 return PAGE_SIZE;
1936 }
1937
1938 /* Is the fault at ptr on a protected page? */
1939 EMACS_INT
1940 fault_on_protected_page (void *ptr)
1941 {
1942 page_header *ph = get_page_header_internal (ptr);
1943 return (ph
1944 && PH_HEAP_SPACE (ph)
1945 && (PH_HEAP_SPACE (ph) <= ptr)
1946 && ((void *) ((EMACS_INT) PH_HEAP_SPACE (ph)
1947 + PH_N_PAGES (ph) * PAGE_SIZE) > ptr)
1948 && (PH_PROTECTION_BIT (ph) == 1));
1949 }
1950
1951
1952 /* Protect the heap page of given page header ph if black objects are
1953 on the page. */
1954 static void
1955 protect_heap_page (page_header *ph)
1956 {
1957 if (PH_BLACK_BIT (ph))
1958 {
1959 void *heap_space = PH_HEAP_SPACE (ph);
1960 EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
1961 vdb_protect ((void *) heap_space, heap_space_size);
1962 PH_PROTECTION_BIT (ph) = 1;
1963 }
1964 }
1965
/* Protect all heap pages with black objects.  Installs the write
   barrier by walking every used page header (see protect_heap_page). */
void
protect_heap_pages (void)
{
  visit_all_used_page_headers (protect_heap_page);
}
1972
1973
1974 /* Remove protection (if there) of heap page of given page header
1975 ph. */
1976 static void
1977 unprotect_heap_page (page_header *ph)
1978 {
1979 if (PH_PROTECTION_BIT (ph))
1980 {
1981 void *heap_space = PH_HEAP_SPACE (ph);
1982 EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
1983 vdb_unprotect (heap_space, heap_space_size);
1984 PH_PROTECTION_BIT (ph) = 0;
1985 }
1986 }
1987
/* Remove protection for all heap pages which are protected.  Tears
   down the write barrier by walking every used page header (see
   unprotect_heap_page). */
void
unprotect_heap_pages (void)
{
  visit_all_used_page_headers (unprotect_heap_page);
}
1994
/* Remove protection and mark page dirty.  Called on a write fault to
   a protected page: lift the protection so the write can proceed and
   remember via the dirty bit that the page's objects must be
   re-examined (see repush_all_objects_on_page). */
void
unprotect_page_and_mark_dirty (void *ptr)
{
  page_header *ph = get_page_header (ptr);
  unprotect_heap_page (ph);
  PH_DIRTY_BIT (ph) = 1;
}
2003
2004 /* Repush all objects on dirty pages onto the mark stack. */
2005 int
2006 repush_all_objects_on_page (void *ptr)
2007 {
2008 int repushed_objects = 0;
2009 page_header *ph = get_page_header (ptr);
2010 Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
2011 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
2012 EMACS_INT mark_bit = 0;
2013 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
2014 unsigned int bit = 0;
2015 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
2016 {
2017 GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
2018 if (bit == BLACK)
2019 {
2020 repushed_objects++;
2021 gc_write_barrier
2022 (wrap_pointer_1 ((heap_space + (heap_space_step * mark_bit))));
2023 }
2024 }
2025 PH_BLACK_BIT (ph) = 0;
2026 PH_DIRTY_BIT (ph) = 0;
2027 return repushed_objects;
2028 }
2029
2030 /* Mark black if object is currently grey. This first checks, if the
2031 object is really allocated on the mc-heap. If it is, it can be
2032 marked black; if it is not, it cannot be marked. */
2033 EMACS_INT
2034 maybe_mark_black (void *ptr)
2035 {
2036 page_header *ph = get_page_header_internal (ptr);
2037 unsigned int bit = 0;
2038
2039 if (ph && PH_PLH (ph) && PH_ON_USED_LIST_P (ph))
2040 {
2041 GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
2042 if (bit == GREY)
2043 {
2044 if (!PH_BLACK_BIT (ph))
2045 PH_BLACK_BIT (ph) = 1;
2046 SET_BIT (ph, get_mark_bit_index (ptr, ph), BLACK);
2047 }
2048 return 1;
2049 }
2050 return 0;
2051 }
2052
2053 /* Only for debugging --- not used anywhere in the sources. */
2054 EMACS_INT
2055 object_on_heap_p (void *ptr)
2056 {
2057 page_header *ph = get_page_header_internal (ptr);
2058 return (ph && PH_ON_USED_LIST_P (ph));
2059 }
2060
2061 #endif /* NEW_GC */