annotate src/mc-alloc.c @ 5050:6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
-------------------- ChangeLog entries follow: --------------------
ChangeLog addition:
2010-02-20 Ben Wing <ben@xemacs.org>
* configure.ac (XE_COMPLEX_ARG):
Correct doc of --quick-build: It also doesn't check for Lisp shadows.
src/ChangeLog addition:
2010-02-20 Ben Wing <ben@xemacs.org>
* EmacsFrame.c:
* EmacsFrame.c (EmacsFrameRecomputeCellSize):
* alloca.c (i00afunc):
* buffer.c:
* buffer.c (MARKED_SLOT):
* buffer.c (complex_vars_of_buffer):
* cm.c:
* cm.c (cmcheckmagic):
* console.c:
* console.c (MARKED_SLOT):
* device-x.c:
* device-x.c (x_get_visual_depth):
* emacs.c (sort_args):
* eval.c (throw_or_bomb_out):
* event-stream.c:
* event-stream.c (Fadd_timeout):
* event-stream.c (Fadd_async_timeout):
* event-stream.c (Frecent_keys):
* events.c:
* events.c (Fdeallocate_event):
* events.c (event_pixel_translation):
* extents.c:
* extents.c (process_extents_for_insertion_mapper):
* fns.c (Fbase64_encode_region):
* fns.c (Fbase64_encode_string):
* fns.c (Fbase64_decode_region):
* fns.c (Fbase64_decode_string):
* font-lock.c:
* font-lock.c (find_context):
* frame-x.c:
* frame-x.c (x_wm_mark_shell_size_user_specified):
* frame-x.c (x_wm_mark_shell_position_user_specified):
* frame-x.c (x_wm_set_shell_iconic_p):
* frame-x.c (x_wm_set_cell_size):
* frame-x.c (x_wm_set_variable_size):
* frame-x.c (x_wm_store_class_hints):
* frame-x.c (x_wm_maybe_store_wm_command):
* frame-x.c (x_initialize_frame_size):
* frame.c (delete_frame_internal):
* frame.c (change_frame_size_1):
* free-hook.c (check_free):
* free-hook.c (note_block_input):
* free-hook.c (log_gcpro):
* gccache-gtk.c (gc_cache_lookup):
* gccache-x.c:
* gccache-x.c (gc_cache_lookup):
* glyphs-gtk.c:
* glyphs-gtk.c (init_image_instance_from_gdk_pixmap):
* glyphs-x.c:
* glyphs-x.c (extract_xpm_color_names):
* insdel.c:
* insdel.c (move_gap):
* keymap.c:
* keymap.c (keymap_lookup_directly):
* keymap.c (keymap_delete_inverse_internal):
* keymap.c (accessible_keymaps_mapper_1):
* keymap.c (where_is_recursive_mapper):
* lisp.h:
* lstream.c (make_lisp_buffer_stream_1):
* macros.c:
* macros.c (pop_kbd_macro_event):
* mc-alloc.c (remove_page_from_used_list):
* menubar-x.c:
* menubar-x.c (set_frame_menubar):
* ralloc.c:
* ralloc.c (obtain):
* ralloc.c (relinquish):
* ralloc.c (relocate_blocs):
* ralloc.c (resize_bloc):
* ralloc.c (r_alloc_free):
* ralloc.c (r_re_alloc):
* ralloc.c (r_alloc_thaw):
* ralloc.c (init_ralloc):
* ralloc.c (Free_Addr_Block):
* scrollbar-x.c:
* scrollbar-x.c (x_update_scrollbar_instance_status):
* sunplay.c (init_device):
* unexnt.c:
* unexnt.c (read_in_bss):
* unexnt.c (map_in_heap):
* window.c:
* window.c (real_window):
* window.c (window_display_lines):
* window.c (window_display_buffer):
* window.c (set_window_display_buffer):
* window.c (unshow_buffer):
* window.c (Fget_lru_window):
if (...) ABORT(); ---> assert();
More specifically:
if (x == y) ABORT (); --> assert (x != y);
if (x != y) ABORT (); --> assert (x == y);
if (x > y) ABORT (); --> assert (x <= y);
etc.
if (!x) ABORT (); --> assert (x);
if (x) ABORT (); --> assert (!x);
DeMorgan's Laws applied and manually simplified:
if (x && !y) ABORT (); --> assert (!x || y);
if (!x || y >= z) ABORT (); --> assert (x && y < z);
Checked to make sure that assert() of an expression with side
effects ensures that the side effects get executed even when
asserts are disabled, and added a comment about this being a
requirement of any "disabled assert" expression.
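(Editor's illustration, not part of the original ChangeLog: a minimal
sketch of the side-effect requirement described above. The macro
names follow XEmacs conventions but are assumptions; the point is
only that the disabled form must still evaluate its argument.)

    #ifdef USE_ASSERTIONS
    # define assert(x) ((x) ? (void) 0 : assert_failed (__FILE__, __LINE__, #x))
    #else
    # define assert(x) ((void) (x))  /* still evaluates x for side effects */
    #endif

With an empty expansion like "#define assert(x)" instead, a
hypothetical call such as assert (advance_pointer () != 0) would
silently skip the call whenever assertions were compiled out.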
* depend:
* make-src-depend:
* make-src-depend (PrintDeps):
Fix broken code in make-src-depend so it does what it was always
supposed to do: separate out config.h and lisp.h and all the
files they include into separate variables in the depend part of
Makefile, so that quick-build can turn off the
lisp.h/config.h/text.h/etc. dependencies of the source files, to
speed up recompilation.
author    Ben Wing <ben@xemacs.org>
date      Sat, 20 Feb 2010 05:05:54 -0600
parents   93bd75c45dca
children  92dc90c0bb40
/* New size-based allocator for XEmacs.
   Copyright (C) 2005 Marcus Crestani.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Synched up with: Not in FSF. */

#include <config.h>

#include "lisp.h"
#include "mc-alloc.h"
#include "getpagesize.h"


#if 0
# define USE_MARK_BITS_FREE_LIST 1
#endif
#if 1
# define BLOCKTYPE_ALLOC_PAGE_HEADER 1
#endif

/* Memory protection needs the real system-dependent pagesize. */
#ifndef WIN32_NATIVE
#include <unistd.h>		/* for getpagesize () */
#endif
#if defined (HAVE_GETPAGESIZE)
# define SYS_PAGE_SIZE getpagesize ()
#elif defined (_SC_PAGESIZE)
# define SYS_PAGE_SIZE sysconf (_SC_PAGESIZE)
#elif defined (_SC_PAGE_SIZE)
# define SYS_PAGE_SIZE sysconf (_SC_PAGE_SIZE)
#elif defined (get_page_size)
# define SYS_PAGE_SIZE get_page_size ()
#elif defined (PAGESIZE)
# define SYS_PAGE_SIZE PAGESIZE
#elif defined (PAGE_SIZE)
# define SYS_PAGE_SIZE PAGE_SIZE
#else
/* Valid page sizes are powers of 2. */
# define SYS_PAGE_SIZE 4096
#endif


/*--- configurable values ----------------------------------------------*/

/* Definition of size classes */

/* Heap used list constants: In the used heap, it is important to
   quickly find a free spot for a new object.  Therefore the size
   classes of the used heap are defined by the size of the cells on
   the pages.  The size classes should match common object sizes, to
   avoid wasting memory. */

/* Minimum object size in bytes. */
#if BITS_PER_EMACS_INT > 32
# define USED_LIST_MIN_OBJECT_SIZE 16
#else
# define USED_LIST_MIN_OBJECT_SIZE 8
#endif

/* The step size by which the size classes increase (up to the upper
   threshold).  This many bytes are mapped to a single used list: */
#if BITS_PER_EMACS_INT > 32
# define USED_LIST_LIN_STEP 8
#else
# define USED_LIST_LIN_STEP 4
#endif

/* The upper threshold should always be set to PAGE_SIZE/2, because if
   an object is larger than PAGE_SIZE/2 there is no room for any other
   object on this page.  Objects this big are kept in the page list of
   the multiple pages, since a quick search for free spots is not
   needed for this kind of page (because there are no free spots).
   PAGE_SIZE_DIV_2 defines the maximum size of a used space list. */
#define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2


/* Heap free list constants: In the unused heap, the size of
   consecutive memory tips the scales.  A page is the smallest entity
   that is ever requested.  Therefore, the size classes of the unused
   heap are defined by the number of consecutive pages. */
/* Sizes up to this many pages each have their own free list. */
#define FREE_LIST_LOWER_THRESHOLD 32
/* The step size by which the size classes increase (up to the upper
   threshold).  FREE_LIST_LIN_STEP number of sizes are mapped to a
   single free list for sizes between FREE_LIST_LOWER_THRESHOLD and
   FREE_LIST_UPPER_THRESHOLD. */
#define FREE_LIST_LIN_STEP 8
/* Sizes of at least this many pages are mapped to a single free
   list.  Blocks of memory larger than this number are all kept in a
   single list, which makes searching this list slow.  But objects
   that big are rare. */
#define FREE_LIST_UPPER_THRESHOLD 256


/* used heap list count */
#define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD		\
                             - USED_LIST_MIN_OBJECT_SIZE)	\
                            / USED_LIST_LIN_STEP) + 1 ) + 1

/* free heap list count */
#define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD		\
                             - FREE_LIST_LOWER_THRESHOLD)	\
                            / FREE_LIST_LIN_STEP)		\
                           + FREE_LIST_LOWER_THRESHOLD)
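/* Editor's worked example (illustrative, assuming a 4096-byte page on
   a 32-bit build): USED_LIST_UPPER_THRESHOLD is 2048, so
   N_USED_PAGE_LISTS = ((2048 - 8) / 4 + 1) + 1 = 512, and
   N_FREE_PAGE_LISTS = ((256 - 32) / 8) + 32 = 60. */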


/* Maximum number of separately added heap sections. */
#if BITS_PER_EMACS_INT > 32
# define MAX_HEAP_SECTS 2048
#else
# define MAX_HEAP_SECTS 768
#endif


/* Heap growth constants.  The heap increases by any number between
   the boundaries (unit is PAGE_SIZE). */
#define MIN_HEAP_INCREASE 256
#define MAX_HEAP_INCREASE 256 /* not used */

/* Every heap growth is calculated like this:
   needed_pages + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR)).
   So the growth of the heap is influenced by its current size, but
   kept between the MIN_HEAP_INCREASE and MAX_HEAP_INCREASE
   boundaries.  This reduces the number of heap sectors: the larger
   the heap grows, the larger the newly allocated chunks become. */
#define HEAP_GROWTH_DIVISOR 3
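/* Editor's worked example (illustrative, assuming PAGE_SIZE 4096):
   with a 12 MB heap, a request for 10 more pages grows the heap by
   max (MIN_HEAP_INCREASE, 10 + 12582912 / (4096 * 3))
   = max (256, 1034) = 1034 pages, i.e. about 4 MB (see expand_heap
   below). */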


/* Zero memory before putting on free lists. */
#define ZERO_MEM 1


#ifndef CHAR_BIT		/* should be included by limits.h */
# define CHAR_BIT BITS_PER_CHAR
#endif



/*--- values depending on PAGE_SIZE ------------------------------------*/

/* initialized in init_mc_allocator () */
static EMACS_INT log_page_size;
static EMACS_INT page_size_div_2;

#undef PAGE_SIZE
#define PAGE_SIZE SYS_PAGE_SIZE
#define LOG_PAGE_SIZE log_page_size
#define PAGE_SIZE_DIV_2 page_size_div_2


/* Constants for heap address to page header mapping. */
#define LOG_LEVEL2_SIZE 10
#define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE)
#if BITS_PER_EMACS_INT > 32
# define USE_HASH_TABLE 1
# define LOG_LEVEL1_SIZE 11
#else
# define LOG_LEVEL1_SIZE \
  (BITS_PER_EMACS_INT - LOG_LEVEL2_SIZE - LOG_PAGE_SIZE)
#endif
#define LEVEL1_SIZE (1 << LOG_LEVEL1_SIZE)

#ifdef USE_HASH_TABLE
# define HASH(hi) ((hi) & (LEVEL1_SIZE - 1))
# define L1_INDEX(p) HASH ((EMACS_UINT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#else
# define L1_INDEX(p) ((EMACS_UINT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#endif
#define L2_INDEX(p) (((EMACS_UINT) p >> LOG_PAGE_SIZE) & (LEVEL2_SIZE - 1))
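/* Editor's worked example (illustrative, assuming a 32-bit build with
   LOG_PAGE_SIZE 12, so LOG_LEVEL1_SIZE = 32 - 10 - 12 = 10): for
   p = 0x12345678, the page number is p >> 12 = 0x12345; L2_INDEX (p)
   is its low 10 bits, 0x345, and L1_INDEX (p) is p >> 22 = 0x48.
   PTR_LOOKUP_TABLE (0x48)->index[0x345] then yields the page_header
   (see the lookup-table functions below). */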




/*--- structs and typedefs ---------------------------------------------*/

/* Links the free lists (mark_bit_free_list and cell free list). */
typedef struct free_link
{
  struct lrecord_header lheader;
  struct free_link *next_free;
} free_link;


/* Header for pages.  They are held in a doubly linked list. */
typedef struct page_header
{
  struct page_header *next;	/* next page_header */
  struct page_header *prev;	/* previous page_header */
  /* Field plh holds a pointer to the corresponding page list header. */
  struct page_list_header *plh;	/* page list header */
  free_link *free_list;		/* links free cells on page */
  EMACS_INT n_pages;		/* number of pages */
  EMACS_INT cell_size;		/* size of cells on page */
  EMACS_INT cells_on_page;	/* total number of cells on page */
  EMACS_INT cells_used;		/* number of used cells on page */
  /* If the number of objects on the page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory area,
     and the field mark_bits holds the pointer to this area.  If the
     number of objects is smaller than BITS_PER_EMACS_INT, the mark
     bits are held in the mark_bit EMACS_INT directly, without an
     additional indirection. */
  unsigned int black_bit:1;	/* objects on page are black */
  unsigned int dirty_bit:1;	/* page is dirty */
  unsigned int protection_bit:1; /* page is write protected */
  unsigned int array_bit:1;	/* page holds arrays */
  Rawbyte *mark_bits;		/* pointer to mark bits */
  void *heap_space;		/* pointer to heap, where objects
				   are stored */
} page_header;


/* Different list types. */
enum list_type_enum {
  USED_LIST,
  FREE_LIST
};


/* Header for page lists.  Field list_type holds the type of the list. */
typedef struct page_list_header
{
  enum list_type_enum list_type; /* the type of the list */
  /* Size holds the size of one cell (in bytes) in a used heap list, or the
     size of the heap sector (in number of pages). */
  size_t size;			/* size of one cell / heap sector */
  page_header *first;		/* first of page_header list */
  page_header *last;		/* last of page_header list */
  /* If the number of objects on a page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, which is linked in this free list when not in use.  If the
     number of objects is smaller than BITS_PER_EMACS_INT, the mark
     bits are held in the mark bit EMACS_INT directly, without an
     additional indirection. */
  free_link *mark_bit_free_list;

#ifdef MEMORY_USAGE_STATS
  EMACS_INT page_count;		/* number of pages in list */
  EMACS_INT used_cells;		/* number of objects in list */
  EMACS_INT used_space;		/* used space */
  EMACS_INT total_cells;	/* number of available cells */
  EMACS_INT total_space;	/* available space */
#endif
} page_list_header;


/* The heap sectors are stored with their real start pointer and their
   real size.  Not aligned to PAGE_SIZE.  Needed for freeing heap sectors. */
typedef struct heap_sect {
  void *real_start;		/* real start pointer (NOT aligned) */
  size_t real_size;		/* NOT a multiple of PAGE_SIZE */
  void *start;			/* aligned start pointer */
  EMACS_INT n_pages;		/* multiple of PAGE_SIZE */
} heap_sect;


/* 2nd tree level for mapping of heap addresses to page headers. */
typedef struct level_2_lookup_tree {
  page_header *index[LEVEL2_SIZE]; /* link to page header */
  EMACS_INT key;		   /* high order address bits */
#ifdef USE_HASH_TABLE
  struct level_2_lookup_tree *hash_link; /* hash chain link */
#endif
} level_2_lookup_tree;



/*--- global variable definitions --------------------------------------*/

/* All global allocator variables are kept in this struct. */
typedef struct mc_allocator_globals_type {

  /* heap size */
  EMACS_INT heap_size;

  /* list of all separately allocated chunks of heap */
  heap_sect heap_sections[MAX_HEAP_SECTS];
  EMACS_INT n_heap_sections;

  /* Holds all allocated pages, each object size class in its separate list,
     to guarantee fast allocation on partially filled pages. */
  page_list_header *used_heap_pages;

  /* Holds all free pages in the heap.  N multiples of PAGE_SIZE are
     kept on the Nth free list.  Contiguous pages are coalesced. */
  page_list_header free_heap_pages[N_FREE_PAGE_LISTS];

  /* ptr lookup table */
  level_2_lookup_tree **ptr_lookup_table;

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  /* page header free list */
  free_link *page_header_free_list;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  EMACS_INT malloced_bytes;
#endif
} mc_allocator_globals_type;

mc_allocator_globals_type mc_allocator_globals;




/*--- macro accessors --------------------------------------------------*/

#define USED_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.used_heap_pages[i])

#define FREE_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.free_heap_pages[i])

#define PLH(plh) plh
# define PLH_LIST_TYPE(plh) PLH (plh)->list_type
# define PLH_SIZE(plh) PLH (plh)->size
# define PLH_FIRST(plh) PLH (plh)->first
# define PLH_LAST(plh) PLH (plh)->last
# define PLH_MARK_BIT_FREE_LIST(plh) PLH (plh)->mark_bit_free_list
#ifdef MEMORY_USAGE_STATS
# define PLH_PAGE_COUNT(plh) PLH (plh)->page_count
# define PLH_USED_CELLS(plh) PLH (plh)->used_cells
# define PLH_USED_SPACE(plh) PLH (plh)->used_space
# define PLH_TOTAL_CELLS(plh) PLH (plh)->total_cells
# define PLH_TOTAL_SPACE(plh) PLH (plh)->total_space
#endif

#define PH(ph) ph
# define PH_NEXT(ph) PH (ph)->next
# define PH_PREV(ph) PH (ph)->prev
# define PH_PLH(ph) PH (ph)->plh
# define PH_FREE_LIST(ph) PH (ph)->free_list
# define PH_N_PAGES(ph) PH (ph)->n_pages
# define PH_CELL_SIZE(ph) PH (ph)->cell_size
# define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page
# define PH_CELLS_USED(ph) PH (ph)->cells_used
# define PH_BLACK_BIT(ph) PH (ph)->black_bit
# define PH_DIRTY_BIT(ph) PH (ph)->dirty_bit
# define PH_PROTECTION_BIT(ph) PH (ph)->protection_bit
# define PH_ARRAY_BIT(ph) PH (ph)->array_bit
# define PH_MARK_BITS(ph) PH (ph)->mark_bits
# define PH_HEAP_SPACE(ph) PH (ph)->heap_space
#define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph))
#define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph))

#define HEAP_SIZE mc_allocator_globals.heap_size

#ifdef MEMORY_USAGE_STATS
# define MC_MALLOCED_BYTES mc_allocator_globals.malloced_bytes
#endif

#define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index]
#define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
#define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#define NEXT_FREE(free_list) ((free_link*) free_list)->next_free
#define FREE_LIST(free_list) (free_link*) (free_list)

#define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i]
#define LEVEL2(l2, i) l2->index[i]
# define LEVEL2_KEY(l2) l2->key
#ifdef USE_HASH_TABLE
# define LEVEL2_HASH_LINK(l2) l2->hash_link
#endif

#if ZERO_MEM
# define ZERO_HEAP_SPACE(ph) \
  memset (PH_HEAP_SPACE (ph), '\0', PH_N_PAGES (ph) * PAGE_SIZE)
# define ZERO_PAGE_HEADER(ph) memset (ph, '\0', sizeof (page_header))
#endif

#define div_PAGE_SIZE(x) (x >> LOG_PAGE_SIZE)
#define mult_PAGE_SIZE(x) (x << LOG_PAGE_SIZE)

#define BYTES_TO_PAGES(bytes) (div_PAGE_SIZE ((bytes + (PAGE_SIZE - 1))))

#define PAGE_SIZE_ALIGNMENT(address) \
  (void *) ((((EMACS_UINT) (address)) + PAGE_SIZE) & ~(PAGE_SIZE - 1))

#define PH_ON_FREE_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST))

#define PH_ON_USED_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST))


/* Number of mark bits: minimum 1, maximum 8. */
#define N_MARK_BITS 2
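/* Editor's note: two mark bits per cell allow more than two mark
   values; this file itself compares against WHITE and BLACK (see
   set_mark_bit and sweep_page below), which, together with the
   dirty/protection bits above, suggests support for tri-color-style
   incremental marking. */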



/************************************************************************/
/*			      MC Allocator				*/
/************************************************************************/

/* Set to 1 if memory becomes short. */
EMACS_INT memory_shortage;

/*--- misc functions ---------------------------------------------------*/

/* moved here from alloc.c */
#ifdef ERROR_CHECK_GC
static void
deadbeef_memory (void *ptr, Bytecount size)
{
  UINT_32_BIT *ptr4 = (UINT_32_BIT *) ptr;
  Bytecount beefs = size >> 2;

  /* In practice, size will always be a multiple of four. */
  while (beefs--)
    (*ptr4++) = 0xDEADBEEF; /* -559038737 base 10 */
}
#endif /* ERROR_CHECK_GC */

/* Visits all pages (page_headers) hooked into the used heap pages
   list and executes f with the current page header as
   argument.  Needed for sweep.  Returns number of processed pages. */
static EMACS_INT
visit_all_used_page_headers (EMACS_INT (*f) (page_header *ph))
{
  EMACS_INT number_of_pages_processed = 0;
  EMACS_INT i;
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_FIRST (USED_HEAP_PAGES (i)))
      {
	page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i));
	while (PH_NEXT (ph))
	  {
	    page_header *next = PH_NEXT (ph); /* in case f removes the page */
	    number_of_pages_processed += f (ph);
	    ph = next;
	  }
	number_of_pages_processed += f (ph);
      }
  return number_of_pages_processed;
}
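/* Editor's usage note: mc_sweep () and mc_finalize_for_disksave ()
   below are thin wrappers that hand sweep_page and
   finalize_page_for_disksave, respectively, to this visitor. */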




/*--- mapping of heap addresses to page headers and mark bits ----------*/

/* Sets a heap pointer and page header pair into the lookup table. */
static void
set_lookup_table (void *ptr, page_header *ph)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    {
      l2 = (level_2_lookup_tree*)
	xmalloc_and_zero (sizeof (level_2_lookup_tree));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES +=
	malloced_storage_size (0, sizeof (level_2_lookup_tree), 0);
#endif
      memset (l2, '\0', sizeof (level_2_lookup_tree));
#ifdef USE_HASH_TABLE
      LEVEL2_HASH_LINK (l2) = PTR_LOOKUP_TABLE (l1_index);
#endif
      PTR_LOOKUP_TABLE (l1_index) = l2;
      LEVEL2_KEY (l2) = l1_index;
    }
  LEVEL2 (l2, L2_INDEX (ptr)) = ph;
}


#ifdef UNSET_LOOKUP_TABLE
/* Set the lookup table to 0 for given heap address. */
static void
unset_lookup_table (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (l2) {
    LEVEL2 (l2, L2_INDEX (ptr)) = 0;
  }
}
#endif

/* Returns the page header of a given heap address, or 0 if not in table.
   For internal use, no error checking. */
static page_header *
get_page_header_internal (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    return 0;
  return LEVEL2 (l2, L2_INDEX (ptr));
}

/* Returns the page header of a given heap address, or 0 if not in table. */
static page_header *
get_page_header (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
  assert (l2);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  assert (LEVEL2 (l2, L2_INDEX (ptr)));
  return LEVEL2 (l2, L2_INDEX (ptr));
}

/* Returns the mark bit index of a given heap address. */
static EMACS_INT
get_mark_bit_index (void *ptr, page_header *ph)
{
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  if (cell_size)
    return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size)
      * N_MARK_BITS;
  else /* only one object on page */
    return 0;
}


/* Adds addresses of pages to lookup table. */
static void
add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages)
{
  Rawbyte *p = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages);
  for (p = (Rawbyte *) PH_HEAP_SPACE (ph);
       (EMACS_INT) p < end_of_section; p += PAGE_SIZE)
    set_lookup_table (p, ph);
}


/* Initializes lookup table. */
static void
init_lookup_table (void)
{
  EMACS_INT i;
  for (i = 0; i < LEVEL1_SIZE; i++)
    PTR_LOOKUP_TABLE (i) = 0;
}




/*--- mark bits --------------------------------------------------------*/

/*--- bit operations --- */

/* Allocates a bit array of length bits. */
static Rawbyte *
alloc_bit_array (size_t bits)
{
  Rawbyte *bit_array;
#ifdef USE_MARK_BITS_FREE_LIST
  size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte);
#else /* not USE_MARK_BITS_FREE_LIST */
  size_t size =
    ALIGN_FOR_TYPE (((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte),
		    Rawbyte *);
#endif /* not USE_MARK_BITS_FREE_LIST */
  if (size < sizeof (free_link)) size = sizeof (free_link);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0);
#endif
  bit_array = (Rawbyte *) xmalloc_and_zero (size);
  return bit_array;
}


/* Returns the bit value at pos. */
static EMACS_INT
get_bit (Rawbyte *bit_array, EMACS_INT pos)
{
#if N_MARK_BITS > 1
  EMACS_INT result = 0;
  EMACS_INT i;
#endif
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    result |= ((*bit_array & (1 << ((pos + i) % CHAR_BIT))) != 0) << i;
  return result;
#else
  return (*bit_array & (1 << (pos % CHAR_BIT))) != 0;
#endif
}


/* Sets bit_array's bit at pos to val. */
static void
set_bit (Rawbyte *bit_array, EMACS_INT pos, EMACS_UINT val)
{
#if N_MARK_BITS > 1
  EMACS_INT i;
#endif
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    if ((val >> i) & 1)
      *bit_array |= 1 << ((pos + i) % CHAR_BIT);
    else
      *bit_array &= ~(1 << ((pos + i) % CHAR_BIT));
#else
  if (val)
    *bit_array |= 1 << (pos % CHAR_BIT);
  else
    *bit_array &= ~(1 << (pos % CHAR_BIT));
#endif
}
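/* Editor's worked example (illustrative): with N_MARK_BITS 2, cell
   number 5 uses bit positions 10-11, i.e. bits 2 and 3 of byte 1 of
   the array.  Because positions are always even multiples of
   N_MARK_BITS, a 2-bit value never straddles a byte boundary here. */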


/*--- mark bit functions ---*/
#define USE_PNTR_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) > BITS_PER_EMACS_INT)
#define USE_WORD_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) <= BITS_PER_EMACS_INT)

#define GET_BIT_WORD(b, p) get_bit ((Rawbyte *) &b, p)
#define GET_BIT_PNTR(b, p) get_bit (b, p)

#define SET_BIT_WORD(b, p, v) set_bit ((Rawbyte *) &b, p, v)
#define SET_BIT_PNTR(b, p, v) set_bit (b, p, v)

#define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0
#define ZERO_MARK_BITS_PNTR(ph)					\
do {								\
  memset (PH_MARK_BITS (ph), '\0',				\
	  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS)		\
	   + CHAR_BIT - 1) / CHAR_BIT * sizeof (Rawbyte));	\
} while (0)

#define GET_BIT(bit, ph, p)				\
do {							\
  if (USE_PNTR_MARK_BITS (ph))				\
    bit = GET_BIT_PNTR (PH_MARK_BITS (ph), p);		\
  else							\
    bit = GET_BIT_WORD (PH_MARK_BITS (ph), p);		\
} while (0)

#define SET_BIT(ph, p, v)				\
do {							\
  if (USE_PNTR_MARK_BITS (ph))				\
    SET_BIT_PNTR (PH_MARK_BITS (ph), p, v);		\
  else							\
    SET_BIT_WORD (PH_MARK_BITS (ph), p, v);		\
} while (0)

#define ZERO_MARK_BITS(ph)				\
do {							\
  if (USE_PNTR_MARK_BITS (ph))				\
    ZERO_MARK_BITS_PNTR (ph);				\
  else							\
    ZERO_MARK_BITS_WORD (ph);				\
} while (0)


/* Allocates mark-bit space either from a free list or from the OS
   for the given page header. */
static Rawbyte *
alloc_mark_bits (page_header *ph)
{
  Rawbyte *result;
#ifdef USE_MARK_BITS_FREE_LIST
  if (PH_MARK_BIT_FREE_LIST (ph) == 0)
    result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
  else
    {
      result = (Rawbyte *) PH_MARK_BIT_FREE_LIST (ph);
      PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result);
    }
#else /* not USE_MARK_BITS_FREE_LIST */
  result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
#endif /* not USE_MARK_BITS_FREE_LIST */
  return result;
}


/* Frees by maintaining a free list. */
static void
free_mark_bits (page_header *ph)
{
#ifdef USE_MARK_BITS_FREE_LIST
  NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph);
  PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph));
#else /* not USE_MARK_BITS_FREE_LIST */
  if (PH_MARK_BITS (ph))
    free (PH_MARK_BITS (ph));
#endif /* not USE_MARK_BITS_FREE_LIST */
}


/* Installs mark bits and zeros bits. */
static void
install_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    {
      PH_MARK_BITS (ph) = alloc_mark_bits (ph);
      ZERO_MARK_BITS_PNTR (ph);
    }
  else
    ZERO_MARK_BITS_WORD (ph);
}


/* Cleans and frees the mark bits of the given page_header. */
static void
remove_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    free_mark_bits (ph);
}


/* Zeros all mark bits in given header. */
static void
zero_mark_bits (page_header *ph)
{
  ZERO_MARK_BITS (ph);
}


/* Returns mark bit for given heap pointer. */
EMACS_INT
get_mark_bit (void *ptr)
{
  EMACS_INT bit = 0;
  page_header *ph = get_page_header (ptr);
  gc_checking_assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
    }
  return bit;
}


/* Sets mark bit for given heap pointer. */
void
set_mark_bit (void *ptr, EMACS_INT value)
{
  page_header *ph = get_page_header (ptr);
  assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      if (value == BLACK)
	if (!PH_BLACK_BIT (ph))
	  PH_BLACK_BIT (ph) = 1;
      SET_BIT (ph, get_mark_bit_index (ptr, ph), value);
    }
}




/*--- page header functions --------------------------------------------*/

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
#include "blocktype.h"

struct page_header_blocktype
{
  Blocktype_declare (page_header);
} *the_page_header_blocktype;
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

/* Allocates a page header either from a free list or from the OS. */
static page_header *
alloc_page_header (void)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  page_header *result;
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
  result = Blocktype_alloc (the_page_header_blocktype);
  ZERO_PAGE_HEADER (result);
  return result;
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
  page_header *result;
  if (PAGE_HEADER_FREE_LIST == 0)
    {
      result =
	(page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
    }
  else
    {
      result = (page_header*) PAGE_HEADER_FREE_LIST;
      PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
    }
  return result;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}


/* Frees given page header by maintaining a free list. */
static void
free_page_header (page_header *ph)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  Blocktype_free (the_page_header_blocktype, ph);
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
#if ZERO_MEM
  ZERO_PAGE_HEADER (ph);
#endif
  NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
  PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}


/* Adds given page header to given page list header's list. */
static void
add_page_header_to_plh (page_header *ph, page_list_header *plh)
{
  /* insert at the front of the list */
  PH_PREV (ph) = 0;
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PLH (ph) = plh;
  /* if list is not empty, set prev in the first element */
  if (PLH_FIRST (plh))
    PH_PREV (PLH_FIRST (plh)) = ph;
  /* one element in list is first and last at the same time */
  PLH_FIRST (plh) = ph;
  if (!PLH_LAST (plh))
    PLH_LAST (plh) = ph;

#ifdef MEMORY_USAGE_STATS
  /* bump page count */
  PLH_PAGE_COUNT (plh)++;
#endif

}


/* Removes given page header from given page list header's list. */
static void
remove_page_header_from_plh (page_header *ph, page_list_header *plh)
{
  if (PLH_FIRST (plh) == ph)
    PLH_FIRST (plh) = PH_NEXT (ph);
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);

#ifdef MEMORY_USAGE_STATS
  /* decrease page count */
  PLH_PAGE_COUNT (plh)--;
#endif
}


/* Moves a page header to the front of its page header list.
   This is used during sweep: pages with some live objects are moved to
   the front.  This makes allocation faster: all pages with free slots
   can be found at the front of the list. */
static void
move_page_header_to_front (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);
  /* is page already first? */
  if (ph == PLH_FIRST (plh)) return;
  /* remove from list */
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
  /* insert at the front */
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PREV (ph) = 0;
  PH_PREV (PH_NEXT (ph)) = ph;
  PLH_FIRST (plh) = ph;
}




/*--- page list functions ----------------------------------------------*/

/* Returns the index of the used heap list according to given size. */
static int
get_used_list_index (size_t size)
{
  if (size <= USED_LIST_MIN_OBJECT_SIZE)
    {
      // printf ("size %d -> index %d\n", size, 0);
      return 0;
    }
  if (size <= (size_t) USED_LIST_UPPER_THRESHOLD)
    {
      // printf ("size %d -> index %d\n", size,
      //	 ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
      //	  / USED_LIST_LIN_STEP) + 1);
      return ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
	      / USED_LIST_LIN_STEP) + 1;
    }
  // printf ("size %d -> index %d\n", size, N_USED_PAGE_LISTS - 1);
  return N_USED_PAGE_LISTS - 1;
}

/* Returns the size of the used heap list according to given index. */
static size_t
get_used_list_size_value (int used_index)
{
  if (used_index < N_USED_PAGE_LISTS - 1)
    return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE;
  return 0;
}


/* Returns the index of the free heap list according to given size. */
static EMACS_INT
get_free_list_index (EMACS_INT n_pages)
{
  if (n_pages == 0)
    return 0;
  if (n_pages <= FREE_LIST_LOWER_THRESHOLD)
    return n_pages - 1;
  if (n_pages >= FREE_LIST_UPPER_THRESHOLD - 1)
    return N_FREE_PAGE_LISTS - 1;
  return ((n_pages - FREE_LIST_LOWER_THRESHOLD - 1)
	  / FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;

}
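/* Editor's worked example (illustrative): n_pages = 1..32 map to their
   own lists (indices 0..31); n_pages = 100 maps to index
   (100 - 32 - 1) / 8 + 32 = 40; anything >= 255 lands on the last
   list, index N_FREE_PAGE_LISTS - 1 = 59. */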


/* Returns the size in number of pages of the given free list at index. */
static size_t
get_free_list_size_value (EMACS_INT free_index)
{
  if (free_index < FREE_LIST_LOWER_THRESHOLD)
    return free_index + 1;
  if (free_index >= N_FREE_PAGE_LISTS)
    return FREE_LIST_UPPER_THRESHOLD;
  return ((free_index + 1 - FREE_LIST_LOWER_THRESHOLD)
	  * FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}


#ifdef MEMORY_USAGE_STATS
Bytecount
mc_alloced_storage_size (Bytecount claimed_size, struct overhead_stats *stats)
{
  size_t used_size =
    get_used_list_size_value (get_used_list_index (claimed_size));
  if (used_size == 0)
    used_size = mult_PAGE_SIZE (BYTES_TO_PAGES (claimed_size));

  if (stats)
    {
      stats->was_requested += claimed_size;
      stats->malloc_overhead += used_size - claimed_size;
    }

  return used_size;
}
#endif /* MEMORY_USAGE_STATS */



/*--- free heap functions ----------------------------------------------*/

/* Frees a heap section, if the heap_section is completely free. */
static EMACS_INT
free_heap_section (page_header *ph)
{
  EMACS_INT i;
  EMACS_INT removed = 0;
  for (i = 0; i < N_HEAP_SECTIONS; i++)
    if (!removed)
      {
	if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
	    && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
	  {
	    xfree_1 (HEAP_SECTION(i).real_start);
#ifdef MEMORY_USAGE_STATS
	    MC_MALLOCED_BYTES
	      -= malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
#endif

	    HEAP_SIZE -= PH_N_PAGES (ph) * PAGE_SIZE;

	    removed = 1;
	  }
      }
    else
      {
	HEAP_SECTION(i-1).real_start = HEAP_SECTION(i).real_start;
	HEAP_SECTION(i-1).real_size = HEAP_SECTION(i).real_size;
	HEAP_SECTION(i-1).start = HEAP_SECTION(i).start;
	HEAP_SECTION(i-1).n_pages = HEAP_SECTION(i).n_pages;
      }

  N_HEAP_SECTIONS = N_HEAP_SECTIONS - removed;

  return removed;
}

/* Removes page from free list. */
static void
remove_page_from_free_list (page_header *ph)
{
  remove_page_header_from_plh (ph, PH_PLH (ph));
  PH_PLH (ph) = 0;
}


/* Adds page to the corresponding free list. */
static void
add_page_to_free_list (page_header *ph)
{
  PH_PLH (ph) = FREE_HEAP_PAGES (get_free_list_index (PH_N_PAGES (ph)));
  add_page_header_to_plh (ph, PH_PLH (ph));
}


/* Merges two adjacent pages. */
static page_header *
merge_pages (page_header *first_ph, page_header *second_ph)
{
  /* merge */
  PH_N_PAGES (first_ph) += PH_N_PAGES (second_ph);
  /* clean up left over page header */
  free_page_header (second_ph);
  /* update lookup table */
  add_pages_to_lookup_table (first_ph, PH_N_PAGES (first_ph));

  return first_ph;
}


/* Checks if pages are adjacent, merges them, and adds the merged page
   to the free list. */
static void
merge_into_free_list (page_header *ph)
{
  /* check if you can coalesce adjacent pages */
  page_header *prev_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
				       - PAGE_SIZE));
  page_header *succ_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
				       + (PH_N_PAGES (ph) * PAGE_SIZE)));
  if (PH_ON_FREE_LIST_P (prev_ph))
    {
      remove_page_from_free_list (prev_ph);
      ph = merge_pages (prev_ph, ph);
    }
  if (PH_ON_FREE_LIST_P (succ_ph))
    {
      remove_page_from_free_list (succ_ph);
      ph = merge_pages (ph, succ_ph);
    }
  /* try to free heap_section, if the section is complete */
  if (!free_heap_section (ph))
    /* else add merged page to free list */
    add_page_to_free_list (ph);
}


/* Cuts given page header after n_pages, returns the first (cut) part, and
   puts the rest on the free list. */
static page_header *
split_page (page_header *ph, EMACS_INT n_pages)
{
  page_header *new_ph;
  EMACS_INT rem_pages = PH_N_PAGES (ph) - n_pages;

  /* remove the page from the free list if already hooked in */
  if (PH_PLH (ph))
    remove_page_from_free_list (ph);
  /* set new number of pages */
  PH_N_PAGES (ph) = n_pages;
  /* add new page to lookup table */
  add_pages_to_lookup_table (ph, n_pages);

  if (rem_pages)
    {
      /* build new page with the remainder */
      new_ph = alloc_page_header ();
      PH_N_PAGES (new_ph) = rem_pages;
      PH_HEAP_SPACE (new_ph) =
	(void*) ((EMACS_INT) (PH_HEAP_SPACE (ph)) + (n_pages * PAGE_SIZE));
      /* add new page to lookup table */
      add_pages_to_lookup_table (new_ph, rem_pages);
      /* hook the rest into free list */
      add_page_to_free_list (new_ph);
    }
  return ph;
}


/* Expands the heap by given number of pages. */
static page_header *
expand_heap (EMACS_INT needed_pages)
{
  page_header *ph;
  EMACS_INT n_pages;
  size_t real_size;
  void *real_start;

  /* determine number of pages the heap should grow */
  if (memory_shortage)
    n_pages = needed_pages;
  else
    n_pages = max (MIN_HEAP_INCREASE,
		   needed_pages
		   + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR)));

  /* get the real values */
  real_size = (n_pages * PAGE_SIZE) + PAGE_SIZE;
  real_start = xmalloc_and_zero (real_size);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, real_size, 0);
#endif

  /* maintain heap section count */
  if (N_HEAP_SECTIONS >= MAX_HEAP_SECTS)
    {
      stderr_out ("Increase number of MAX_HEAP_SECTS");
      ABORT ();
    }
  HEAP_SECTION(N_HEAP_SECTIONS).real_start = real_start;
  HEAP_SECTION(N_HEAP_SECTIONS).real_size = real_size;
  HEAP_SECTION(N_HEAP_SECTIONS).start = PAGE_SIZE_ALIGNMENT (real_start);
  HEAP_SECTION(N_HEAP_SECTIONS).n_pages = n_pages;
  N_HEAP_SECTIONS++;

  /* get page header */
  ph = alloc_page_header ();

  /* setup page header */
  PH_N_PAGES (ph) = n_pages;
  PH_HEAP_SPACE (ph) = PAGE_SIZE_ALIGNMENT (real_start);
  assert (((EMACS_INT) (PH_HEAP_SPACE (ph)) % PAGE_SIZE) == 0);
  HEAP_SIZE += n_pages * PAGE_SIZE;

  /* this was also done by allocate_lisp_storage */
  if (need_to_check_c_alloca)
    xemacs_c_alloca (0);

  /* return needed size, put rest on free list */
  return split_page (ph, needed_pages);
}




/*--- used heap functions ----------------------------------------------*/
/* Installs initial free list. */
static void
install_cell_free_list (page_header *ph, EMACS_INT elemcount)
{
  Rawbyte *p;
  EMACS_INT i;
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  /* write initial free list if cell_size is < PAGE_SIZE */
  p = (Rawbyte *) PH_HEAP_SPACE (ph);
  for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
    {
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (p));
      MARK_LRECORD_AS_FREE (p);
#endif
      if (elemcount == 1)
	NEXT_FREE (p) = FREE_LIST (p + cell_size);
      set_lookup_table (p, ph);
      p += cell_size;
    }
#ifdef ERROR_CHECK_GC
  assert (!LRECORD_FREE_P (p));
  MARK_LRECORD_AS_FREE (p);
#endif
  NEXT_FREE (p) = 0;
  set_lookup_table (p, ph);

  /* hook free list into header */
  PH_FREE_LIST (ph) = FREE_LIST (PH_HEAP_SPACE (ph));
}


/* Cleans the object space of the given page_header. */
static void
remove_cell_free_list (page_header *ph)
{
#if ZERO_MEM
  ZERO_HEAP_SPACE (ph);
#endif
  PH_FREE_LIST (ph) = 0;
}


/* Installs a new page and hooks it into given page_list_header. */
static page_header *
install_page_in_used_list (page_header *ph, page_list_header *plh,
			   size_t size, EMACS_INT elemcount)
{
  /* add to list */
  add_page_header_to_plh (ph, plh);

  /* determine cell size */
  if (PLH_SIZE (plh))
    PH_CELL_SIZE (ph) = PLH_SIZE (plh);
  else
    PH_CELL_SIZE (ph) = size;
  if (elemcount == 1)
    PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph);
  else
    {
      PH_CELLS_ON_PAGE (ph) = elemcount;
      PH_ARRAY_BIT (ph) = 1;
    }

  /* init cell count */
  PH_CELLS_USED (ph) = 0;

  /* install mark bits and initialize cell free list */
  install_mark_bits (ph);

  install_cell_free_list (ph, elemcount);

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph);
#endif

  return ph;
}


/* Cleans and frees a page, identified by the given page_header. */
static void
remove_page_from_used_list (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);

  assert (!(gc_in_progress && PH_PROTECTION_BIT (ph)));
  /* cleanup: remove memory protection, zero page_header bits. */

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
#endif

  /* clean up mark bits and cell free list */
  remove_cell_free_list (ph);
  if (PH_ON_USED_LIST_P (ph))
    remove_mark_bits (ph);

  /* clean up page header */
  PH_CELL_SIZE (ph) = 0;
  PH_CELLS_ON_PAGE (ph) = 0;
  PH_CELLS_USED (ph) = 0;

  /* remove from used list */
  remove_page_header_from_plh (ph, plh);

  /* move to free list */
  merge_into_free_list (ph);
}




/*--- allocation -------------------------------------------------------*/

/* Allocates from cell free list on already allocated pages. */
static page_header *
allocate_cell (page_list_header *plh)
{
  page_header *ph = PLH_FIRST (plh);
  if (ph)
    {
      if (PH_FREE_LIST (ph))
	/* elements free on first page */
	return ph;
      else if ((PH_NEXT (ph))
	       && (PH_FREE_LIST (PH_NEXT (ph))))
	/* elements free on second page */
	{
	  page_header *temp = PH_NEXT (ph);
	  /* move full page (first page) to end of list */
	  PH_NEXT (PLH_LAST (plh)) = ph;
	  PH_PREV (ph) = PLH_LAST (plh);
	  PLH_LAST (plh) = ph;
	  PH_NEXT (ph) = 0;
	  /* install second page as first page */
	  ph = temp;
	  PH_PREV (ph) = 0;
	  PLH_FIRST (plh) = ph;
	  return ph;
	}
    }
  return 0;
}


/* Finds a page which has at least the needed number of pages.
   Algorithm: FIRST FIT. */
static page_header *
find_free_page_first_fit (EMACS_INT needed_pages, page_header *ph)
{
  while (ph)
    {
      if (PH_N_PAGES (ph) >= needed_pages)
	return ph;
      ph = PH_NEXT (ph);
    }
  return 0;
}


/* Allocates a page from the free list. */
static page_header *
allocate_page_from_free_list (EMACS_INT needed_pages)
{
  page_header *ph = 0;
  EMACS_INT i;
  for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
    if ((ph = find_free_page_first_fit (needed_pages,
					PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
      {
	if (PH_N_PAGES (ph) > needed_pages)
	  return split_page (ph, needed_pages);
	else
	  {
	    remove_page_from_free_list (ph);
	    return ph;
	  }
      }
  return 0;
}


/* Allocates a new page, either from free list or by expanding the heap. */
static page_header *
allocate_new_page (page_list_header *plh, size_t size, EMACS_INT elemcount)
{
  EMACS_INT needed_pages = BYTES_TO_PAGES (size * elemcount);
  /* first check free list */
  page_header *result = allocate_page_from_free_list (needed_pages);
  if (!result)
    /* expand heap */
    result = expand_heap (needed_pages);
  install_page_in_used_list (result, plh, size, elemcount);
  return result;
}


/* Selects the correct size class, tries to allocate a cell of this size
   from the free list; if this fails, a new page is allocated. */
static void *
mc_alloc_1 (size_t size, EMACS_INT elemcount)
{
  page_list_header *plh = 0;
  page_header *ph = 0;
  void *result = 0;

  plh = USED_HEAP_PAGES (get_used_list_index (size));

  if (size == 0)
    return 0;
  if ((elemcount == 1) && (size < (size_t) PAGE_SIZE_DIV_2))
    /* first check any free cells */
    ph = allocate_cell (plh);
  if (!ph)
    /* allocate a new page */
    ph = allocate_new_page (plh, size, elemcount);

  /* return first element of free list and remove it from the list */
  result = (void*) PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) =
    NEXT_FREE (PH_FREE_LIST (ph));

  memset (result, '\0', (size * elemcount));
  MARK_LRECORD_AS_FREE (result);

  /* bump used cells counter */
  PH_CELLS_USED (ph) += elemcount;

#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (plh) += elemcount;
  PLH_USED_SPACE (plh) += size * elemcount;
#endif

  return result;
}

/* Array allocation. */
void *
mc_alloc_array (size_t size, EMACS_INT elemcount)
{
  return mc_alloc_1 (size, elemcount);
}

void *
mc_alloc (size_t size)
{
  return mc_alloc_1 (size, 1);
}
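/* Editor's usage sketch (hypothetical caller, not part of this file):

     void *obj = mc_alloc (40);            -- one 40-byte cell
     void *vec = mc_alloc_array (40, 16);  -- 16 contiguous 40-byte cells

   The single object is served from the 40-byte size class's cell free
   list; the array takes the elemcount path through mc_alloc_1 and
   gets its own page(s) with the array bit set. */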



/*--- sweep & free & finalize ------------------------------------------*/

/* Frees a heap pointer. */
static void
remove_cell (void *ptr, page_header *ph)
{
#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (PH_PLH (ph))--;
  if (PH_ON_USED_LIST_P (ph))
    PLH_USED_SPACE (PH_PLH (ph)) -=
      detagged_lisp_object_size ((const struct lrecord_header *) ptr);
  else
    PLH_USED_SPACE (PH_PLH (ph)) -= PH_CELL_SIZE (ph);
#endif
  if (PH_ON_USED_LIST_P (ph))
    {
#ifdef ALLOC_TYPE_STATS
      dec_lrecord_stats (PH_CELL_SIZE (ph),
			 (const struct lrecord_header *) ptr);
#endif /* ALLOC_TYPE_STATS */
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (ptr));
      deadbeef_memory (ptr, PH_CELL_SIZE (ph));
      MARK_LRECORD_AS_FREE (ptr);
#endif
    }

  /* hooks cell into free list */
  NEXT_FREE (ptr) = PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) = FREE_LIST (ptr);
  /* decrease cells used */
  PH_CELLS_USED (ph)--;
}


/* Marks all free list entries. */
static void
mark_free_list (page_header *ph)
{
  free_link *fl = PH_FREE_LIST (ph);
  while (fl)
    {
      SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK);
      fl = NEXT_FREE (fl);
    }
}


/* Finalize a page for disksave.  XEmacs calls this routine before it
   dumps the heap image.  You have to tell mc-alloc how to call your
   object's finalizer for disksave; therefore, you have to define the
   macro MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr).  This macro should
   do nothing other than test whether there is a finalizer and call it
   on the given argument, which is the heap address of the object.
   Returns number of processed pages. */
static EMACS_INT
finalize_page_for_disksave (page_header *ph)
{
  EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);

  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
      MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
    }
  return 1;
}


/* Finalizes the heap for disksave.  Returns number of processed
   pages. */
EMACS_INT
mc_finalize_for_disksave (void)
{
  return visit_all_used_page_headers (finalize_page_for_disksave);
}
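
/* Sketch of the macro contract described above.  The names
   `my_disksave_finalizer_t' and `my_lookup_finalizer' are hypothetical;
   the real definition is supplied by the code that embeds mc-alloc.
   The macro must only check for a finalizer and call it -- nothing
   more. */
#if 0
typedef void (*my_disksave_finalizer_t) (void *);
extern my_disksave_finalizer_t my_lookup_finalizer (void *ptr);

#define MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr)		\
do {								\
  my_disksave_finalizer_t fin = my_lookup_finalizer (ptr);	\
  if (fin)							\
    fin (ptr);							\
} while (0)
#endif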


/* Sweeps a page: all the non-marked cells are freed.  If the page is
   empty in the end, it is removed.  If some cells are free, it is
   moved to the front of its page header list.  Full pages stay where
   they are.  Returns number of processed pages. */
static EMACS_INT
sweep_page (page_header *ph)
{
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;

  mark_free_list (ph);

  /* ARRAY_BIT_HACK: an array is live as a whole; if any cell of an
     array page is marked, the entire page survives this sweep. */
  if (PH_ARRAY_BIT (ph))
    for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
      {
        GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
        if (bit)
          {
            zero_mark_bits (ph);
            PH_BLACK_BIT (ph) = 0;
            return 1;
          }
      }

  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == WHITE)
        {
          GC_STAT_FREED;
          remove_cell (heap_space + (heap_space_step * mark_bit), ph);
        }
    }
  zero_mark_bits (ph);
  PH_BLACK_BIT (ph) = 0;
  if (PH_CELLS_USED (ph) == 0)
    remove_page_from_used_list (ph);
  else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
    move_page_header_to_front (ph);

  return 1;
}


/* Sweeps the heap.  Returns number of processed pages. */
EMACS_INT
mc_sweep (void)
{
  return visit_all_used_page_headers (sweep_page);
}
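
/* Illustration (hypothetical driver, not the real GC entry point): the
   sweep only makes sense right after a mark phase has set the mark
   bits; every cell still WHITE is then reclaimed.  `example_mark_roots'
   stands in for the actual marking machinery in gc.c. */
#if 0
static void example_mark_roots (void);	/* colors reachable cells non-WHITE */

static void
example_collect (void)
{
  example_mark_roots ();	/* mark phase */
  mc_sweep ();			/* frees every cell still WHITE */
}
#endif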


/* Changes the size of the cell pointed to by ptr.
   Returns the address of the new cell with the new size. */
void *
mc_realloc_1 (void *ptr, size_t size, EMACS_INT elemcount)
{
  if (ptr)
    {
      if (size * elemcount)
	{
	  void *result = mc_alloc_1 (size, elemcount);
	  size_t from_size = PH_CELL_SIZE (get_page_header (ptr));
	  size_t cpy_size = size * elemcount;
	  if (cpy_size > from_size)
	    cpy_size = from_size;
	  memcpy (result, ptr, cpy_size);
#ifdef ALLOC_TYPE_STATS
	  inc_lrecord_stats (size, (struct lrecord_header *) result);
#endif /* ALLOC_TYPE_STATS */
	  return result;
	}
      else
	{
	  return 0;
	}
    }
  else
    return mc_alloc_1 (size, elemcount);
}

void *
mc_realloc (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 1);
}

void *
mc_realloc_array (void *ptr, size_t size, EMACS_INT elemcount)
{
  return mc_realloc_1 (ptr, size, elemcount);
}
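
/* Usage sketch: growing a buffer.  Note that mc_realloc copies into a
   freshly allocated cell and never explicitly frees the old one -- the
   old cell simply becomes garbage and is reclaimed by a later sweep.
   The helper name is illustrative. */
#if 0
static char *
example_grow (void)
{
  char *buf = (char *) mc_alloc (32);
  /* grow to 64 bytes; the first 32 bytes are copied over */
  return (char *) mc_realloc (buf, 64);
}
#endif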


/*--- initialization ---------------------------------------------------*/

/* Call once at the very beginning. */
void
init_mc_allocator (void)
{
  EMACS_INT i;

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES = 0;
#endif

  /* init of page-size-dependent values */
  switch (SYS_PAGE_SIZE)
    {
    case 512:   log_page_size = 9;  break;
    case 1024:  log_page_size = 10; break;
    case 2048:  log_page_size = 11; break;
    case 4096:  log_page_size = 12; break;
    case 8192:  log_page_size = 13; break;
    case 16384: log_page_size = 14; break;
    case 32768: log_page_size = 15; break;
    case 65536: log_page_size = 16; break;
    default:
      fprintf (stderr, "##### SYS_PAGE_SIZE=%d not supported #####\n",
	       SYS_PAGE_SIZE);
      ABORT ();
    }

  page_size_div_2 = (EMACS_UINT) SYS_PAGE_SIZE >> 1;

  mc_allocator_globals.used_heap_pages =
    (page_list_header *) xmalloc_and_zero ((N_USED_PAGE_LISTS + 1)
					   * sizeof (page_list_header));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (N_USED_PAGE_LISTS + 1) * sizeof (page_list_header);
#endif

  mc_allocator_globals.ptr_lookup_table =
    (level_2_lookup_tree **)
    xmalloc_and_zero ((LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *);
#endif

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  the_page_header_blocktype = Blocktype_new (struct page_header_blocktype);
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    {
      page_list_header *plh = USED_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = USED_LIST;
      PLH_SIZE (plh) = get_used_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    {
      page_list_header *plh = FREE_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = FREE_LIST;
      PLH_SIZE (plh) = get_free_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  PAGE_HEADER_FREE_LIST = 0;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += sizeof (mc_allocator_globals);
#endif

  init_lookup_table ();
}
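
/* Aside: the switch above simply hard-codes log_page_size =
   log2 (SYS_PAGE_SIZE), so that page arithmetic elsewhere can shift
   instead of divide.  An equivalent computation, for illustration
   only: */
#if 0
static void
example_compute_log_page_size (void)
{
  EMACS_INT ps;
  log_page_size = 0;
  for (ps = SYS_PAGE_SIZE; ps > 1; ps >>= 1)
    log_page_size++;
  /* holds only when SYS_PAGE_SIZE is a power of two */
  assert (SYS_PAGE_SIZE == (1 << log_page_size));
}
#endif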


/*--- lisp function for statistics -------------------------------------*/

#ifdef MEMORY_USAGE_STATS
DEFUN ("mc-alloc-memory-usage", Fmc_alloc_memory_usage, 0, 0, 0, /*
Returns stats about the mc-alloc memory usage.  See diagnose.el.
*/
       ())
{
  Lisp_Object free_plhs = Qnil;
  Lisp_Object used_plhs = Qnil;
  Lisp_Object heap_sects = Qnil;
  EMACS_INT used_size = 0;
  EMACS_INT real_size = 0;

  EMACS_INT i;

  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (FREE_HEAP_PAGES (i)) > 0)
      free_plhs =
	acons (make_int (PLH_SIZE (FREE_HEAP_PAGES (i))),
	       list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES (i)))),
	       free_plhs);

  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (USED_HEAP_PAGES (i)) > 0)
      used_plhs =
	acons (make_int (PLH_SIZE (USED_HEAP_PAGES (i))),
	       list5 (make_int (PLH_PAGE_COUNT (USED_HEAP_PAGES (i))),
		      make_int (PLH_USED_CELLS (USED_HEAP_PAGES (i))),
		      make_int (PLH_USED_SPACE (USED_HEAP_PAGES (i))),
		      make_int (PLH_TOTAL_CELLS (USED_HEAP_PAGES (i))),
		      make_int (PLH_TOTAL_SPACE (USED_HEAP_PAGES (i)))),
	       used_plhs);

  for (i = 0; i < N_HEAP_SECTIONS; i++)
    {
      used_size += HEAP_SECTION (i).n_pages * PAGE_SIZE;
      real_size +=
	malloced_storage_size (0, HEAP_SECTION (i).real_size, 0);
    }

  heap_sects =
    list3 (make_int (N_HEAP_SECTIONS),
	   make_int (used_size),
	   make_int (real_size));

  return Fcons (make_int (PAGE_SIZE),
		list5 (heap_sects,
		       Fnreverse (used_plhs),
		       Fnreverse (free_plhs),
		       make_int (sizeof (mc_allocator_globals)),
		       make_int (MC_MALLOCED_BYTES)));
}
#endif /* MEMORY_USAGE_STATS */
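
/* For reference, the Lisp-visible shape of the return value:

     (mc-alloc-memory-usage)
     => (PAGE-SIZE
	 (N-HEAP-SECTIONS USED-SIZE REAL-SIZE)
	 USED-PLHS			; alist, see below
	 FREE-PLHS			; alist of (CELL-SIZE PAGE-COUNT)
	 SIZEOF-ALLOCATOR-GLOBALS
	 MC-MALLOCED-BYTES)

   Each USED-PLHS entry is (CELL-SIZE PAGE-COUNT USED-CELLS USED-SPACE
   TOTAL-CELLS TOTAL-SPACE), matching the list5 call above. */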

void
syms_of_mc_alloc (void)
{
#ifdef MEMORY_USAGE_STATS
  DEFSUBR (Fmc_alloc_memory_usage);
#endif /* MEMORY_USAGE_STATS */
}


/*--- incremental garbage collector ------------------------------------*/

/* access dirty bit of page header */
void
set_dirty_bit (page_header *ph, unsigned int value)
{
  PH_DIRTY_BIT (ph) = value;
}

void
set_dirty_bit_for_address (void *ptr, unsigned int value)
{
  set_dirty_bit (get_page_header (ptr), value);
}

unsigned int
get_dirty_bit (page_header *ph)
{
  return PH_DIRTY_BIT (ph);
}

unsigned int
get_dirty_bit_for_address (void *ptr)
{
  return get_dirty_bit (get_page_header (ptr));
}


/* access protection bit of page header */
void
set_protection_bit (page_header *ph, unsigned int value)
{
  PH_PROTECTION_BIT (ph) = value;
}

void
set_protection_bit_for_address (void *ptr, unsigned int value)
{
  set_protection_bit (get_page_header (ptr), value);
}

unsigned int
get_protection_bit (page_header *ph)
{
  return PH_PROTECTION_BIT (ph);
}

unsigned int
get_protection_bit_for_address (void *ptr)
{
  return get_protection_bit (get_page_header (ptr));
}


/* Returns the start of the page of the object pointed to by ptr. */
void *
get_page_start (void *ptr)
{
  return PH_HEAP_SPACE (get_page_header (ptr));
}

/* Make PAGE_SIZE globally available. */
EMACS_INT
mc_get_page_size (void)
{
  return PAGE_SIZE;
}

/* Is the fault at ptr on a protected page? */
EMACS_INT
fault_on_protected_page (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph
	  && PH_HEAP_SPACE (ph)
	  && (PH_HEAP_SPACE (ph) <= ptr)
	  && ((void *) ((EMACS_INT) PH_HEAP_SPACE (ph)
			+ PH_N_PAGES (ph) * PAGE_SIZE) > ptr)
	  && (PH_PROTECTION_BIT (ph) == 1));
}


/* Protect the heap page of given page header ph if black objects are
   on the page.  Returns number of processed pages. */
static EMACS_INT
protect_heap_page (page_header *ph)
{
  if (PH_BLACK_BIT (ph))
    {
      void *heap_space = PH_HEAP_SPACE (ph);
      EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
      vdb_protect ((void *) heap_space, heap_space_size);
      PH_PROTECTION_BIT (ph) = 1;
      return 1;
    }
  return 0;
}

/* Protect all heap pages with black objects.  Returns number of
   processed pages. */
EMACS_INT
protect_heap_pages (void)
{
  return visit_all_used_page_headers (protect_heap_page);
}


/* Remove protection (if there) of heap page of given page header
   ph.  Returns number of processed pages. */
static EMACS_INT
unprotect_heap_page (page_header *ph)
{
  if (PH_PROTECTION_BIT (ph))
    {
      void *heap_space = PH_HEAP_SPACE (ph);
      EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
      vdb_unprotect (heap_space, heap_space_size);
      PH_PROTECTION_BIT (ph) = 0;
      return 1;
    }
  return 0;
}

/* Remove protection for all heap pages which are protected.  Returns
   number of processed pages. */
EMACS_INT
unprotect_heap_pages (void)
{
  return visit_all_used_page_headers (unprotect_heap_page);
}

/* Remove protection and mark page dirty. */
void
unprotect_page_and_mark_dirty (void *ptr)
{
  page_header *ph = get_page_header (ptr);
  unprotect_heap_page (ph);
  PH_DIRTY_BIT (ph) = 1;
}
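
/* Sketch of how these pieces fit together in the virtual-dirty-bit
   write barrier (the actual signal-handler plumbing lives in the
   platform vdb code; `example_fault_handler' and its argument are
   hypothetical).  A write to a protected page faults; the handler
   unprotects the page, marks it dirty, and lets the write proceed.
   Dirty pages are later rescanned via repush_all_objects_on_page
   below. */
#if 0
static int
example_fault_handler (void *fault_addr)
{
  if (fault_on_protected_page (fault_addr))
    {
      unprotect_page_and_mark_dirty (fault_addr);
      return 1;			/* fault handled, retry the write */
    }
  return 0;			/* genuine segfault, not ours */
}
#endif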

/* Repush all black objects on the (dirty) page containing ptr onto the
   mark stack.  Returns the number of repushed objects. */
int
repush_all_objects_on_page (void *ptr)
{
  int repushed_objects = 0;
  page_header *ph = get_page_header (ptr);
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == BLACK)
	{
	  repushed_objects++;
	  gc_write_barrier
	    (wrap_pointer_1 ((heap_space + (heap_space_step * mark_bit))));
	}
    }
  PH_BLACK_BIT (ph) = 0;
  PH_DIRTY_BIT (ph) = 0;
  return repushed_objects;
}

/* Mark the object black if it is currently grey.  This first checks
   whether the object is actually allocated on the mc-heap; if it is,
   it can be marked black, otherwise it cannot be marked here.  Returns
   1 if the object is on the mc-heap, 0 otherwise. */
EMACS_INT
maybe_mark_black (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  unsigned int bit = 0;

  if (ph && PH_PLH (ph) && PH_ON_USED_LIST_P (ph))
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
      if (bit == GREY)
	{
	  if (!PH_BLACK_BIT (ph))
	    PH_BLACK_BIT (ph) = 1;
	  SET_BIT (ph, get_mark_bit_index (ptr, ph), BLACK);
	}
      return 1;
    }
  return 0;
}

/* Only for debugging --- not used anywhere in the sources. */
EMACS_INT
object_on_heap_p (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph && PH_ON_USED_LIST_P (ph));
}