annotate src/mc-alloc.c @ 5157:1fae11d56ad2
redo memory-usage mechanism, add way of dynamically initializing Lisp objects
-------------------- ChangeLog entries follow: --------------------
lisp/ChangeLog addition:
2010-03-18 Ben Wing <ben@xemacs.org>
* diagnose.el (show-memory-usage):
Rewrite to take into account API changes in memory-usage functions.
src/ChangeLog addition:
2010-03-18 Ben Wing <ben@xemacs.org>
* alloc.c:
* alloc.c (disksave_object_finalization_1):
* alloc.c (lisp_object_storage_size):
* alloc.c (listu):
* alloc.c (listn):
* alloc.c (Fobject_memory_usage_stats):
* alloc.c (compute_memusage_stats_length):
* alloc.c (Fobject_memory_usage):
* alloc.c (Ftotal_object_memory_usage):
* alloc.c (malloced_storage_size):
* alloc.c (common_init_alloc_early):
* alloc.c (reinit_alloc_objects_early):
* alloc.c (reinit_alloc_early):
* alloc.c (init_alloc_once_early):
* alloc.c (syms_of_alloc):
* alloc.c (reinit_vars_of_alloc):
* buffer.c:
* buffer.c (struct buffer_stats):
* buffer.c (compute_buffer_text_usage):
* buffer.c (compute_buffer_usage):
* buffer.c (buffer_memory_usage):
* buffer.c (buffer_objects_create):
* buffer.c (syms_of_buffer):
* buffer.c (vars_of_buffer):
* console-impl.h (struct console_methods):
* dynarr.c (Dynarr_memory_usage):
* emacs.c (main_1):
* events.c (clear_event_resource):
* extents.c:
* extents.c (compute_buffer_extent_usage):
* extents.c (extent_objects_create):
* extents.h:
* faces.c:
* faces.c (compute_face_cachel_usage):
* faces.c (face_objects_create):
* faces.h:
* general-slots.h:
* glyphs.c:
* glyphs.c (compute_glyph_cachel_usage):
* glyphs.c (glyph_objects_create):
* glyphs.h:
* lisp.h:
* lisp.h (struct usage_stats):
* lrecord.h:
* lrecord.h (enum lrecord_type):
* lrecord.h (struct lrecord_implementation):
* lrecord.h (MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE):
* lrecord.h (DEFINE_DUMPABLE_LISP_OBJECT):
* lrecord.h (DEFINE_DUMPABLE_SIZABLE_LISP_OBJECT):
* lrecord.h (DEFINE_DUMPABLE_FROB_BLOCK_LISP_OBJECT):
* lrecord.h (DEFINE_DUMPABLE_FROB_BLOCK_SIZABLE_LISP_OBJECT):
* lrecord.h (DEFINE_DUMPABLE_INTERNAL_LISP_OBJECT):
* lrecord.h (DEFINE_DUMPABLE_SIZABLE_INTERNAL_LISP_OBJECT):
* lrecord.h (DEFINE_NODUMP_LISP_OBJECT):
* lrecord.h (DEFINE_NODUMP_SIZABLE_LISP_OBJECT):
* lrecord.h (DEFINE_NODUMP_FROB_BLOCK_LISP_OBJECT):
* lrecord.h (DEFINE_NODUMP_FROB_BLOCK_SIZABLE_LISP_OBJECT):
* lrecord.h (DEFINE_NODUMP_INTERNAL_LISP_OBJECT):
* lrecord.h (DEFINE_NODUMP_SIZABLE_INTERNAL_LISP_OBJECT):
* lrecord.h (MAKE_LISP_OBJECT):
* lrecord.h (DEFINE_DUMPABLE_MODULE_LISP_OBJECT):
* lrecord.h (DEFINE_DUMPABLE_MODULE_SIZABLE_LISP_OBJECT):
* lrecord.h (DEFINE_NODUMP_MODULE_LISP_OBJECT):
* lrecord.h (DEFINE_NODUMP_MODULE_SIZABLE_LISP_OBJECT):
* lrecord.h (MAKE_MODULE_LISP_OBJECT):
* lrecord.h (INIT_LISP_OBJECT):
* lrecord.h (INIT_MODULE_LISP_OBJECT):
* lrecord.h (UNDEF_LISP_OBJECT):
* lrecord.h (UNDEF_MODULE_LISP_OBJECT):
* lrecord.h (DECLARE_LISP_OBJECT):
* lrecord.h (DECLARE_MODULE_API_LISP_OBJECT):
* lrecord.h (DECLARE_MODULE_LISP_OBJECT):
* lstream.c:
* lstream.c (syms_of_lstream):
* lstream.c (vars_of_lstream):
* marker.c:
* marker.c (compute_buffer_marker_usage):
* mc-alloc.c (mc_alloced_storage_size):
* mc-alloc.h:
* mule-charset.c:
* mule-charset.c (struct charset_stats):
* mule-charset.c (compute_charset_usage):
* mule-charset.c (charset_memory_usage):
* mule-charset.c (mule_charset_objects_create):
* mule-charset.c (syms_of_mule_charset):
* mule-charset.c (vars_of_mule_charset):
* redisplay.c:
* redisplay.c (compute_rune_dynarr_usage):
* redisplay.c (compute_display_block_dynarr_usage):
* redisplay.c (compute_glyph_block_dynarr_usage):
* redisplay.c (compute_display_line_dynarr_usage):
* redisplay.c (compute_line_start_cache_dynarr_usage):
* redisplay.h:
* scrollbar-gtk.c (gtk_compute_scrollbar_instance_usage):
* scrollbar-msw.c (mswindows_compute_scrollbar_instance_usage):
* scrollbar-x.c (x_compute_scrollbar_instance_usage):
* scrollbar.c (compute_scrollbar_instance_usage):
* scrollbar.h:
* symbols.c:
* symbols.c (reinit_symbol_objects_early):
* symbols.c (init_symbols_once_early):
* symbols.c (reinit_symbols_early):
* symbols.c (defsymbol_massage_name_1):
* symsinit.h:
* ui-gtk.c:
* ui-gtk.c (emacs_gtk_object_getprop):
* ui-gtk.c (emacs_gtk_object_putprop):
* ui-gtk.c (ui_gtk_objects_create):
* unicode.c (compute_from_unicode_table_size_1):
* unicode.c (compute_to_unicode_table_size_1):
* unicode.c (compute_from_unicode_table_size):
* unicode.c (compute_to_unicode_table_size):
* window.c:
* window.c (struct window_stats):
* window.c (compute_window_mirror_usage):
* window.c (compute_window_usage):
* window.c (window_memory_usage):
* window.c (window_objects_create):
* window.c (syms_of_window):
* window.c (vars_of_window):
* window.h:
Redo memory-usage mechanism, make it general; add way of dynamically
initializing Lisp object types -- OBJECT_HAS_METHOD(), similar to
CONSOLE_HAS_METHOD().
(1) Create OBJECT_HAS_METHOD(), OBJECT_HAS_PROPERTY() etc. for
specifying that a Lisp object type has a particular method or
property. Call such methods with OBJECT_METH, MAYBE_OBJECT_METH,
OBJECT_METH_OR_GIVEN; retrieve properties with OBJECT_PROPERTY.
Methods that formerly required a DEFINE_*GENERAL_LISP_OBJECT() to
specify them (getprop, putprop, remprop, plist, disksave) now
instead use the dynamic-method mechanism. The main benefit of
this is that new methods or properties can be added without
requiring that the declaration statements of all existing methods
be modified. We have to make the `struct lrecord_implementation'
non-const, but I don't think this should have any effect on speed --
the only possible method that's really speed-critical is the
mark method, and we already extract those out into a separate
(non-const) array for increased cache locality.
Object methods need to be reinitialized after pdump, so we put
them in separate functions such as face_objects_create() and
extent_objects_create(), and call them appropriately from emacs.c.
The only object property that objects can currently specify,
`memusage_stats_list', is a Lisp object that gets staticpro()ed, so
it only needs to be set at dump time; but because it references
symbols that might not yet exist when a syms_of_() function runs, we
initialize it in vars_of_(). There is also an object property
(`num_extra_memusage_stats') that is automatically initialized from
`memusage_stats_list'; we do that in reinit_vars_of_alloc(), which
is called after all vars_of_() functions are called.
The `disksaver' method was renamed to `disksave' to correspond with
the name normally given to such functions (e.g. disksave_lstream()).
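To illustrate the registration pattern (a hedged sketch only -- the
macros follow the CONSOLE_HAS_METHOD() convention, and the exact
arguments and method signatures may differ from the real lrecord.h):

    /* In faces.c: a hypothetical getprop method for the face type.
       OBJECT_HAS_METHOD() relies on the type_methodname naming
       convention to find this function. */
    static Lisp_Object
    face_getprop (Lisp_Object obj, Lisp_Object prop)
    {
      /* ... look up PROP on OBJ ... */
      return Qunbound;
    }

    void
    face_objects_create (void)
    {
      OBJECT_HAS_METHOD (face, getprop);
    }

    /* Callers then dispatch dynamically, along the lines of
       MAYBE_OBJECT_METH (obj, disksave, (obj));
       which does nothing for types that never registered the method. */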
(2) Generalize the memory-usage mechanism in `buffer-memory-usage',
`window-memory-usage', `charset-memory-usage' into an object-type-
specific mechanism called by a single function
`object-memory-usage'. (The former function `object-memory-usage'
was renamed to `total-object-memory-usage'.) Generalize the mechanism
of different "slices" so that we can have different "classes" of
memory described and different "slices" onto each class; `t'
separates classes, `nil' separates slices. Currently we have
three classes defined: the memory of an object itself,
non-Lisp-object memory associated with the object (e.g. arrays or
dynarrs stored as fields in the object), and Lisp-object memory
associated with the object (other internal Lisp objects stored in
the object). This isn't completely finished yet and we might need
to further separate the "other internal Lisp objects" class into
two classes.
The memory-usage mechanism uses a `struct usage_stats' (renamed
from `struct overhead_stats') to describe a malloc-view onto a set
of allocated memory (listing how much was requested and various
types of overhead) and a more general `struct generic_usage_stats'
(with a `struct usage_stats' in it) to hold all statistics about
object memory. `struct generic_usage_stats' contains an array of
32 Bytecounts, which are statistics of unspecified semantics. The
intention is that individual types declare a corresponding struct
(e.g. `struct window_stats') with the same structure but with
specific fields in place of the array, corresponding to specific
statistics. The number of such statistics is an object property
computed from the list of tags (Lisp symbols describing the
statistics) stored in `memusage_stats_list'. The idea here is to
allow particular object types to customize the number and
semantics of the statistics while completely avoiding consing.
This doesn't matter so much yet, but the intention is to have the
memory usage of all objects computed at the end of GC, at the same
time as other statistics are currently computed. The values for
all statistics for a single type would be added up to compute
aggregate values for all objects of a specific type. To make this
efficient, we can't allow any memory allocation at all.
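As a concrete (and hedged) illustration of the intended struct
correspondence -- the field names below are invented for the example,
not copied from window.c:

    struct generic_usage_stats
    {
      struct usage_stats u;
      Bytecount othervals[32];    /* semantics defined per object type */
    };

    /* A type-specific view with the same layout, one named Bytecount
       per tag listed in `memusage_stats_list': */
    struct window_stats
    {
      struct usage_stats u;
      Bytecount line_start_cache; /* ancillary non-Lisp-object memory */
      Bytecount glyph_cache;      /* ... */
    };

Since the two layouts agree, code can fill in the named fields and
then treat the struct generically as an array of Bytecounts, which is
what lets `num_extra_memusage_stats' be computed mechanically from the
length of `memusage_stats_list'.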
(3) Create some additional functions for creating lists that
specify the elements directly as args rather than indirectly through
an array: listn() (number of args given), listu() (list terminated
by Qunbound).
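For example (an illustrative sketch; Qfoo etc. are stand-in symbols),
all three of the following would build the same three-element list:

    list3 (Qfoo, Qbar, Qbaz);
    listn (3, Qfoo, Qbar, Qbaz);
    listu (Qfoo, Qbar, Qbaz, Qunbound);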
(4) Delete a bit of remaining unused C window_config stuff, also
unused lrecord_type_popup_data.
author    Ben Wing <ben@xemacs.org>
date      Thu, 18 Mar 2010 10:50:06 -0500
parents   88bd4f3ef8e4
children  e374ea766cc1
/* New size-based allocator for XEmacs.
   Copyright (C) 2005 Marcus Crestani.
   Copyright (C) 2010 Ben Wing.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Synched up with: Not in FSF. */

#include <config.h>

#include "lisp.h"
#include "mc-alloc.h"
#include "getpagesize.h"


#if 0
# define USE_MARK_BITS_FREE_LIST 1
#endif
#if 1
# define BLOCKTYPE_ALLOC_PAGE_HEADER 1
#endif

/* Memory protection needs the real system-dependent pagesize. */
#ifndef WIN32_NATIVE
#include <unistd.h> /* for getpagesize () */
#endif
#if defined (HAVE_GETPAGESIZE)
# define SYS_PAGE_SIZE getpagesize ()
#elif defined (_SC_PAGESIZE)
# define SYS_PAGE_SIZE sysconf (_SC_PAGESIZE)
#elif defined (_SC_PAGE_SIZE)
# define SYS_PAGE_SIZE sysconf (_SC_PAGE_SIZE)
#elif defined(get_page_size)
# define SYS_PAGE_SIZE get_page_size ()
#elif defined(PAGESIZE)
# define SYS_PAGE_SIZE PAGESIZE
#elif defined(PAGE_SIZE)
# define SYS_PAGE_SIZE PAGE_SIZE
#else
/* Valid page sizes are powers of 2. */
# define SYS_PAGE_SIZE 4096
#endif


/*--- configurable values ----------------------------------------------*/

/* Definition of size classes */

/* Heap used list constants: In the used heap, it is important to
   quickly find a free spot for a new object.  Therefore the size
   classes of the used heap are defined by the size of the cells on
   the pages.  The size classes should match common object sizes, to
   avoid wasting memory. */

/* Minimum object size in bytes. */
#if BITS_PER_EMACS_INT > 32
# define USED_LIST_MIN_OBJECT_SIZE 16
#else
# define USED_LIST_MIN_OBJECT_SIZE 8
#endif

/* The step size by which the size classes increase (up to the upper
   threshold).  This many bytes are mapped to a single used list: */
#if BITS_PER_EMACS_INT > 32
# define USED_LIST_LIN_STEP 8
#else
# define USED_LIST_LIN_STEP 4
#endif

/* The upper threshold should always be set to PAGE_SIZE/2, because if
   an object is larger than PAGE_SIZE/2 there is no room for any other
   object on this page.  Objects this big are kept in the page list of
   the multiple pages, since a quick search for free spots is not
   needed for such pages (because there are no free spots).
   PAGE_SIZE_DIV_2 defines the maximum size of a used space list. */
#define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2


/* Heap free list constants: In the unused heap, the amount of
   contiguous memory is what matters.  A page is the smallest entity
   ever requested.  Therefore, the size classes of the unused heap
   are defined by the number of consecutive pages. */
/* Sizes up to this many pages each have their own free list. */
#define FREE_LIST_LOWER_THRESHOLD 32
/* The step size by which the size classes increase (up to the upper
   threshold).  FREE_LIST_LIN_STEP number of sizes are mapped to a
   single free list for sizes between FREE_LIST_LOWER_THRESHOLD and
   FREE_LIST_UPPER_THRESHOLD. */
#define FREE_LIST_LIN_STEP 8
/* Sizes of at least this many pages are mapped to a single free
   list.  Blocks of memory larger than this number are all kept in a
   single list, which makes searching this list slow.  But objects
   that big are really rare. */
#define FREE_LIST_UPPER_THRESHOLD 256


/* used heap list count */
#define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \
                             - USED_LIST_MIN_OBJECT_SIZE) \
                            / USED_LIST_LIN_STEP) + 1 ) + 1

/* free heap list count */
#define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \
                             - FREE_LIST_LOWER_THRESHOLD) \
                            / FREE_LIST_LIN_STEP) \
                           + FREE_LIST_LOWER_THRESHOLD)

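/* Worked example (illustrative): with 4096-byte pages on a 32-bit
   build, USED_LIST_MIN_OBJECT_SIZE is 8, USED_LIST_LIN_STEP is 4 and
   USED_LIST_UPPER_THRESHOLD is 2048, giving
   N_USED_PAGE_LISTS = ((2048 - 8) / 4 + 1) + 1 = 512 lists, and
   N_FREE_PAGE_LISTS = ((256 - 32) / 8) + 32 = 60 lists. */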

/* Maximum number of separately added heap sections. */
#if BITS_PER_EMACS_INT > 32
# define MAX_HEAP_SECTS 2048
#else
# define MAX_HEAP_SECTS 768
#endif


/* Heap growth constants.  The heap increases by some number of pages
   between these boundaries (unit is PAGE_SIZE). */
#define MIN_HEAP_INCREASE 256
#define MAX_HEAP_INCREASE 256 /* not used */

/* Every heap growth is calculated like this:
   needed_pages + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR)).
   So the growth of the heap is influenced by its current size, but
   kept between the MIN_HEAP_INCREASE and MAX_HEAP_INCREASE
   boundaries.  This reduces the number of heap sections: the larger
   the heap grows, the larger the newly allocated chunks become. */
#define HEAP_GROWTH_DIVISOR 3
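/* Worked example (illustrative): with 4096-byte pages and a 4 MB heap
   (1024 pages), a request for 2 more pages grows the heap by
   max (MIN_HEAP_INCREASE, 2 + 1024 / 3) = max (256, 343) = 343 pages;
   see expand_heap () below. */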


/* Zero memory before putting on free lists. */
#define ZERO_MEM 1


#ifndef CHAR_BIT /* should be included by limits.h */
# define CHAR_BIT BITS_PER_CHAR
#endif



/*--- values depending on PAGE_SIZE ------------------------------------*/

/* initialized in init_mc_allocator () */
static EMACS_INT log_page_size;
static EMACS_INT page_size_div_2;

#undef PAGE_SIZE
#define PAGE_SIZE SYS_PAGE_SIZE
#define LOG_PAGE_SIZE log_page_size
#define PAGE_SIZE_DIV_2 page_size_div_2


/* Constants for heap address to page header mapping. */
#define LOG_LEVEL2_SIZE 10
#define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE)
#if BITS_PER_EMACS_INT > 32
# define USE_HASH_TABLE 1
# define LOG_LEVEL1_SIZE 11
#else
# define LOG_LEVEL1_SIZE \
   (BITS_PER_EMACS_INT - LOG_LEVEL2_SIZE - LOG_PAGE_SIZE)
#endif
#define LEVEL1_SIZE (1 << LOG_LEVEL1_SIZE)

#ifdef USE_HASH_TABLE
# define HASH(hi) ((hi) & (LEVEL1_SIZE - 1))
# define L1_INDEX(p) HASH ((EMACS_UINT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#else
# define L1_INDEX(p) ((EMACS_UINT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#endif
#define L2_INDEX(p) (((EMACS_UINT) p >> LOG_PAGE_SIZE) & (LEVEL2_SIZE - 1))
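/* Illustrative address split: on a 32-bit build with 4096-byte pages
   (LOG_PAGE_SIZE == 12) and LOG_LEVEL2_SIZE == 10, LOG_LEVEL1_SIZE is
   32 - 10 - 12 = 10, so a heap address decomposes into a 10-bit
   level-1 index, a 10-bit level-2 index and a 12-bit offset within
   the page. */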



/*--- structs and typedefs ---------------------------------------------*/

/* Links the free lists (mark_bit_free_list and cell free list). */
typedef struct free_link
{
  struct lrecord_header lheader;
  struct free_link *next_free;
} free_link;


/* Header for pages.  They are held in a doubly linked list. */
typedef struct page_header
{
  struct page_header *next; /* next page_header */
  struct page_header *prev; /* previous page_header */
  /* Field plh holds a pointer to the according header of the page list. */
  struct page_list_header *plh; /* page list header */
  free_link *free_list; /* links free cells on page */
  EMACS_INT n_pages; /* number of pages */
  EMACS_INT cell_size; /* size of cells on page */
  EMACS_INT cells_on_page; /* total number of cells on page */
  EMACS_INT cells_used; /* number of used cells on page */
  /* If the number of objects on the page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, and the field mark_bits holds the pointer to this area.
     If the number of objects is smaller than BITS_PER_EMACS_INT, the
     mark bits are held in the mark_bit EMACS_INT directly, without
     an additional indirection. */
  unsigned int black_bit:1; /* objects on page are black */
  unsigned int dirty_bit:1; /* page is dirty */
  unsigned int protection_bit:1; /* page is write protected */
  unsigned int array_bit:1; /* page holds arrays */
  Rawbyte *mark_bits; /* pointer to mark bits */
  void *heap_space; /* pointer to heap, where objects
                       are stored */
} page_header;


/* Different list types. */
enum list_type_enum {
  USED_LIST,
  FREE_LIST
};

/* Header for page lists.  Field list_type holds the type of the list. */
typedef struct page_list_header
{
  enum list_type_enum list_type; /* the type of the list */
  /* Size holds the size of one cell (in bytes) in a used heap list, or the
     size of the heap sector (in number of pages). */
  size_t size; /* size of one cell / heap sector */
  page_header *first; /* first of page_header list */
  page_header *last; /* last of page_header list */
  /* If the number of objects on a page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, which is linked into this free list when not in use.  If
     the number of objects is smaller than BITS_PER_EMACS_INT, the
     mark bits are held in the mark bit EMACS_INT directly, without
     an additional indirection. */
  free_link *mark_bit_free_list;

#ifdef MEMORY_USAGE_STATS
  EMACS_INT page_count; /* number of pages in list */
  EMACS_INT used_cells; /* number of objects in list */
  EMACS_INT used_space; /* used space */
  EMACS_INT total_cells; /* number of available cells */
  EMACS_INT total_space; /* available space */
#endif
} page_list_header;


/* The heap sectors are stored with their real start pointer and their
   real size.  Not aligned to PAGE_SIZE.  Needed for freeing heap sectors. */
typedef struct heap_sect {
  void *real_start; /* real start pointer (NOT aligned) */
  size_t real_size; /* NOT multiple of PAGE_SIZE */
  void *start; /* aligned start pointer */
  EMACS_INT n_pages; /* multiple of PAGE_SIZE */
} heap_sect;


/* 2nd tree level for mapping of heap addresses to page headers. */
typedef struct level_2_lookup_tree {
  page_header *index[LEVEL2_SIZE]; /* link to page header */
  EMACS_INT key; /* high order address bits */
#ifdef USE_HASH_TABLE
  struct level_2_lookup_tree *hash_link; /* hash chain link */
#endif
} level_2_lookup_tree;


/*--- global variable definitions --------------------------------------*/

/* All global allocator variables are kept in this struct. */
typedef struct mc_allocator_globals_type {

  /* heap size */
  EMACS_INT heap_size;

  /* list of all separately allocated chunks of heap */
  heap_sect heap_sections[MAX_HEAP_SECTS];
  EMACS_INT n_heap_sections;

  /* Holds all allocated pages, each object size class in its separate list,
     to guarantee fast allocation on partially filled pages. */
  page_list_header *used_heap_pages;

  /* Holds all free pages in the heap.  N multiples of PAGE_SIZE are
     kept on the Nth free list.  Contiguous pages are coalesced. */
  page_list_header free_heap_pages[N_FREE_PAGE_LISTS];

  /* ptr lookup table */
  level_2_lookup_tree **ptr_lookup_table;

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  /* page header free list */
  free_link *page_header_free_list;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  EMACS_INT malloced_bytes;
#endif
} mc_allocator_globals_type;

mc_allocator_globals_type mc_allocator_globals;




/*--- macro accessors --------------------------------------------------*/

#define USED_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.used_heap_pages[i])

#define FREE_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.free_heap_pages[i])

#define PLH(plh) plh
# define PLH_LIST_TYPE(plh) PLH (plh)->list_type
# define PLH_SIZE(plh) PLH (plh)->size
# define PLH_FIRST(plh) PLH (plh)->first
# define PLH_LAST(plh) PLH (plh)->last
# define PLH_MARK_BIT_FREE_LIST(plh) PLH (plh)->mark_bit_free_list
#ifdef MEMORY_USAGE_STATS
# define PLH_PAGE_COUNT(plh) PLH (plh)->page_count
# define PLH_USED_CELLS(plh) PLH (plh)->used_cells
# define PLH_USED_SPACE(plh) PLH (plh)->used_space
# define PLH_TOTAL_CELLS(plh) PLH (plh)->total_cells
# define PLH_TOTAL_SPACE(plh) PLH (plh)->total_space
#endif

#define PH(ph) ph
# define PH_NEXT(ph) PH (ph)->next
# define PH_PREV(ph) PH (ph)->prev
# define PH_PLH(ph) PH (ph)->plh
# define PH_FREE_LIST(ph) PH (ph)->free_list
# define PH_N_PAGES(ph) PH (ph)->n_pages
# define PH_CELL_SIZE(ph) PH (ph)->cell_size
# define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page
# define PH_CELLS_USED(ph) PH (ph)->cells_used
# define PH_BLACK_BIT(ph) PH (ph)->black_bit
# define PH_DIRTY_BIT(ph) PH (ph)->dirty_bit
# define PH_PROTECTION_BIT(ph) PH (ph)->protection_bit
# define PH_ARRAY_BIT(ph) PH (ph)->array_bit
# define PH_MARK_BITS(ph) PH (ph)->mark_bits
# define PH_HEAP_SPACE(ph) PH (ph)->heap_space
#define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph))
#define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph))

#define HEAP_SIZE mc_allocator_globals.heap_size

#ifdef MEMORY_USAGE_STATS
# define MC_MALLOCED_BYTES mc_allocator_globals.malloced_bytes
#endif

#define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index]
#define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
#define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#define NEXT_FREE(free_list) ((free_link*) free_list)->next_free
#define FREE_LIST(free_list) (free_link*) (free_list)

#define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i]
#define LEVEL2(l2, i) l2->index[i]
# define LEVEL2_KEY(l2) l2->key
#ifdef USE_HASH_TABLE
# define LEVEL2_HASH_LINK(l2) l2->hash_link
#endif

#if ZERO_MEM
# define ZERO_HEAP_SPACE(ph) \
   memset (PH_HEAP_SPACE (ph), '\0', PH_N_PAGES (ph) * PAGE_SIZE)
# define ZERO_PAGE_HEADER(ph) memset (ph, '\0', sizeof (page_header))
#endif

#define div_PAGE_SIZE(x) (x >> LOG_PAGE_SIZE)
#define mult_PAGE_SIZE(x) (x << LOG_PAGE_SIZE)

#define BYTES_TO_PAGES(bytes) (div_PAGE_SIZE ((bytes + (PAGE_SIZE - 1))))

#define PAGE_SIZE_ALIGNMENT(address) \
  (void *) ((((EMACS_UINT) (address)) + PAGE_SIZE) & ~(PAGE_SIZE - 1))

#define PH_ON_FREE_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST))

#define PH_ON_USED_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST))

/* Number of mark bits: minimum 1, maximum 8. */
#define N_MARK_BITS 2



/************************************************************************/
/*                            MC Allocator                              */
/************************************************************************/

/* Set to 1 if memory becomes short. */
EMACS_INT memory_shortage;

/*--- misc functions ---------------------------------------------------*/

/* Visits all pages (page_headers) hooked into the used heap pages
   list and executes f with the current page header as
   argument.  Needed for sweep.  Returns number of processed pages. */
static EMACS_INT
visit_all_used_page_headers (EMACS_INT (*f) (page_header *ph))
{
  EMACS_INT number_of_pages_processed = 0;
  EMACS_INT i;
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_FIRST (USED_HEAP_PAGES (i)))
      {
        page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i));
        while (PH_NEXT (ph))
          {
            page_header *next = PH_NEXT (ph); /* in case f removes the page */
            number_of_pages_processed += f (ph);
            ph = next;
          }
        number_of_pages_processed += f (ph);
      }
  return number_of_pages_processed;
}




/*--- mapping of heap addresses to page headers and mark bits ----------*/

/* Sets a heap pointer and page header pair into the lookup table. */
static void
set_lookup_table (void *ptr, page_header *ph)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    {
      l2 = (level_2_lookup_tree*)
        xmalloc_and_zero (sizeof (level_2_lookup_tree));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES +=
        malloced_storage_size (0, sizeof (level_2_lookup_tree), 0);
#endif
      memset (l2, '\0', sizeof (level_2_lookup_tree));
#ifdef USE_HASH_TABLE
      LEVEL2_HASH_LINK (l2) = PTR_LOOKUP_TABLE (l1_index);
#endif
      PTR_LOOKUP_TABLE (l1_index) = l2;
      LEVEL2_KEY (l2) = l1_index;
    }
  LEVEL2 (l2, L2_INDEX (ptr)) = ph;
}


#ifdef UNSET_LOOKUP_TABLE
/* Sets the lookup table to 0 for the given heap address. */
static void
unset_lookup_table (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (l2) {
    LEVEL2 (l2, L2_INDEX (ptr)) = 0;
  }
}
#endif

/* Returns the page header of a given heap address, or 0 if not in table.
   For internal use, no error checking. */
static page_header *
get_page_header_internal (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    return 0;
  return LEVEL2 (l2, L2_INDEX (ptr));
}

/* Returns the page header of a given heap address, or 0 if not in table. */
static page_header *
get_page_header (void *ptr)
{
  EMACS_INT l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
  assert (l2);
#ifdef USE_HASH_TABLE
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  assert (LEVEL2 (l2, L2_INDEX (ptr)));
  return LEVEL2 (l2, L2_INDEX (ptr));
}

/* Returns the mark bit index of a given heap address. */
static EMACS_INT
get_mark_bit_index (void *ptr, page_header *ph)
{
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  if (cell_size)
    return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size)
      * N_MARK_BITS;
  else /* only one object on page */
    return 0;
}


/* Adds addresses of pages to lookup table. */
static void
add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages)
{
  Rawbyte *p = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages);
  for (p = (Rawbyte *) PH_HEAP_SPACE (ph);
       (EMACS_INT) p < end_of_section; p += PAGE_SIZE)
    set_lookup_table (p, ph);
}


/* Initializes lookup table. */
static void
init_lookup_table (void)
{
  EMACS_INT i;
  for (i = 0; i < LEVEL1_SIZE; i++)
    PTR_LOOKUP_TABLE (i) = 0;
}




/*--- mark bits --------------------------------------------------------*/

/*--- bit operations ---*/

/* Allocates a bit array of length bits. */
static Rawbyte *
alloc_bit_array (size_t bits)
{
  Rawbyte *bit_array;
#ifdef USE_MARK_BITS_FREE_LIST
  size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte);
#else /* not USE_MARK_BITS_FREE_LIST */
  size_t size =
    ALIGN_FOR_TYPE (((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof (Rawbyte),
                    Rawbyte *);
#endif /* not USE_MARK_BITS_FREE_LIST */
  if (size < sizeof (free_link)) size = sizeof (free_link);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0);
#endif
  bit_array = (Rawbyte *) xmalloc_and_zero (size);
  return bit_array;
}


/* Returns the bit value at pos. */
static EMACS_INT
get_bit (Rawbyte *bit_array, EMACS_INT pos)
{
#if N_MARK_BITS > 1
  EMACS_INT result = 0;
  EMACS_INT i;
#endif
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    result |= ((*bit_array & (1 << ((pos + i) % CHAR_BIT))) != 0) << i;
  return result;
#else
  return (*bit_array & (1 << (pos % CHAR_BIT))) != 0;
#endif
}

/* Sets the bit at pos to val. */
static void
set_bit (Rawbyte *bit_array, EMACS_INT pos, EMACS_UINT val)
{
#if N_MARK_BITS > 1
  EMACS_INT i;
#endif
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    if ((val >> i) & 1)
      *bit_array |= 1 << ((pos + i) % CHAR_BIT);
    else
      *bit_array &= ~(1 << ((pos + i) % CHAR_BIT));
#else
  if (val)
    *bit_array |= 1 << (pos % CHAR_BIT);
  else
    *bit_array &= ~(1 << (pos % CHAR_BIT));
#endif
}
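
/* Illustrative packing: with N_MARK_BITS == 2, cell i occupies bit
   positions 2*i and 2*i + 1 of the bit array (see
   get_mark_bit_index () above), so get_bit ()/set_bit () transfer a
   2-bit value (0..3) per cell; the BLACK value tested in
   set_mark_bit () below is one such multi-bit mark state. */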


/*--- mark bit functions ---*/
#define USE_PNTR_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) > BITS_PER_EMACS_INT)
#define USE_WORD_MARK_BITS(ph) \
  ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS) <= BITS_PER_EMACS_INT)

#define GET_BIT_WORD(b, p) get_bit ((Rawbyte *) &b, p)
#define GET_BIT_PNTR(b, p) get_bit (b, p)

#define SET_BIT_WORD(b, p, v) set_bit ((Rawbyte *) &b, p, v)
#define SET_BIT_PNTR(b, p, v) set_bit (b, p, v)

#define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0
#define ZERO_MARK_BITS_PNTR(ph)                                 \
do {                                                            \
  memset (PH_MARK_BITS (ph), '\0',                              \
          ((PH_CELLS_ON_PAGE (ph) * N_MARK_BITS)                \
           + CHAR_BIT - 1) / CHAR_BIT * sizeof (Rawbyte));      \
} while (0)

#define GET_BIT(bit, ph, p)                     \
do {                                            \
  if (USE_PNTR_MARK_BITS (ph))                  \
    bit = GET_BIT_PNTR (PH_MARK_BITS (ph), p);  \
  else                                          \
    bit = GET_BIT_WORD (PH_MARK_BITS (ph), p);  \
} while (0)

#define SET_BIT(ph, p, v)                       \
do {                                            \
  if (USE_PNTR_MARK_BITS (ph))                  \
    SET_BIT_PNTR (PH_MARK_BITS (ph), p, v);     \
  else                                          \
    SET_BIT_WORD (PH_MARK_BITS (ph), p, v);     \
} while (0)

#define ZERO_MARK_BITS(ph)                      \
do {                                            \
  if (USE_PNTR_MARK_BITS (ph))                  \
    ZERO_MARK_BITS_PNTR (ph);                   \
  else                                          \
    ZERO_MARK_BITS_WORD (ph);                   \
} while (0)

/* Allocates mark-bit space either from a free list or from the OS
   for the given page header. */
static Rawbyte *
alloc_mark_bits (page_header *ph)
{
  Rawbyte *result;
#ifdef USE_MARK_BITS_FREE_LIST
  if (PH_MARK_BIT_FREE_LIST (ph) == 0)
    result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
  else
    {
      result = (Rawbyte *) PH_MARK_BIT_FREE_LIST (ph);
      PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result);
    }
#else /* not USE_MARK_BITS_FREE_LIST */
  result = (Rawbyte *) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
#endif /* not USE_MARK_BITS_FREE_LIST */
  return result;
}


/* Frees by maintaining a free list. */
static void
free_mark_bits (page_header *ph)
{
#ifdef USE_MARK_BITS_FREE_LIST
  NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph);
  PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph));
#else /* not USE_MARK_BITS_FREE_LIST */
  if (PH_MARK_BITS (ph))
    free (PH_MARK_BITS (ph));
#endif /* not USE_MARK_BITS_FREE_LIST */
}


/* Installs mark bits and zeros bits. */
static void
install_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    {
      PH_MARK_BITS (ph) = alloc_mark_bits (ph);
      ZERO_MARK_BITS_PNTR (ph);
    }
  else
    ZERO_MARK_BITS_WORD (ph);
}


/* Cleans and frees the mark bits of the given page_header. */
static void
remove_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    free_mark_bits (ph);
}


/* Zeros all mark bits in given header. */
static void
zero_mark_bits (page_header *ph)
{
  ZERO_MARK_BITS (ph);
}

/* Returns mark bit for given heap pointer. */
EMACS_INT
get_mark_bit (void *ptr)
{
  EMACS_INT bit = 0;
  page_header *ph = get_page_header (ptr);
  gc_checking_assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
    }
  return bit;
}


/* Sets mark bit for given heap pointer. */
void
set_mark_bit (void *ptr, EMACS_INT value)
{
  page_header *ph = get_page_header (ptr);
  assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      if (value == BLACK)
        if (!PH_BLACK_BIT (ph))
          PH_BLACK_BIT (ph) = 1;
      SET_BIT (ph, get_mark_bit_index (ptr, ph), value);
    }
}




/*--- page header functions --------------------------------------------*/

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
#include "blocktype.h"

struct page_header_blocktype
{
  Blocktype_declare (page_header);
} *the_page_header_blocktype;
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

/* Allocates a page header either from a free list or from the OS. */
static page_header *
alloc_page_header (void)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  page_header *result;
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
  result = Blocktype_alloc (the_page_header_blocktype);
  ZERO_PAGE_HEADER (result);
  return result;
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
  page_header *result;
  if (PAGE_HEADER_FREE_LIST == 0)
    {
      result =
        (page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif
    }
  else
    {
      result = (page_header*) PAGE_HEADER_FREE_LIST;
      PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
    }
  return result;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}


/* Frees given page header by maintaining a free list. */
static void
free_page_header (page_header *ph)
{
#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  Blocktype_free (the_page_header_blocktype, ph);
#else /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
#if ZERO_MEM
  ZERO_PAGE_HEADER (ph);
#endif
  NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
  PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */
}

/* Adds given page header to given page list header's list. */
static void
add_page_header_to_plh (page_header *ph, page_list_header *plh)
{
  /* insert at the front of the list */
  PH_PREV (ph) = 0;
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PLH (ph) = plh;
  /* if list is not empty, set prev in the first element */
  if (PLH_FIRST (plh))
    PH_PREV (PLH_FIRST (plh)) = ph;
  /* one element in list is first and last at the same time */
  PLH_FIRST (plh) = ph;
  if (!PLH_LAST (plh))
    PLH_LAST (plh) = ph;

#ifdef MEMORY_USAGE_STATS
  /* bump page count */
  PLH_PAGE_COUNT (plh)++;
#endif

}


/* Removes given page header from given page list header's list. */
static void
remove_page_header_from_plh (page_header *ph, page_list_header *plh)
{
  if (PLH_FIRST (plh) == ph)
    PLH_FIRST (plh) = PH_NEXT (ph);
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);

#ifdef MEMORY_USAGE_STATS
  /* decrease page count */
  PLH_PAGE_COUNT (plh)--;
#endif
}


/* Moves a page header to the front of its page header list.
   This is used during sweep: pages with some live objects are moved to
   the front.  This makes allocation faster; all pages with free slots
   can be found at the front of the list. */
static void
move_page_header_to_front (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);
  /* is page already first? */
  if (ph == PLH_FIRST (plh)) return;
  /* remove from list */
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
  /* insert at the front */
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PREV (ph) = 0;
  PH_PREV (PH_NEXT (ph)) = ph;
  PLH_FIRST (plh) = ph;
}



/*--- page list functions ----------------------------------------------*/

/* Returns the index of the used heap list according to given size. */
static int
get_used_list_index (size_t size)
{
  if (size <= USED_LIST_MIN_OBJECT_SIZE)
    {
      // printf ("size %d -> index %d\n", size, 0);
      return 0;
    }
  if (size <= (size_t) USED_LIST_UPPER_THRESHOLD)
    {
      // printf ("size %d -> index %d\n", size,
      //         ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
      //          / USED_LIST_LIN_STEP) + 1);
      return ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
              / USED_LIST_LIN_STEP) + 1;
    }
  // printf ("size %d -> index %d\n", size, N_USED_PAGE_LISTS - 1);
  return N_USED_PAGE_LISTS - 1;
}

/* Returns the size of the used heap list according to given index. */
static size_t
get_used_list_size_value (int used_index)
{
  if (used_index < N_USED_PAGE_LISTS - 1)
    return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE;
  return 0;
}


/* Returns the index of the free heap list according to given size. */
static EMACS_INT
get_free_list_index (EMACS_INT n_pages)
{
  if (n_pages == 0)
    return 0;
  if (n_pages <= FREE_LIST_LOWER_THRESHOLD)
    return n_pages - 1;
  if (n_pages >= FREE_LIST_UPPER_THRESHOLD - 1)
    return N_FREE_PAGE_LISTS - 1;
  return ((n_pages - FREE_LIST_LOWER_THRESHOLD - 1)
          / FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}


/* Returns the size in number of pages of the given free list at index. */
static size_t
get_free_list_size_value (EMACS_INT free_index)
{
  if (free_index < FREE_LIST_LOWER_THRESHOLD)
    return free_index + 1;
  if (free_index >= N_FREE_PAGE_LISTS)
    return FREE_LIST_UPPER_THRESHOLD;
  return ((free_index + 1 - FREE_LIST_LOWER_THRESHOLD)
          * FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}

#ifdef MEMORY_USAGE_STATS
Bytecount
mc_alloced_storage_size (Bytecount claimed_size, struct usage_stats *stats)
{
  size_t used_size =
    get_used_list_size_value (get_used_list_index (claimed_size));
  if (used_size == 0)
    used_size = mult_PAGE_SIZE (BYTES_TO_PAGES (claimed_size));

  if (stats)
    {
      stats->was_requested += claimed_size;
      stats->malloc_overhead += used_size - claimed_size;
    }

  return used_size;
}
#endif /* MEMORY_USAGE_STATS */
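
/* Worked example (illustrative): on a 32-bit build, a claimed size of
   13 bytes maps to used-list index ((13 - 8 - 1) / 4) + 1 = 2, whose
   cell size is 2 * 4 + 8 = 16 bytes; mc_alloced_storage_size () would
   then add 13 to was_requested and 3 to malloc_overhead. */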


/*--- free heap functions ----------------------------------------------*/

/* Frees a heap section, if the heap section is completely free. */
static EMACS_INT
free_heap_section (page_header *ph)
{
  EMACS_INT i;
  EMACS_INT removed = 0;
  for (i = 0; i < N_HEAP_SECTIONS; i++)
    if (!removed)
      {
        if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
            && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
          {
            xfree_1 (HEAP_SECTION(i).real_start);
#ifdef MEMORY_USAGE_STATS
            MC_MALLOCED_BYTES
              -= malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
#endif

            HEAP_SIZE -= PH_N_PAGES (ph) * PAGE_SIZE;

            removed = 1;
          }
      }
    else
      {
        HEAP_SECTION(i-1).real_start = HEAP_SECTION(i).real_start;
        HEAP_SECTION(i-1).real_size = HEAP_SECTION(i).real_size;
        HEAP_SECTION(i-1).start = HEAP_SECTION(i).start;
        HEAP_SECTION(i-1).n_pages = HEAP_SECTION(i).n_pages;
      }

  N_HEAP_SECTIONS = N_HEAP_SECTIONS - removed;

  return removed;
}

/* Removes page from free list. */
static void
remove_page_from_free_list (page_header *ph)
{
  remove_page_header_from_plh (ph, PH_PLH (ph));
  PH_PLH (ph) = 0;
}


/* Adds page to according free list. */
static void
add_page_to_free_list (page_header *ph)
{
  PH_PLH (ph) = FREE_HEAP_PAGES (get_free_list_index (PH_N_PAGES (ph)));
  add_page_header_to_plh (ph, PH_PLH (ph));
}


/* Merges two adjacent pages. */
static page_header *
merge_pages (page_header *first_ph, page_header *second_ph)
{
  /* merge */
  PH_N_PAGES (first_ph) += PH_N_PAGES (second_ph);
  /* clean up left over page header */
  free_page_header (second_ph);
  /* update lookup table */
  add_pages_to_lookup_table (first_ph, PH_N_PAGES (first_ph));

  return first_ph;
}


/* Checks if pages are adjacent, merges them, and adds merged page to
   free list. */
static void
merge_into_free_list (page_header *ph)
{
  /* check if you can coalesce adjacent pages */
  page_header *prev_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
                                       - PAGE_SIZE));
  page_header *succ_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
                                       + (PH_N_PAGES (ph) * PAGE_SIZE)));
  if (PH_ON_FREE_LIST_P (prev_ph))
    {
      remove_page_from_free_list (prev_ph);
      ph = merge_pages (prev_ph, ph);
    }
  if (PH_ON_FREE_LIST_P (succ_ph))
    {
      remove_page_from_free_list (succ_ph);
      ph = merge_pages (ph, succ_ph);
    }
  /* try to free heap_section, if the section is complete */
  if (!free_heap_section (ph))
    /* else add merged page to free list */
    add_page_to_free_list (ph);
}


/* Cuts given page header after n_pages, returns the first (cut) part, and
   puts the rest on the free list. */
static page_header *
split_page (page_header *ph, EMACS_INT n_pages)
{
  page_header *new_ph;
  EMACS_INT rem_pages = PH_N_PAGES (ph) - n_pages;

  /* remove the page from the free list if already hooked in */
  if (PH_PLH (ph))
    remove_page_from_free_list (ph);
  /* set new number of pages */
  PH_N_PAGES (ph) = n_pages;
  /* add new page to lookup table */
  add_pages_to_lookup_table (ph, n_pages);

  if (rem_pages)
    {
      /* build new page with remainder */
      new_ph = alloc_page_header ();
      PH_N_PAGES (new_ph) = rem_pages;
      PH_HEAP_SPACE (new_ph) =
        (void*) ((EMACS_INT) (PH_HEAP_SPACE (ph)) + (n_pages * PAGE_SIZE));
      /* add new page to lookup table */
      add_pages_to_lookup_table (new_ph, rem_pages);
      /* hook the rest into free list */
      add_page_to_free_list (new_ph);
    }
  return ph;
}

/* Expands the heap by given number of pages. */
static page_header *
expand_heap (EMACS_INT needed_pages)
{
  page_header *ph;
  EMACS_INT n_pages;
  size_t real_size;
  void *real_start;

  /* determine number of pages the heap should grow */
  if (memory_shortage)
    n_pages = needed_pages;
  else
    n_pages = max (MIN_HEAP_INCREASE,
                   needed_pages
                   + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR)));

  /* get the real values */
  real_size = (n_pages * PAGE_SIZE) + PAGE_SIZE;
  real_start = xmalloc_and_zero (real_size);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, real_size, 0);
#endif

  /* maintain heap section count */
  if (N_HEAP_SECTIONS >= MAX_HEAP_SECTS)
    {
      stderr_out ("Increase number of MAX_HEAP_SECTS");
      ABORT ();
    }
  HEAP_SECTION(N_HEAP_SECTIONS).real_start = real_start;
  HEAP_SECTION(N_HEAP_SECTIONS).real_size = real_size;
  HEAP_SECTION(N_HEAP_SECTIONS).start = PAGE_SIZE_ALIGNMENT (real_start);
  HEAP_SECTION(N_HEAP_SECTIONS).n_pages = n_pages;
  N_HEAP_SECTIONS ++;

  /* get page header */
  ph = alloc_page_header ();

  /* setup page header */
  PH_N_PAGES (ph) = n_pages;
  PH_HEAP_SPACE (ph) = PAGE_SIZE_ALIGNMENT (real_start);
  assert (((EMACS_INT) (PH_HEAP_SPACE (ph)) % PAGE_SIZE) == 0);
  HEAP_SIZE += n_pages * PAGE_SIZE;

  /* this was also done by allocate_lisp_storage */
  if (need_to_check_c_alloca)
    xemacs_c_alloca (0);

  /* return needed size, put rest on free list */
  return split_page (ph, needed_pages);
}



/*--- used heap functions ----------------------------------------------*/

/* Installs initial free list. */
static void
install_cell_free_list (page_header *ph, EMACS_INT elemcount)
{
  Rawbyte *p;
  EMACS_INT i;
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  /* write initial free list if cell_size is < PAGE_SIZE */
  p = (Rawbyte *) PH_HEAP_SPACE (ph);
  for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
    {
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (p));
      MARK_LRECORD_AS_FREE (p);
#endif
      if (elemcount == 1)
        NEXT_FREE (p) = FREE_LIST (p + cell_size);
      set_lookup_table (p, ph);
      p += cell_size;
    }
#ifdef ERROR_CHECK_GC
  assert (!LRECORD_FREE_P (p));
  MARK_LRECORD_AS_FREE (p);
#endif
  NEXT_FREE (p) = 0;
  set_lookup_table (p, ph);

  /* hook free list into header */
  PH_FREE_LIST (ph) = FREE_LIST (PH_HEAP_SPACE (ph));
}


/* Cleans the object space of the given page_header. */
static void
remove_cell_free_list (page_header *ph)
{
#if ZERO_MEM
  ZERO_HEAP_SPACE (ph);
#endif
  PH_FREE_LIST (ph) = 0;
}

/* Installs a new page and hooks it into given page_list_header. */
static page_header *
install_page_in_used_list (page_header *ph, page_list_header *plh,
                           size_t size, EMACS_INT elemcount)
{
  /* add to list */
  add_page_header_to_plh (ph, plh);

  /* determine cell size */
  if (PLH_SIZE (plh))
    PH_CELL_SIZE (ph) = PLH_SIZE (plh);
  else
    PH_CELL_SIZE (ph) = size;
  if (elemcount == 1)
    PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph);
  else
    {
      PH_CELLS_ON_PAGE (ph) = elemcount;
      PH_ARRAY_BIT (ph) = 1;
    }

  /* init cell count */
  PH_CELLS_USED (ph) = 0;

  /* install mark bits and initialize cell free list */
  install_mark_bits (ph);

  install_cell_free_list (ph, elemcount);

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph);
#endif

  return ph;
}


/* Cleans and frees a page, identified by the given page_header. */
static void
remove_page_from_used_list (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);

  assert (!(gc_in_progress && PH_PROTECTION_BIT (ph)));
  /* cleanup: remove memory protection, zero page_header bits. */

#ifdef MEMORY_USAGE_STATS
  PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
#endif

  /* clean up mark bits and cell free list */
  remove_cell_free_list (ph);
  if (PH_ON_USED_LIST_P (ph))
    remove_mark_bits (ph);

  /* clean up page header */
  PH_CELL_SIZE (ph) = 0;
  PH_CELLS_ON_PAGE (ph) = 0;
  PH_CELLS_USED (ph) = 0;

  /* remove from used list */
  remove_page_header_from_plh (ph, plh);

  /* move to free list */
  merge_into_free_list (ph);
}



1290 /*--- allocation -------------------------------------------------------*/ | |
1291 | |
1292 /* Allocates from cell free list on already allocated pages. */ | |
1293 static page_header * | |
1294 allocate_cell (page_list_header *plh) | |
1295 { | |
1296 page_header *ph = PLH_FIRST (plh); | |
1297 if (ph) | |
1298 { | |
1299 if (PH_FREE_LIST (ph)) | |
1300 /* elements free on first page */ | |
1301 return ph; | |
1302 else if ((PH_NEXT (ph)) | |
1303 && (PH_FREE_LIST (PH_NEXT (ph)))) | |
1304 /* elements free on second page */ | |
1305 { | |
1306 page_header *temp = PH_NEXT (ph); | |
1307 /* move full page (first page) to end of list */ | |
1308 PH_NEXT (PLH_LAST (plh)) = ph; | |
1309 PH_PREV (ph) = PLH_LAST (plh); | |
1310 PLH_LAST (plh) = ph; | |
1311 PH_NEXT (ph) = 0; | |
1312 /* install second page as first page */ | |
1313 ph = temp; | |
1314 PH_PREV (ph) = 0; | |
1315 PLH_FIRST (plh) = ph; | |
1316 return ph; | |
1317 } | |
1318 } | |
1319 return 0; | |
1320 } | |


/* Finds a free-list entry covering at least the needed number of pages.
   Algorithm: FIRST FIT. */
static page_header *
find_free_page_first_fit (EMACS_INT needed_pages, page_header *ph)
{
  while (ph)
    {
      if (PH_N_PAGES (ph) >= needed_pages)
        return ph;
      ph = PH_NEXT (ph);
    }
  return 0;
}


/* Allocates a page from the free list. */
static page_header *
allocate_page_from_free_list (EMACS_INT needed_pages)
{
  page_header *ph = 0;
  EMACS_INT i;
  for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
    if ((ph = find_free_page_first_fit (needed_pages,
                                        PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
      {
        if (PH_N_PAGES (ph) > needed_pages)
          return split_page (ph, needed_pages);
        else
          {
            remove_page_from_free_list (ph);
            return ph;
          }
      }
  return 0;
}


/* Allocates a new page, either from free list or by expanding the heap. */
static page_header *
allocate_new_page (page_list_header *plh, size_t size, EMACS_INT elemcount)
{
  EMACS_INT needed_pages = BYTES_TO_PAGES (size * elemcount);
  /* first check free list */
  page_header *result = allocate_page_from_free_list (needed_pages);
  if (!result)
    /* expand heap */
    result = expand_heap (needed_pages);
  install_page_in_used_list (result, plh, size, elemcount);
  return result;
}


/* Selects the correct size class and tries to allocate a cell of this
   size from the free list; if this fails, a new page is allocated. */
static void *
mc_alloc_1 (size_t size, EMACS_INT elemcount)
{
  page_list_header *plh = 0;
  page_header *ph = 0;
  void *result = 0;

  plh = USED_HEAP_PAGES (get_used_list_index (size));

  if (size == 0)
    return 0;
  if ((elemcount == 1) && (size < (size_t) PAGE_SIZE_DIV_2))
    /* first check any free cells */
    ph = allocate_cell (plh);
  if (!ph)
    /* allocate a new page */
    ph = allocate_new_page (plh, size, elemcount);

  /* return first element of free list and remove it from the list */
  result = (void *) PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) = NEXT_FREE (PH_FREE_LIST (ph));

  memset (result, '\0', (size * elemcount));
  MARK_LRECORD_AS_FREE (result);

  /* bump used cells counter */
  PH_CELLS_USED (ph) += elemcount;

#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (plh) += elemcount;
  PLH_USED_SPACE (plh) += size * elemcount;
#endif

  return result;
}

/* Array allocation. */
void *
mc_alloc_array (size_t size, EMACS_INT elemcount)
{
  return mc_alloc_1 (size, elemcount);
}

void *
mc_alloc (size_t size)
{
  return mc_alloc_1 (size, 1);
}

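/* Illustrative usage (hypothetical caller, not from this file):

     struct my_record *r =
       (struct my_record *) mc_alloc (sizeof (struct my_record));
     struct my_record *v =
       (struct my_record *) mc_alloc_array (sizeof (struct my_record), 10);

   Both calls return zeroed storage (the cell is additionally marked as
   a free lrecord until its header is initialized); a size of 0 yields
   a null pointer. */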


/*--- sweep & free & finalize ------------------------------------------*/

/* Frees a heap pointer. */
static void
remove_cell (void *ptr, page_header *ph)
{
#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (PH_PLH (ph))--;
  if (PH_ON_USED_LIST_P (ph))
    PLH_USED_SPACE (PH_PLH (ph)) -=
      detagged_lisp_object_size ((const struct lrecord_header *) ptr);
  else
    PLH_USED_SPACE (PH_PLH (ph)) -= PH_CELL_SIZE (ph);
#endif
  if (PH_ON_USED_LIST_P (ph))
    {
#ifdef ALLOC_TYPE_STATS
      dec_lrecord_stats (PH_CELL_SIZE (ph),
                         (const struct lrecord_header *) ptr);
#endif /* ALLOC_TYPE_STATS */
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (ptr));
      deadbeef_memory (ptr, PH_CELL_SIZE (ph));
      MARK_LRECORD_AS_FREE (ptr);
#endif
    }

  /* hooks cell into free list */
  NEXT_FREE (ptr) = PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) = FREE_LIST (ptr);
  /* decrease cells used */
  PH_CELLS_USED (ph)--;
}


/* Marks all free-list entries of a page, so the sweep phase does not
   treat free cells as garbage. */
static void
mark_free_list (page_header *ph)
{
  free_link *fl = PH_FREE_LIST (ph);
  while (fl)
    {
      SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK);
      fl = NEXT_FREE (fl);
    }
}


/* Finalizes a page for disksave.  XEmacs calls this routine before it
   dumps the heap image.  You have to tell mc-alloc how to call your
   object's finalizer for disksave; therefore, you have to define the
   macro MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr).  This macro should
   do nothing other than test whether there is a finalizer and call it
   on the given argument, which is the heap address of the object.
   Returns number of processed pages. */
static EMACS_INT
finalize_page_for_disksave (page_header *ph)
{
  EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);

  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
      MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
    }
  return 1;
}
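
/* A minimal illustrative sketch of such a macro, assuming hypothetical
   helpers has_finalizer() and call_finalizer(); the real definition
   lives elsewhere in the XEmacs sources:

     #define MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr)	\
       do {						\
         if (has_finalizer (ptr))			\
           call_finalizer (ptr);			\
       } while (0)
*/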


/* Finalizes the heap for disksave.  Returns number of processed
   pages. */
EMACS_INT
mc_finalize_for_disksave (void)
{
  return visit_all_used_page_headers (finalize_page_for_disksave);
}


/* Sweeps a page: all the non-marked cells are freed.  If the page is
   empty in the end, it is removed.  If some cells are free, it is
   moved to the front of its page header list.  Full pages stay where
   they are.  Returns number of processed pages. */
static EMACS_INT
sweep_page (page_header *ph)
{
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;

  mark_free_list (ph);

  /* ARRAY_BIT_HACK */
  if (PH_ARRAY_BIT (ph))
    for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
      {
        GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
        if (bit)
          {
            zero_mark_bits (ph);
            PH_BLACK_BIT (ph) = 0;
            return 1;
          }
      }

  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == WHITE)
        {
          GC_STAT_FREED;
          remove_cell (heap_space + (heap_space_step * mark_bit), ph);
        }
    }
  zero_mark_bits (ph);
  PH_BLACK_BIT (ph) = 0;
  if (PH_CELLS_USED (ph) == 0)
    remove_page_from_used_list (ph);
  else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
    move_page_header_to_front (ph);

  return 1;
}
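
/* Note on the ARRAY_BIT_HACK above (inferred from the code): a page
   with PH_ARRAY_BIT set holds a single array spanning all of its
   cells, so the array survives if any one of its cells is marked; in
   that case the mark bits are simply cleared and the page is kept as
   is.  Only when no cell is marked does control fall through to the
   per-cell loop, which frees the whole array and empties the page. */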


/* Sweeps the heap.  Returns number of processed pages. */
EMACS_INT
mc_sweep (void)
{
  return visit_all_used_page_headers (sweep_page);
}
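
/* Color convention used by the sweep (taken from the code above):
   cells whose mark bit is WHITE are unreachable and get freed;
   everything else, including free-list entries painted BLACK by
   mark_free_list, survives.  The mark bits are cleared again page by
   page, ready for the next collection. */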


/* Changes the size of the cell pointed to by ptr.
   Returns the address of the new cell with the new size. */
static void *
mc_realloc_1 (void *ptr, size_t size, EMACS_INT elemcount)
{
  if (ptr)
    {
      if (size * elemcount)
        {
          void *result = mc_alloc_1 (size, elemcount);
          size_t from_size = PH_CELL_SIZE (get_page_header (ptr));
          size_t cpy_size = size * elemcount;
          if (cpy_size > from_size)
            cpy_size = from_size;
          memcpy (result, ptr, cpy_size);
#ifdef ALLOC_TYPE_STATS
          inc_lrecord_stats (size, (struct lrecord_header *) result);
#endif /* ALLOC_TYPE_STATS */
          return result;
        }
      else
        {
          return 0;
        }
    }
  else
    return mc_alloc_1 (size, elemcount);
}

void *
mc_realloc (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 1);
}

void *
mc_realloc_array (void *ptr, size_t size, EMACS_INT elemcount)
{
  return mc_realloc_1 (ptr, size, elemcount);
}
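
/* Behavioral note (from the code above): unlike realloc(3), the old
   cell is not explicitly released here; it becomes unreachable and is
   reclaimed by the next sweep.  A request of total size 0 likewise
   returns a null pointer without touching the old cell. */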



/*--- initialization ---------------------------------------------------*/

/* Call once at the very beginning. */
void
init_mc_allocator (void)
{
  EMACS_INT i;

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES = 0;
#endif

  /* initialize page-size dependent values */
  switch (SYS_PAGE_SIZE)
    {
    case 512:   log_page_size = 9;  break;
    case 1024:  log_page_size = 10; break;
    case 2048:  log_page_size = 11; break;
    case 4096:  log_page_size = 12; break;
    case 8192:  log_page_size = 13; break;
    case 16384: log_page_size = 14; break;
    case 32768: log_page_size = 15; break;
    case 65536: log_page_size = 16; break;
    default:
      fprintf (stderr, "##### SYS_PAGE_SIZE=%d not supported #####\n",
               SYS_PAGE_SIZE);
      ABORT ();
    }
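
  /* In other words, log_page_size is the binary logarithm of the page
     size (e.g. 4096 -> 12); the page macros presumably use it as a
     shift count so that page arithmetic needs no run-time division. */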

  page_size_div_2 = (EMACS_UINT) SYS_PAGE_SIZE >> 1;

  mc_allocator_globals.used_heap_pages =
    (page_list_header *) xmalloc_and_zero ((N_USED_PAGE_LISTS + 1)
                                           * sizeof (page_list_header));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (N_USED_PAGE_LISTS + 1) * sizeof (page_list_header);
#endif

  mc_allocator_globals.ptr_lookup_table =
    (level_2_lookup_tree **)
    xmalloc_and_zero ((LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *));
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += (LEVEL1_SIZE + 1) * sizeof (level_2_lookup_tree *);
#endif

#ifdef BLOCKTYPE_ALLOC_PAGE_HEADER
  the_page_header_blocktype = Blocktype_new (struct page_header_blocktype);
#endif /* BLOCKTYPE_ALLOC_PAGE_HEADER */

  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    {
      page_list_header *plh = USED_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = USED_LIST;
      PLH_SIZE (plh) = get_used_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    {
      page_list_header *plh = FREE_HEAP_PAGES (i);
      PLH_LIST_TYPE (plh) = FREE_LIST;
      PLH_SIZE (plh) = get_free_list_size_value (i);
      PLH_FIRST (plh) = 0;
      PLH_LAST (plh) = 0;
      PLH_MARK_BIT_FREE_LIST (plh) = 0;
#ifdef MEMORY_USAGE_STATS
      PLH_PAGE_COUNT (plh) = 0;
      PLH_USED_CELLS (plh) = 0;
      PLH_USED_SPACE (plh) = 0;
      PLH_TOTAL_CELLS (plh) = 0;
      PLH_TOTAL_SPACE (plh) = 0;
#endif
    }

#ifndef BLOCKTYPE_ALLOC_PAGE_HEADER
  PAGE_HEADER_FREE_LIST = 0;
#endif /* not BLOCKTYPE_ALLOC_PAGE_HEADER */

#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += sizeof (mc_allocator_globals);
#endif

  init_lookup_table ();
}



/*--- lisp function for statistics -------------------------------------*/

#ifdef MEMORY_USAGE_STATS
DEFUN ("mc-alloc-memory-usage", Fmc_alloc_memory_usage, 0, 0, 0, /*
Returns stats about the mc-alloc memory usage.  See diagnose.el.
*/
       ())
{
  Lisp_Object free_plhs = Qnil;
  Lisp_Object used_plhs = Qnil;
  Lisp_Object heap_sects = Qnil;
  EMACS_INT used_size = 0;
  EMACS_INT real_size = 0;

  EMACS_INT i;

  for (i = 0; i < N_FREE_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (FREE_HEAP_PAGES (i)) > 0)
      free_plhs =
        acons (make_int (PLH_SIZE (FREE_HEAP_PAGES (i))),
               list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES (i)))),
               free_plhs);

  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_PAGE_COUNT (USED_HEAP_PAGES (i)) > 0)
      used_plhs =
        acons (make_int (PLH_SIZE (USED_HEAP_PAGES (i))),
               list5 (make_int (PLH_PAGE_COUNT (USED_HEAP_PAGES (i))),
                      make_int (PLH_USED_CELLS (USED_HEAP_PAGES (i))),
                      make_int (PLH_USED_SPACE (USED_HEAP_PAGES (i))),
                      make_int (PLH_TOTAL_CELLS (USED_HEAP_PAGES (i))),
                      make_int (PLH_TOTAL_SPACE (USED_HEAP_PAGES (i)))),
               used_plhs);

  for (i = 0; i < N_HEAP_SECTIONS; i++)
    {
      used_size += HEAP_SECTION (i).n_pages * PAGE_SIZE;
      real_size +=
        malloced_storage_size (0, HEAP_SECTION (i).real_size, 0);
    }

  heap_sects =
    list3 (make_int (N_HEAP_SECTIONS),
           make_int (used_size),
           make_int (real_size));

  return Fcons (make_int (PAGE_SIZE),
                list5 (heap_sects,
                       Fnreverse (used_plhs),
                       Fnreverse (free_plhs),
                       make_int (sizeof (mc_allocator_globals)),
                       make_int (MC_MALLOCED_BYTES)));
}
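
/* The Lisp-level result, as built above, has the schematic shape

     (PAGE-SIZE HEAP-SECTS USED-PLHS FREE-PLHS GLOBALS-SIZE MALLOCED-BYTES)

   where HEAP-SECTS is (N-SECTIONS USED-SIZE REAL-SIZE), each entry of
   USED-PLHS is (CELL-SIZE PAGE-COUNT USED-CELLS USED-SPACE TOTAL-CELLS
   TOTAL-SPACE), and each entry of FREE-PLHS is (SIZE PAGE-COUNT). */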
#endif /* MEMORY_USAGE_STATS */

void
syms_of_mc_alloc (void)
{
#ifdef MEMORY_USAGE_STATS
  DEFSUBR (Fmc_alloc_memory_usage);
#endif /* MEMORY_USAGE_STATS */
}


/*--- incremental garbage collector ----------------------------------*/

#if 0 /* currently unused */

/* access dirty bit of page header */
static void
set_dirty_bit (page_header *ph, unsigned int value)
{
  PH_DIRTY_BIT (ph) = value;
}

static void
set_dirty_bit_for_address (void *ptr, unsigned int value)
{
  set_dirty_bit (get_page_header (ptr), value);
}

static unsigned int
get_dirty_bit (page_header *ph)
{
  return PH_DIRTY_BIT (ph);
}

static unsigned int
get_dirty_bit_for_address (void *ptr)
{
  return get_dirty_bit (get_page_header (ptr));
}


/* access protection bit of page header */
static void
set_protection_bit (page_header *ph, unsigned int value)
{
  PH_PROTECTION_BIT (ph) = value;
}

static void
set_protection_bit_for_address (void *ptr, unsigned int value)
{
  set_protection_bit (get_page_header (ptr), value);
}

static unsigned int
get_protection_bit (page_header *ph)
{
  return PH_PROTECTION_BIT (ph);
}

static unsigned int
get_protection_bit_for_address (void *ptr)
{
  return get_protection_bit (get_page_header (ptr));
}


/* Returns the start of the page of the object pointed to by ptr. */
static void *
get_page_start (void *ptr)
{
  return PH_HEAP_SPACE (get_page_header (ptr));
}

#endif /* 0 */

/* Make PAGE_SIZE globally available. */
EMACS_INT
mc_get_page_size (void)
{
  return PAGE_SIZE;
}

/* Is the fault at ptr on a protected page? */
EMACS_INT
fault_on_protected_page (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph
          && PH_HEAP_SPACE (ph)
          && (PH_HEAP_SPACE (ph) <= ptr)
          && ((void *) ((EMACS_INT) PH_HEAP_SPACE (ph)
                        + PH_N_PAGES (ph) * PAGE_SIZE) > ptr)
          && (PH_PROTECTION_BIT (ph) == 1));
}
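
/* In effect, a fault is attributed to the write barrier only when ptr
   lies inside the heap space of a page run the allocator knows about
   and whose protection bit is set; anything else returns 0, presumably
   so the fault can be handled (or reported) elsewhere. */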


/* Protects the heap page of the given page header ph if black objects
   are on the page.  Returns number of processed pages. */
static EMACS_INT
protect_heap_page (page_header *ph)
{
  if (PH_BLACK_BIT (ph))
    {
      void *heap_space = PH_HEAP_SPACE (ph);
      EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
      vdb_protect ((void *) heap_space, heap_space_size);
      PH_PROTECTION_BIT (ph) = 1;
      return 1;
    }
  return 0;
}

/* Protects all heap pages with black objects.  Returns number of
   processed pages. */
EMACS_INT
protect_heap_pages (void)
{
  return visit_all_used_page_headers (protect_heap_page);
}


/* Removes protection (if any) from the heap page of the given page
   header ph.  Returns number of processed pages. */
static EMACS_INT
unprotect_heap_page (page_header *ph)
{
  if (PH_PROTECTION_BIT (ph))
    {
      void *heap_space = PH_HEAP_SPACE (ph);
      EMACS_INT heap_space_size = PH_N_PAGES (ph) * PAGE_SIZE;
      vdb_unprotect (heap_space, heap_space_size);
      PH_PROTECTION_BIT (ph) = 0;
      return 1;
    }
  return 0;
}

/* Removes protection from all heap pages which are protected.  Returns
   number of processed pages. */
EMACS_INT
unprotect_heap_pages (void)
{
  return visit_all_used_page_headers (unprotect_heap_page);
}

/* Removes protection and marks the page dirty. */
void
unprotect_page_and_mark_dirty (void *ptr)
{
  page_header *ph = get_page_header (ptr);
  unprotect_heap_page (ph);
  PH_DIRTY_BIT (ph) = 1;
}

/* Repushes all black objects on the given dirty page onto the mark
   stack.  Returns the number of repushed objects. */
int
repush_all_objects_on_page (void *ptr)
{
  int repushed_objects = 0;
  page_header *ph = get_page_header (ptr);
  Rawbyte *heap_space = (Rawbyte *) PH_HEAP_SPACE (ph);
  EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
  EMACS_INT mark_bit = 0;
  EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
  unsigned int bit = 0;
  for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
    {
      GET_BIT (bit, ph, mark_bit * N_MARK_BITS);
      if (bit == BLACK)
        {
          repushed_objects++;
          gc_write_barrier
            (wrap_pointer_1 ((heap_space + (heap_space_step * mark_bit))));
        }
    }
  PH_BLACK_BIT (ph) = 0;
  PH_DIRTY_BIT (ph) = 0;
  return repushed_objects;
}

/* Marks the object black if it is currently grey.  This first checks
   whether the object is really allocated on the mc-heap.  If it is, it
   can be marked black; if it is not, it cannot be marked. */
EMACS_INT
maybe_mark_black (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  unsigned int bit = 0;

  if (ph && PH_PLH (ph) && PH_ON_USED_LIST_P (ph))
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
      if (bit == GREY)
        {
          if (!PH_BLACK_BIT (ph))
            PH_BLACK_BIT (ph) = 1;
          SET_BIT (ph, get_mark_bit_index (ptr, ph), BLACK);
        }
      return 1;
    }
  return 0;
}
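
/* Background (a standard tri-color reading of the bits used above):
   GREY objects are reachable but not yet scanned, BLACK objects are
   reachable and scanned.  PH_BLACK_BIT caches "this page contains at
   least one black object", which is exactly what protect_heap_page
   tests before write-protecting a page. */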

/* Only for debugging --- not used anywhere in the sources. */
EMACS_INT
object_on_heap_p (void *ptr)
{
  page_header *ph = get_page_header_internal (ptr);
  return (ph && PH_ON_USED_LIST_P (ph));
}