Mercurial > hg > xemacs-beta
annotate src/ralloc.c @ 5090:0ca81354c4c7
Further frame-geometry cleanups
-------------------- ChangeLog entries follow: --------------------
man/ChangeLog addition:
2010-03-03 Ben Wing <ben@xemacs.org>
* internals/internals.texi (Intro to Window and Frame Geometry):
* internals/internals.texi (The Paned Area):
* internals/internals.texi (The Displayable Area):
Update to make note of e.g. the fact that the bottom gutter is
actually above the minibuffer.
src/ChangeLog addition:
2010-03-03 Ben Wing <ben@xemacs.org>
* emacs.c:
* emacs.c (assert_equal_failed):
* lisp.h:
* lisp.h (assert_equal):
New fun assert_equal, asserting that two values == each other, and
printing out both values upon failure.
* frame-gtk.c (gtk_initialize_frame_size):
* frame-impl.h:
* frame-impl.h (FRAME_TOP_INTERNAL_BORDER_START):
* frame-impl.h (FRAME_BOTTOM_INTERNAL_BORDER_START):
* frame-impl.h (FRAME_LEFT_INTERNAL_BORDER_START):
* frame-impl.h (FRAME_PANED_TOP_EDGE):
* frame-impl.h (FRAME_NONPANED_SIZE):
* frame-x.c (x_initialize_frame_size):
* frame.c:
* gutter.c (get_gutter_coords):
* gutter.c (calculate_gutter_size):
* gutter.h:
* gutter.h (WINDOW_REAL_TOP_GUTTER_BOUNDS):
* gutter.h (FRAME_TOP_GUTTER_BOUNDS):
* input-method-xlib.c:
* input-method-xlib.c (XIM_SetGeometry):
* redisplay-output.c (clear_left_border):
* redisplay-output.c (clear_right_border):
* redisplay-output.c (redisplay_output_pixmap):
* redisplay-output.c (redisplay_clear_region):
* redisplay-output.c (redisplay_clear_top_of_window):
* redisplay-output.c (redisplay_clear_to_window_end):
* redisplay-xlike-inc.c (XLIKE_clear_frame):
* redisplay.c:
* redisplay.c (UPDATE_CACHE_RETURN):
* redisplay.c (pixel_to_glyph_translation):
* toolbar.c (update_frame_toolbars_geometry):
* window.c (Fwindow_pixel_edges):
Get rid of some redundant macros. Consistently use the
FRAME_TOP_*_START, FRAME_RIGHT_*_END, etc. format. Rename
FRAME_*_BORDER_* to FRAME_*_INTERNAL_BORDER_*. Comment out
FRAME_BOTTOM_* for gutters and the paned area due to the
uncertainty over where the paned area actually begins. (Eventually
we should probably move the gutters outside the minibuffer so that
the paned area is contiguous.) Use FRAME_PANED_* more often in the
code to make things clearer.
Update the diagram to show that the bottom gutter is inside the
minibuffer (!) and that there are "junk boxes" when you have left
and/or right gutters (dead boxes that are mistakenly left uncleared,
unlike the corresponding scrollbar dead boxes). Update the text
appropriately to cover the bottom gutter position, etc.
Rewrite gutter-geometry code to use the FRAME_*_GUTTER_* in place of
equivalent expressions referencing other frame elements, to make the
code more portable in case we move around the gutter location.
Cleanup FRAME_*_GUTTER_BOUNDS() in gutter.h.
Add some #### GEOM! comments where I think code is incorrect --
typically, it wasn't fixed up properly when the gutter was added.
Some cosmetic changes.
author | Ben Wing <ben@xemacs.org> |
---|---|
date | Wed, 03 Mar 2010 05:07:47 -0600 |
parents | 6f2158fa75ed |
children | 308d34e9f07d |
rev | line source |
---|---|
428 | 1 /* Block-relocating memory allocator. |
2 Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc. | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
3 Copyright (C) 2010 Ben Wing. |
428 | 4 |
5 This file is part of XEmacs. | |
6 | |
7 XEmacs is free software; you can redistribute it and/or modify it | |
8 under the terms of the GNU General Public License as published by the | |
9 Free Software Foundation; either version 2, or (at your option) any | |
10 later version. | |
11 | |
12 XEmacs is distributed in the hope that it will be useful, but WITHOUT | |
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 for more details. | |
16 | |
17 You should have received a copy of the GNU General Public License | |
613 | 18 along with XEmacs; see the file COPYING. If not, write to |
428 | 19 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
20 Boston, MA 02111-1307, USA. | |
21 | |
22 Synched Up with: FSF 20.2 (non-mmap portion only) | |
23 */ | |
24 | |
25 /* NOTES: | |
26 | |
27 Only relocate the blocs necessary for SIZE in r_alloc_sbrk, | |
28 rather than all of them. This means allowing for a possible | |
29 hole between the first bloc and the end of malloc storage. */ | |
30 | |
31 #ifdef HAVE_CONFIG_H | |
32 #include <config.h> | |
33 #endif | |
34 | |
35 #ifdef HAVE_UNISTD_H | |
36 #include <unistd.h> /* for getpagesize() */ | |
37 #endif | |
38 | |
39 #ifdef emacs | |
40 | |
41 #include "lisp.h" | |
42 | |
43 /* The important properties of this type are that 1) it's a pointer, and | |
44 2) arithmetic on it should work as if the size of the object pointed | |
45 to has a size of 1. */ | |
46 #if 0 /* Arithmetic on void* is a GCC extension. */ | |
47 #ifdef __STDC__ | |
48 typedef void *POINTER; | |
49 #else | |
50 typedef unsigned char *POINTER; | |
51 #endif | |
52 #endif /* 0 */ | |
53 | |
54 /* Unconditionally use unsigned char * for this. */ | |
55 typedef unsigned char *POINTER; | |
56 | |
57 #ifdef DOUG_LEA_MALLOC | |
58 #define M_TOP_PAD -2 | |
59 #include <malloc.h> | |
60 #endif | |
61 | |
62 #include "getpagesize.h" | |
63 | |
64 #include <string.h> | |
3263 | 65 #ifndef NEW_GC |
428 | 66 void refill_memory_reserve (void); |
3263 | 67 #endif /* not NEW_GC */ |
428 | 68 |
69 #else /* Not emacs. */ | |
70 | |
1333 | 71 #define REGEX_MALLOC_CHECK() |
72 | |
428 | 73 #include <stddef.h> |
74 | |
75 typedef void *POINTER; | |
76 | |
77 #include <unistd.h> | |
78 #include <malloc.h> | |
79 #include <string.h> | |
80 | |
81 #endif /* emacs. */ | |
82 | |
83 void init_ralloc (void); | |
84 | |
85 #define NIL ((POINTER) 0) | |
86 | |
87 | |
88 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC) | |
89 | |
90 /* A flag to indicate whether we have initialized ralloc yet. For | |
91 Emacs's sake, please do not make this local to malloc_init; on some | |
92 machines, the dumping procedure makes all static variables | |
93 read-only. On these machines, the word static is #defined to be | |
94 the empty string, meaning that r_alloc_initialized becomes an | |
95 automatic variable, and loses its value each time Emacs is started up. */ | |
96 static int r_alloc_initialized = 0; | |
97 | |
98 | |
99 /* Declarations for working with the malloc, ralloc, and system breaks. */ | |
100 | |
101 /* Function to set the real break value. */ | |
102 static POINTER (*real_morecore) (ptrdiff_t size); | |
103 | |
104 /* The break value, as seen by malloc (). */ | |
105 static POINTER virtual_break_value; | |
106 | |
107 /* The break value, viewed by the relocatable blocs. */ | |
108 static POINTER break_value; | |
109 | |
110 /* This is the size of a page. We round memory requests to this boundary. */ | |
111 static int page_size; | |
112 | |
113 /* Whenever we get memory from the system, get this many extra bytes. This | |
114 must be a multiple of page_size. */ | |
115 static int extra_bytes; | |
116 | |
117 /* Macros for rounding. Note that rounding to any value is possible | |
118 by changing the definition of PAGE. */ | |
119 #define PAGE (getpagesize ()) | |
120 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0) | |
121 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \ | |
122 & ~(page_size - 1)) | |
123 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1))) | |
124 | |
125 #define MEM_ALIGN sizeof(double) | |
126 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \ | |
127 & ~(MEM_ALIGN - 1)) | |
128 | |
129 /* Data structures of heaps and blocs. */ | |
130 | |
131 /* The relocatable objects, or blocs, and the malloc data | |
132 both reside within one or more heaps. | |
133 Each heap contains malloc data, running from `start' to `bloc_start', | |
134 and relocatable objects, running from `bloc_start' to `free'. | |
135 | |
136 Relocatable objects may relocate within the same heap | |
137 or may move into another heap; the heaps themselves may grow | |
138 but they never move. | |
139 | |
140 We try to make just one heap and make it larger as necessary. | |
141 But sometimes we can't do that, because we can't get contiguous | |
142 space to add onto the heap. When that happens, we start a new heap. */ | |
143 | |
144 typedef struct heap | |
145 { | |
146 struct heap *next; | |
147 struct heap *prev; | |
148 /* Start of memory range of this heap. */ | |
149 POINTER start; | |
150 /* End of memory range of this heap. */ | |
151 POINTER end; | |
152 /* Start of relocatable data in this heap. */ | |
153 POINTER bloc_start; | |
154 /* Start of unused space in this heap. */ | |
155 POINTER free; | |
156 /* First bloc in this heap. */ | |
157 struct bp *first_bloc; | |
158 /* Last bloc in this heap. */ | |
159 struct bp *last_bloc; | |
160 } *heap_ptr; | |
161 | |
162 #define NIL_HEAP ((heap_ptr) 0) | |
163 #define HEAP_PTR_SIZE (sizeof (struct heap)) | |
164 | |
165 /* This is the first heap object. | |
166 If we need additional heap objects, each one resides at the beginning of | |
167 the space it covers. */ | |
168 static struct heap heap_base; | |
169 | |
170 /* Head and tail of the list of heaps. */ | |
171 static heap_ptr first_heap, last_heap; | |
172 | |
173 /* These structures are allocated in the malloc arena. | |
174 The linked list is kept in order of increasing '.data' members. | |
175 The data blocks abut each other; if b->next is non-nil, then | |
176 b->data + b->size == b->next->data. | |
177 | |
178 An element with variable==NIL denotes a freed block, which has not yet | |
179 been collected. They may only appear while r_alloc_freeze > 0, and will be | |
180 freed when the arena is thawed. Currently, these blocs are not reusable, | |
181 while the arena is frozen. Very inefficient. */ | |
182 | |
183 typedef struct bp | |
184 { | |
185 struct bp *next; | |
186 struct bp *prev; | |
187 POINTER *variable; | |
188 POINTER data; | |
440 | 189 size_t size; |
428 | 190 POINTER new_data; /* temporarily used for relocation */ |
191 struct heap *heap; /* Heap this bloc is in. */ | |
192 } *bloc_ptr; | |
193 | |
194 #define NIL_BLOC ((bloc_ptr) 0) | |
195 #define BLOC_PTR_SIZE (sizeof (struct bp)) | |
196 | |
197 /* Head and tail of the list of relocatable blocs. */ | |
198 static bloc_ptr first_bloc, last_bloc; | |
199 | |
200 static int use_relocatable_buffers; | |
201 | |
202 /* If >0, no relocation whatsoever takes place. */ | |
203 static int r_alloc_freeze_level; | |
204 | |
205 /* Obtain SIZE bytes of space. If enough space is not presently available | |
206 in our process reserve, (i.e., (page_break_value - break_value)), | |
207 this means getting more page-aligned space from the system. | |
208 | |
209 Return non-zero if all went well, or zero if we couldn't allocate | |
210 the memory. */ | |
211 | |
212 /* Functions to get and return memory from the system. */ | |
213 | |
214 /* Find the heap that ADDRESS falls within. */ | |
215 | |
216 static heap_ptr | |
217 find_heap (POINTER address) | |
218 { | |
219 heap_ptr heap; | |
220 | |
221 for (heap = last_heap; heap; heap = heap->prev) | |
222 { | |
223 if (heap->start <= address && address <= heap->end) | |
224 return heap; | |
225 } | |
226 | |
227 return NIL_HEAP; | |
228 } | |
229 | |
230 /* Find SIZE bytes of space in a heap. | |
231 Try to get them at ADDRESS (which must fall within some heap's range) | |
232 if we can get that many within one heap. | |
233 | |
234 If enough space is not presently available in our reserve, this means | |
235 getting more page-aligned space from the system. If the returned space | |
236 is not contiguous to the last heap, allocate a new heap, and append it | |
237 | |
238 obtain does not try to keep track of whether space is in use | |
239 or not in use. It just returns the address of SIZE bytes that | |
240 fall within a single heap. If you call obtain twice in a row | |
241 with the same arguments, you typically get the same value. | |
242 to the heap list. It's the caller's responsibility to keep | |
243 track of what space is in use. | |
244 | |
245 Return the address of the space if all went well, or zero if we couldn't | |
246 allocate the memory. */ | |
247 | |
248 static POINTER | |
440 | 249 obtain (POINTER address, size_t size) |
428 | 250 { |
251 heap_ptr heap; | |
440 | 252 size_t already_available; |
428 | 253 |
254 /* Find the heap that ADDRESS falls within. */ | |
255 for (heap = last_heap; heap; heap = heap->prev) | |
256 { | |
257 if (heap->start <= address && address <= heap->end) | |
258 break; | |
259 } | |
260 | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
261 assert (heap); |
428 | 262 |
263 /* If we can't fit SIZE bytes in that heap, | |
264 try successive later heaps. */ | |
265 while (heap && address + size > heap->end) | |
266 { | |
267 heap = heap->next; | |
268 if (heap == NIL_HEAP) | |
269 break; | |
270 address = heap->bloc_start; | |
271 } | |
272 | |
273 /* If we can't fit them within any existing heap, | |
274 get more space. */ | |
275 if (heap == NIL_HEAP) | |
276 { | |
3025 | 277 POINTER new_ = (*real_morecore)(0); |
440 | 278 size_t get; |
428 | 279 |
280 already_available = (char *)last_heap->end - (char *)address; | |
281 | |
3025 | 282 if (new_ != last_heap->end) |
428 | 283 { |
284 /* Someone else called sbrk. Make a new heap. */ | |
285 | |
3025 | 286 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new_); |
428 | 287 POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1)); |
288 | |
3025 | 289 if ((*real_morecore) (bloc_start - new_) != new_) |
428 | 290 return 0; |
291 | |
3025 | 292 new_heap->start = new_; |
428 | 293 new_heap->end = bloc_start; |
294 new_heap->bloc_start = bloc_start; | |
295 new_heap->free = bloc_start; | |
296 new_heap->next = NIL_HEAP; | |
297 new_heap->prev = last_heap; | |
298 new_heap->first_bloc = NIL_BLOC; | |
299 new_heap->last_bloc = NIL_BLOC; | |
300 last_heap->next = new_heap; | |
301 last_heap = new_heap; | |
302 | |
303 address = bloc_start; | |
304 already_available = 0; | |
305 } | |
306 | |
307 /* Add space to the last heap (which we may have just created). | |
308 Get some extra, so we can come here less often. */ | |
309 | |
310 get = size + extra_bytes - already_available; | |
311 get = (char *) ROUNDUP ((char *)last_heap->end + get) | |
312 - (char *) last_heap->end; | |
313 | |
314 if ((*real_morecore) (get) != last_heap->end) | |
315 return 0; | |
316 | |
317 last_heap->end += get; | |
318 } | |
319 | |
320 return address; | |
321 } | |
322 | |
323 #if 0 | |
324 /* Obtain SIZE bytes of space and return a pointer to the new area. | |
325 If we could not allocate the space, return zero. */ | |
326 | |
327 static POINTER | |
440 | 328 get_more_space (size_t size) |
428 | 329 { |
330 POINTER ptr = break_value; | |
331 if (obtain (size)) | |
332 return ptr; | |
333 else | |
334 return 0; | |
335 } | |
336 #endif | |
337 | |
338 /* Note that SIZE bytes of space have been relinquished by the process. | |
339 If SIZE is more than a page, return the space to the system. */ | |
340 | |
341 static void | |
342 relinquish (void) | |
343 { | |
344 register heap_ptr h; | |
345 int excess = 0; | |
346 | |
347 /* Add the amount of space beyond break_value | |
348 in all heaps which have extend beyond break_value at all. */ | |
349 | |
350 for (h = last_heap; h && break_value < h->end; h = h->prev) | |
351 { | |
352 excess += (char *) h->end - (char *) ((break_value < h->bloc_start) | |
353 ? h->bloc_start : break_value); | |
354 } | |
355 | |
356 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end) | |
357 { | |
358 /* Keep extra_bytes worth of empty space. | |
359 And don't free anything unless we can free at least extra_bytes. */ | |
360 excess -= extra_bytes; | |
361 | |
362 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess) | |
363 { | |
364 /* This heap should have no blocs in it. */ | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
365 assert (last_heap->first_bloc == NIL_BLOC && |
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
366 last_heap->last_bloc == NIL_BLOC); |
428 | 367 |
368 /* Return the last heap, with its header, to the system. */ | |
369 excess = (char *)last_heap->end - (char *)last_heap->start; | |
370 last_heap = last_heap->prev; | |
371 last_heap->next = NIL_HEAP; | |
372 } | |
373 else | |
374 { | |
375 excess = (char *) last_heap->end | |
376 - (char *) ROUNDUP ((char *)last_heap->end - excess); | |
377 last_heap->end -= excess; | |
378 } | |
379 | |
380 if ((*real_morecore) (- excess) == 0) | |
2500 | 381 ABORT (); |
428 | 382 } |
383 } | |
384 | |
385 /* Return the total size in use by relocating allocator, | |
386 above where malloc gets space. */ | |
387 | |
388 long r_alloc_size_in_use (void); | |
389 long | |
440 | 390 r_alloc_size_in_use (void) |
428 | 391 { |
392 return break_value - virtual_break_value; | |
393 } | |
394 | |
395 /* The meat - allocating, freeing, and relocating blocs. */ | |
396 | |
397 | |
398 /* Find the bloc referenced by the address in PTR. Returns a pointer | |
399 to that block. */ | |
400 | |
401 static bloc_ptr | |
402 find_bloc (POINTER *ptr) | |
403 { | |
404 register bloc_ptr p = first_bloc; | |
405 | |
406 while (p != NIL_BLOC) | |
407 { | |
408 if (p->variable == ptr && p->data == *ptr) | |
409 return p; | |
410 | |
411 p = p->next; | |
412 } | |
413 | |
414 return p; | |
415 } | |
416 | |
417 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs. | |
418 Returns a pointer to the new bloc, or zero if we couldn't allocate | |
419 memory for the new block. */ | |
420 | |
421 static bloc_ptr | |
440 | 422 get_bloc (size_t size) |
428 | 423 { |
424 register bloc_ptr new_bloc; | |
425 register heap_ptr heap; | |
426 | |
427 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) | |
428 || ! (new_bloc->data = obtain (break_value, size))) | |
429 { | |
430 if (new_bloc) | |
431 free (new_bloc); | |
432 | |
433 return 0; | |
434 } | |
435 | |
436 break_value = new_bloc->data + size; | |
437 | |
438 new_bloc->size = size; | |
439 new_bloc->next = NIL_BLOC; | |
440 new_bloc->variable = (POINTER *) NIL; | |
441 new_bloc->new_data = 0; | |
442 | |
443 /* Record in the heap that this space is in use. */ | |
444 heap = find_heap (new_bloc->data); | |
445 heap->free = break_value; | |
446 | |
447 /* Maintain the correspondence between heaps and blocs. */ | |
448 new_bloc->heap = heap; | |
449 heap->last_bloc = new_bloc; | |
450 if (heap->first_bloc == NIL_BLOC) | |
451 heap->first_bloc = new_bloc; | |
452 | |
453 /* Put this bloc on the doubly-linked list of blocs. */ | |
454 if (first_bloc) | |
455 { | |
456 new_bloc->prev = last_bloc; | |
457 last_bloc->next = new_bloc; | |
458 last_bloc = new_bloc; | |
459 } | |
460 else | |
461 { | |
462 first_bloc = last_bloc = new_bloc; | |
463 new_bloc->prev = NIL_BLOC; | |
464 } | |
465 | |
466 return new_bloc; | |
467 } | |
468 | |
469 /* Calculate new locations of blocs in the list beginning with BLOC, | |
470 relocating it to start at ADDRESS, in heap HEAP. If enough space is | |
471 not presently available in our reserve, call obtain for | |
472 more space. | |
473 | |
474 Store the new location of each bloc in its new_data field. | |
475 Do not touch the contents of blocs or break_value. */ | |
476 | |
477 static int | |
478 relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address) | |
479 { | |
480 register bloc_ptr b = bloc; | |
481 | |
482 /* No need to ever call this if arena is frozen, bug somewhere! */ | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
483 assert (!r_alloc_freeze_level); |
428 | 484 |
485 while (b) | |
486 { | |
487 /* If bloc B won't fit within HEAP, | |
488 move to the next heap and try again. */ | |
489 while (heap && address + b->size > heap->end) | |
490 { | |
491 heap = heap->next; | |
492 if (heap == NIL_HEAP) | |
493 break; | |
494 address = heap->bloc_start; | |
495 } | |
496 | |
497 /* If BLOC won't fit in any heap, | |
498 get enough new space to hold BLOC and all following blocs. */ | |
499 if (heap == NIL_HEAP) | |
500 { | |
501 register bloc_ptr tb = b; | |
440 | 502 register size_t s = 0; |
428 | 503 |
504 /* Add up the size of all the following blocs. */ | |
505 while (tb != NIL_BLOC) | |
506 { | |
507 if (tb->variable) | |
508 s += tb->size; | |
509 | |
510 tb = tb->next; | |
511 } | |
512 | |
513 /* Get that space. */ | |
514 address = obtain (address, s); | |
515 if (address == 0) | |
516 return 0; | |
517 | |
518 heap = last_heap; | |
519 } | |
520 | |
521 /* Record the new address of this bloc | |
522 and update where the next bloc can start. */ | |
523 b->new_data = address; | |
524 if (b->variable) | |
525 address += b->size; | |
526 b = b->next; | |
527 } | |
528 | |
529 return 1; | |
530 } | |
531 | |
532 #if 0 /* unused */ | |
533 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list. | |
534 This is necessary if we put the memory of space of BLOC | |
535 before that of BEFORE. */ | |
536 | |
537 static void | |
538 reorder_bloc (bloc_ptr bloc, bloc_ptr before) | |
539 { | |
540 bloc_ptr prev, next; | |
541 | |
542 /* Splice BLOC out from where it is. */ | |
543 prev = bloc->prev; | |
544 next = bloc->next; | |
545 | |
546 if (prev) | |
547 prev->next = next; | |
548 if (next) | |
549 next->prev = prev; | |
550 | |
551 /* Splice it in before BEFORE. */ | |
552 prev = before->prev; | |
553 | |
554 if (prev) | |
555 prev->next = bloc; | |
556 bloc->prev = prev; | |
557 | |
558 before->prev = bloc; | |
559 bloc->next = before; | |
560 } | |
561 #endif /* unused */ | |
562 | |
563 /* Update the records of which heaps contain which blocs, starting | |
564 with heap HEAP and bloc BLOC. */ | |
565 | |
566 static void | |
567 update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap) | |
568 { | |
569 register bloc_ptr b; | |
570 | |
571 /* Initialize HEAP's status to reflect blocs before BLOC. */ | |
572 if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap) | |
573 { | |
574 /* The previous bloc is in HEAP. */ | |
575 heap->last_bloc = bloc->prev; | |
576 heap->free = bloc->prev->data + bloc->prev->size; | |
577 } | |
578 else | |
579 { | |
580 /* HEAP contains no blocs before BLOC. */ | |
581 heap->first_bloc = NIL_BLOC; | |
582 heap->last_bloc = NIL_BLOC; | |
583 heap->free = heap->bloc_start; | |
584 } | |
585 | |
586 /* Advance through blocs one by one. */ | |
587 for (b = bloc; b != NIL_BLOC; b = b->next) | |
588 { | |
589 /* Advance through heaps, marking them empty, | |
590 till we get to the one that B is in. */ | |
591 while (heap) | |
592 { | |
593 if (heap->bloc_start <= b->data && b->data <= heap->end) | |
594 break; | |
595 heap = heap->next; | |
596 /* We know HEAP is not null now, | |
597 because there has to be space for bloc B. */ | |
598 heap->first_bloc = NIL_BLOC; | |
599 heap->last_bloc = NIL_BLOC; | |
600 heap->free = heap->bloc_start; | |
601 } | |
602 | |
603 /* Update HEAP's status for bloc B. */ | |
604 heap->free = b->data + b->size; | |
605 heap->last_bloc = b; | |
606 if (heap->first_bloc == NIL_BLOC) | |
607 heap->first_bloc = b; | |
608 | |
609 /* Record that B is in HEAP. */ | |
610 b->heap = heap; | |
611 } | |
612 | |
613 /* If there are any remaining heaps and no blocs left, | |
614 mark those heaps as empty. */ | |
615 heap = heap->next; | |
616 while (heap) | |
617 { | |
618 heap->first_bloc = NIL_BLOC; | |
619 heap->last_bloc = NIL_BLOC; | |
620 heap->free = heap->bloc_start; | |
621 heap = heap->next; | |
622 } | |
623 } | |
624 | |
625 /* Resize BLOC to SIZE bytes. This relocates the blocs | |
626 that come after BLOC in memory. */ | |
627 | |
628 static int | |
440 | 629 resize_bloc (bloc_ptr bloc, size_t size) |
428 | 630 { |
631 register bloc_ptr b; | |
632 heap_ptr heap; | |
633 POINTER address; | |
440 | 634 size_t old_size; |
428 | 635 |
636 /* No need to ever call this if arena is frozen, bug somewhere! */ | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
637 assert (!r_alloc_freeze_level); |
428 | 638 |
639 if (bloc == NIL_BLOC || size == bloc->size) | |
640 return 1; | |
641 | |
642 for (heap = first_heap; heap != NIL_HEAP; heap = heap->next) | |
643 { | |
644 if (heap->bloc_start <= bloc->data && bloc->data <= heap->end) | |
645 break; | |
646 } | |
647 | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
648 assert (heap != NIL_HEAP); |
428 | 649 |
650 old_size = bloc->size; | |
651 bloc->size = size; | |
652 | |
653 /* Note that bloc could be moved into the previous heap. */ | |
654 address = (bloc->prev ? bloc->prev->data + bloc->prev->size | |
655 : first_heap->bloc_start); | |
656 while (heap) | |
657 { | |
658 if (heap->bloc_start <= address && address <= heap->end) | |
659 break; | |
660 heap = heap->prev; | |
661 } | |
662 | |
663 if (! relocate_blocs (bloc, heap, address)) | |
664 { | |
665 bloc->size = old_size; | |
666 return 0; | |
667 } | |
668 | |
669 if (size > old_size) | |
670 { | |
671 for (b = last_bloc; b != bloc; b = b->prev) | |
672 { | |
673 if (!b->variable) | |
674 { | |
675 b->size = 0; | |
676 b->data = b->new_data; | |
677 } | |
678 else | |
679 { | |
440 | 680 memmove (b->new_data, b->data, b->size); |
428 | 681 *b->variable = b->data = b->new_data; |
682 } | |
683 } | |
684 if (!bloc->variable) | |
685 { | |
686 bloc->size = 0; | |
687 bloc->data = bloc->new_data; | |
688 } | |
689 else | |
690 { | |
440 | 691 memmove (bloc->new_data, bloc->data, old_size); |
428 | 692 memset (bloc->new_data + old_size, 0, size - old_size); |
693 *bloc->variable = bloc->data = bloc->new_data; | |
694 } | |
695 } | |
696 else | |
697 { | |
698 for (b = bloc; b != NIL_BLOC; b = b->next) | |
699 { | |
700 if (!b->variable) | |
701 { | |
702 b->size = 0; | |
703 b->data = b->new_data; | |
704 } | |
705 else | |
706 { | |
440 | 707 memmove (b->new_data, b->data, b->size); |
428 | 708 *b->variable = b->data = b->new_data; |
709 } | |
710 } | |
711 } | |
712 | |
713 update_heap_bloc_correspondence (bloc, heap); | |
714 | |
715 break_value = (last_bloc ? last_bloc->data + last_bloc->size | |
716 : first_heap->bloc_start); | |
717 return 1; | |
718 } | |
719 | |
720 /* Free BLOC from the chain of blocs, relocating any blocs above it | |
721 and returning BLOC->size bytes to the free area. */ | |
722 | |
723 static void | |
724 free_bloc (bloc_ptr bloc) | |
725 { | |
726 heap_ptr heap = bloc->heap; | |
727 | |
728 if (r_alloc_freeze_level) | |
729 { | |
730 bloc->variable = (POINTER *) NIL; | |
731 return; | |
732 } | |
733 | |
734 resize_bloc (bloc, 0); | |
735 | |
736 if (bloc == first_bloc && bloc == last_bloc) | |
737 { | |
738 first_bloc = last_bloc = NIL_BLOC; | |
739 } | |
740 else if (bloc == last_bloc) | |
741 { | |
742 last_bloc = bloc->prev; | |
743 last_bloc->next = NIL_BLOC; | |
744 } | |
745 else if (bloc == first_bloc) | |
746 { | |
747 first_bloc = bloc->next; | |
748 first_bloc->prev = NIL_BLOC; | |
749 } | |
750 else | |
751 { | |
752 bloc->next->prev = bloc->prev; | |
753 bloc->prev->next = bloc->next; | |
754 } | |
755 | |
756 /* Update the records of which blocs are in HEAP. */ | |
757 if (heap->first_bloc == bloc) | |
758 { | |
759 if (bloc->next != 0 && bloc->next->heap == heap) | |
760 heap->first_bloc = bloc->next; | |
761 else | |
762 heap->first_bloc = heap->last_bloc = NIL_BLOC; | |
763 } | |
764 if (heap->last_bloc == bloc) | |
765 { | |
766 if (bloc->prev != 0 && bloc->prev->heap == heap) | |
767 heap->last_bloc = bloc->prev; | |
768 else | |
769 heap->first_bloc = heap->last_bloc = NIL_BLOC; | |
770 } | |
771 | |
772 relinquish (); | |
773 free (bloc); | |
774 } | |
775 | |
776 /* Interface routines. */ | |
777 | |
778 /* Obtain SIZE bytes of storage from the free pool, or the system, as | |
779 necessary. If relocatable blocs are in use, this means relocating | |
780 them. This function gets plugged into the GNU malloc's __morecore | |
781 hook. | |
782 | |
783 We provide hysteresis, never relocating by less than extra_bytes. | |
784 | |
785 If we're out of memory, we should return zero, to imitate the other | |
786 __morecore hook values - in particular, __default_morecore in the | |
787 GNU malloc package. */ | |
788 | |
789 POINTER r_alloc_sbrk (ptrdiff_t size); | |
790 POINTER | |
791 r_alloc_sbrk (ptrdiff_t size) | |
792 { | |
793 register bloc_ptr b; | |
794 POINTER address; | |
795 | |
796 if (! r_alloc_initialized) | |
797 init_ralloc (); | |
798 | |
799 if (! use_relocatable_buffers) | |
800 return (*real_morecore) (size); | |
801 | |
802 if (size == 0) | |
803 return virtual_break_value; | |
804 | |
805 if (size > 0) | |
806 { | |
807 /* Allocate a page-aligned space. GNU malloc would reclaim an | |
808 extra space if we passed an unaligned one. But we could | |
809 not always find a space which is contiguous to the previous. */ | |
810 POINTER new_bloc_start; | |
811 heap_ptr h = first_heap; | |
440 | 812 size_t get = ROUNDUP (size); |
428 | 813 |
814 address = (POINTER) ROUNDUP (virtual_break_value); | |
815 | |
816 /* Search the list upward for a heap which is large enough. */ | |
817 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get)) | |
818 { | |
819 h = h->next; | |
820 if (h == NIL_HEAP) | |
821 break; | |
822 address = (POINTER) ROUNDUP (h->start); | |
823 } | |
824 | |
825 /* If not found, obtain more space. */ | |
826 if (h == NIL_HEAP) | |
827 { | |
828 get += extra_bytes + page_size; | |
829 | |
830 if (! obtain (address, get)) | |
831 return 0; | |
832 | |
833 if (first_heap == last_heap) | |
834 address = (POINTER) ROUNDUP (virtual_break_value); | |
835 else | |
836 address = (POINTER) ROUNDUP (last_heap->start); | |
837 h = last_heap; | |
838 } | |
839 | |
840 new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get); | |
841 | |
842 if (first_heap->bloc_start < new_bloc_start) | |
843 { | |
844 /* This is no clean solution - no idea how to do it better. */ | |
845 if (r_alloc_freeze_level) | |
846 return NIL; | |
847 | |
848 /* There is a bug here: if the above obtain call succeeded, but the | |
849 relocate_blocs call below does not succeed, we need to free | |
850 the memory that we got with obtain. */ | |
851 | |
852 /* Move all blocs upward. */ | |
853 if (! relocate_blocs (first_bloc, h, new_bloc_start)) | |
854 return 0; | |
855 | |
856 /* Note that (POINTER)(h+1) <= new_bloc_start since | |
857 get >= page_size, so the following does not destroy the heap | |
858 header. */ | |
859 for (b = last_bloc; b != NIL_BLOC; b = b->prev) | |
860 { | |
440 | 861 memmove (b->new_data, b->data, b->size); |
428 | 862 *b->variable = b->data = b->new_data; |
863 } | |
864 | |
865 h->bloc_start = new_bloc_start; | |
866 | |
867 update_heap_bloc_correspondence (first_bloc, h); | |
868 } | |
869 if (h != first_heap) | |
870 { | |
871 /* Give up managing heaps below the one the new | |
872 virtual_break_value points to. */ | |
873 first_heap->prev = NIL_HEAP; | |
874 first_heap->next = h->next; | |
875 first_heap->start = h->start; | |
876 first_heap->end = h->end; | |
877 first_heap->free = h->free; | |
878 first_heap->first_bloc = h->first_bloc; | |
879 first_heap->last_bloc = h->last_bloc; | |
880 first_heap->bloc_start = h->bloc_start; | |
881 | |
882 if (first_heap->next) | |
883 first_heap->next->prev = first_heap; | |
884 else | |
885 last_heap = first_heap; | |
886 } | |
887 | |
888 memset (address, 0, size); | |
889 } | |
890 else /* size < 0 */ | |
891 { | |
440 | 892 size_t excess = (char *)first_heap->bloc_start |
428 | 893 - ((char *)virtual_break_value + size); |
894 | |
895 address = virtual_break_value; | |
896 | |
897 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes) | |
898 { | |
899 excess -= extra_bytes; | |
900 first_heap->bloc_start | |
901 = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess); | |
902 | |
903 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start); | |
904 | |
905 for (b = first_bloc; b != NIL_BLOC; b = b->next) | |
906 { | |
440 | 907 memmove (b->new_data, b->data, b->size); |
428 | 908 *b->variable = b->data = b->new_data; |
909 } | |
910 } | |
911 | |
912 if ((char *)virtual_break_value + size < (char *)first_heap->start) | |
913 { | |
914 /* We found an additional space below the first heap */ | |
915 first_heap->start = (POINTER) ((char *)virtual_break_value + size); | |
916 } | |
917 } | |
918 | |
919 virtual_break_value = (POINTER) ((char *)address + size); | |
920 break_value = (last_bloc | |
921 ? last_bloc->data + last_bloc->size | |
922 : first_heap->bloc_start); | |
923 if (size < 0) | |
924 relinquish (); | |
925 | |
926 return address; | |
927 } | |
928 | |
929 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to | |
930 the data is returned in *PTR. PTR is thus the address of some variable | |
931 which will use the data area. | |
932 | |
933 The allocation of 0 bytes is valid. | |
934 In case r_alloc_freeze is set, a best fit of unused blocs could be done | |
935 before allocating a new area. Not yet done. | |
936 | |
937 If we can't allocate the necessary memory, set *PTR to zero, and | |
938 return zero. */ | |
939 | |
440 | 940 POINTER r_alloc (POINTER *ptr, size_t size); |
428 | 941 POINTER |
440 | 942 r_alloc (POINTER *ptr, size_t size) |
428 | 943 { |
944 bloc_ptr new_bloc; | |
945 | |
1333 | 946 REGEX_MALLOC_CHECK (); |
947 | |
428 | 948 if (! r_alloc_initialized) |
949 init_ralloc (); | |
950 | |
951 new_bloc = get_bloc (size); | |
952 if (new_bloc) | |
953 { | |
954 new_bloc->variable = ptr; | |
955 *ptr = new_bloc->data; | |
956 } | |
957 else | |
958 *ptr = 0; | |
959 | |
960 return *ptr; | |
961 } | |
962 | |
963 /* Free a bloc of relocatable storage whose data is pointed to by PTR. | |
964 Store 0 in *PTR to show there's no block allocated. */ | |
965 | |
966 void r_alloc_free (POINTER *ptr); | |
967 void | |
968 r_alloc_free (POINTER *ptr) | |
969 { | |
970 register bloc_ptr dead_bloc; | |
971 | |
1333 | 972 REGEX_MALLOC_CHECK (); |
973 | |
428 | 974 if (! r_alloc_initialized) |
975 init_ralloc (); | |
976 | |
977 dead_bloc = find_bloc (ptr); | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
978 assert (dead_bloc != NIL_BLOC); |
428 | 979 |
980 free_bloc (dead_bloc); | |
981 *ptr = 0; | |
982 | |
983 #ifdef emacs | |
3263 | 984 #ifndef NEW_GC |
428 | 985 refill_memory_reserve (); |
3263 | 986 #endif /* not NEW_GC */ |
428 | 987 #endif |
988 } | |
989 | |
990 /* Given a pointer at address PTR to relocatable data, resize it to SIZE. | |
991 Do this by shifting all blocks above this one up in memory, unless | |
992 SIZE is less than or equal to the current bloc size, in which case | |
993 do nothing. | |
994 | |
995 In case r_alloc_freeze is set, a new bloc is allocated, and the | |
996 memory copied to it. Not very efficient. We could traverse the | |
997 bloc_list for a best fit of free blocs first. | |
998 | |
999 Change *PTR to reflect the new bloc, and return this value. | |
1000 | |
1001 If more memory cannot be allocated, then leave *PTR unchanged, and | |
1002 return zero. */ | |
1003 | |
440 | 1004 POINTER r_re_alloc (POINTER *ptr, size_t size); |
428 | 1005 POINTER |
440 | 1006 r_re_alloc (POINTER *ptr, size_t size) |
428 | 1007 { |
1008 register bloc_ptr bloc; | |
1009 | |
1333 | 1010 REGEX_MALLOC_CHECK (); |
1011 | |
428 | 1012 if (! r_alloc_initialized) |
1013 init_ralloc (); | |
1014 | |
1015 if (!*ptr) | |
1016 return r_alloc (ptr, size); | |
1017 if (!size) | |
1018 { | |
1019 r_alloc_free (ptr); | |
1020 return r_alloc (ptr, 0); | |
1021 } | |
1022 | |
1023 bloc = find_bloc (ptr); | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
1024 assert (bloc != NIL_BLOC); |
428 | 1025 |
1026 if (size < bloc->size) | |
1027 { | |
1028 /* Wouldn't it be useful to actually resize the bloc here? */ | |
1029 /* I think so too, but not if it's too expensive... */ | |
1030 if ((bloc->size - MEM_ROUNDUP (size) >= page_size) | |
1031 && r_alloc_freeze_level == 0) | |
1032 { | |
1033 resize_bloc (bloc, MEM_ROUNDUP (size)); | |
1034 /* Never mind if this fails, just do nothing... */ | |
1035 /* It *should* be infallible! */ | |
1036 } | |
1037 } | |
1038 else if (size > bloc->size) | |
1039 { | |
1040 if (r_alloc_freeze_level) | |
1041 { | |
1042 bloc_ptr new_bloc; | |
1043 new_bloc = get_bloc (MEM_ROUNDUP (size)); | |
1044 if (new_bloc) | |
1045 { | |
1046 new_bloc->variable = ptr; | |
1047 *ptr = new_bloc->data; | |
1048 bloc->variable = (POINTER *) NIL; | |
1049 } | |
1050 else | |
1051 return NIL; | |
1052 } | |
1053 else | |
1054 { | |
1055 if (! resize_bloc (bloc, MEM_ROUNDUP (size))) | |
1056 return NIL; | |
1057 } | |
1058 } | |
1059 return *ptr; | |
1060 } | |
1061 | |
1062 /* Disable relocations, after making room for at least SIZE bytes | |
1063 of non-relocatable heap if possible. The relocatable blocs are | |
1064 guaranteed to hold still until thawed, even if this means that | |
1065 malloc must return a null pointer. */ | |
1066 | |
1067 void r_alloc_freeze (long size); | |
1068 void | |
1069 r_alloc_freeze (long size) | |
1070 { | |
1071 if (! r_alloc_initialized) | |
1072 init_ralloc (); | |
1073 | |
1074 /* If already frozen, we can't make any more room, so don't try. */ | |
1075 if (r_alloc_freeze_level > 0) | |
1076 size = 0; | |
1077 /* If we can't get the amount requested, half is better than nothing. */ | |
1078 while (size > 0 && r_alloc_sbrk (size) == 0) | |
1079 size /= 2; | |
1080 ++r_alloc_freeze_level; | |
1081 if (size > 0) | |
1082 r_alloc_sbrk (-size); | |
1083 } | |
1084 | |
1085 void r_alloc_thaw (void); | |
1086 void | |
1087 r_alloc_thaw (void) | |
1088 { | |
1089 | |
1090 if (! r_alloc_initialized) | |
1091 init_ralloc (); | |
1092 | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
1093 assert (--r_alloc_freeze_level >= 0); |
428 | 1094 |
1095 /* This frees all unused blocs. It is not too inefficient, as the resize | |
440 | 1096 and memmove is done only once. Afterwards, all unreferenced blocs are |
428 | 1097 already shrunk to zero size. */ |
1098 if (!r_alloc_freeze_level) | |
1099 { | |
1100 bloc_ptr *b = &first_bloc; | |
1101 while (*b) | |
1102 if (!(*b)->variable) | |
1103 free_bloc (*b); | |
1104 else | |
1105 b = &(*b)->next; | |
1106 } | |
1107 } | |
1108 | |
1109 | |
1110 /* The hook `malloc' uses for the function which gets more space | |
1111 from the system. */ | |
1112 #ifndef DOUG_LEA_MALLOC | |
1113 extern POINTER (*__morecore) (ptrdiff_t size); | |
1114 #endif | |
1115 | |
1116 /* Initialize various things for memory allocation. */ | |
1117 | |
1118 void | |
1119 init_ralloc (void) | |
1120 { | |
1121 if (r_alloc_initialized) | |
1122 return; | |
1123 | |
1124 r_alloc_initialized = 1; | |
1125 real_morecore = (POINTER (*) (ptrdiff_t)) __morecore; | |
1126 __morecore = | |
1799 | 1127 #ifdef TYPEOF |
1128 (TYPEOF (__morecore)) | |
428 | 1129 #endif |
1130 r_alloc_sbrk; | |
1131 | |
1132 first_heap = last_heap = &heap_base; | |
1133 first_heap->next = first_heap->prev = NIL_HEAP; | |
1134 first_heap->start = first_heap->bloc_start | |
1135 = virtual_break_value = break_value = (*real_morecore) (0); | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
1136 assert (break_value != NIL); |
428 | 1137 |
1138 page_size = PAGE; | |
1139 extra_bytes = ROUNDUP (50000); | |
1140 | |
1141 #ifdef DOUG_LEA_MALLOC | |
1142 mallopt (M_TOP_PAD, 64 * 4096); | |
1143 #else | |
1144 #if 0 /* Hasn't been synched yet */ | |
1145 /* Give GNU malloc's morecore some hysteresis | |
1146 so that we move all the relocatable blocks much less often. */ | |
1147 __malloc_extra_blocks = 64; | |
1148 #endif | |
1149 #endif | |
1150 | |
1151 first_heap->end = (POINTER) ROUNDUP (first_heap->start); | |
1152 | |
1153 /* The extra call to real_morecore guarantees that the end of the | |
1154 address space is a multiple of page_size, even if page_size is | |
1155 not really the page size of the system running the binary in | |
1156 which page_size is stored. This allows a binary to be built on a | |
1157 system with one page size and run on a system with a smaller page | |
1158 size. */ | |
1159 (*real_morecore) (first_heap->end - first_heap->start); | |
1160 | |
1161 /* Clear the rest of the last page; this memory is in our address space | |
1162 even though it is after the sbrk value. */ | |
1163 /* Doubly true, with the additional call that explicitly adds the | |
1164 rest of that page to the address space. */ | |
1165 memset (first_heap->start, 0, first_heap->end - first_heap->start); | |
1166 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end; | |
1167 use_relocatable_buffers = 1; | |
1168 } | |
1169 | |
1170 #if defined (emacs) && defined (DOUG_LEA_MALLOC) | |
1171 | |
1172 /* Reinitialize the morecore hook variables after restarting a dumped | |
1173 Emacs. This is needed when using Doug Lea's malloc from GNU libc. */ | |
1174 void r_alloc_reinit (void); | |
1175 void | |
1176 r_alloc_reinit (void) | |
1177 { | |
1178 /* Only do this if the hook has been reset, so that we don't get an | |
1179 infinite loop, in case Emacs was linked statically. */ | |
1180 if ( (POINTER (*) (ptrdiff_t)) __morecore != r_alloc_sbrk) | |
1181 { | |
1182 real_morecore = (POINTER (*) (ptrdiff_t)) __morecore; | |
1183 __morecore = | |
1799 | 1184 #ifdef TYPEOF |
1185 (TYPEOF (__morecore)) | |
428 | 1186 #endif |
1187 r_alloc_sbrk; | |
1188 } | |
1189 } | |
1190 #if 0 | |
1191 #ifdef DEBUG | |
1192 | |
1193 void | |
1194 r_alloc_check (void) | |
1195 { | |
1196 int found = 0; | |
1197 heap_ptr h, ph = 0; | |
1198 bloc_ptr b, pb = 0; | |
1199 | |
1200 if (!r_alloc_initialized) | |
1201 return; | |
1202 | |
1203 assert (first_heap); | |
1204 assert (last_heap->end <= (POINTER) sbrk (0)); | |
1205 assert ((POINTER) first_heap < first_heap->start); | |
1206 assert (first_heap->start <= virtual_break_value); | |
1207 assert (virtual_break_value <= first_heap->end); | |
1208 | |
1209 for (h = first_heap; h; h = h->next) | |
1210 { | |
1211 assert (h->prev == ph); | |
1212 assert ((POINTER) ROUNDUP (h->end) == h->end); | |
1213 #if 0 /* ??? The code in ralloc.c does not really try to ensure | |
1214 the heap start has any sort of alignment. | |
1215 Perhaps it should. */ | |
1216 assert ((POINTER) MEM_ROUNDUP (h->start) == h->start); | |
1217 #endif | |
1218 assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start); | |
1219 assert (h->start <= h->bloc_start && h->bloc_start <= h->end); | |
1220 | |
1221 if (ph) | |
1222 { | |
1223 assert (ph->end < h->start); | |
1224 assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start); | |
1225 } | |
1226 | |
1227 if (h->bloc_start <= break_value && break_value <= h->end) | |
1228 found = 1; | |
1229 | |
1230 ph = h; | |
1231 } | |
1232 | |
1233 assert (found); | |
1234 assert (last_heap == ph); | |
1235 | |
1236 for (b = first_bloc; b; b = b->next) | |
1237 { | |
1238 assert (b->prev == pb); | |
1239 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data); | |
440 | 1240 assert ((size_t) MEM_ROUNDUP (b->size) == b->size); |
428 | 1241 |
1242 ph = 0; | |
1243 for (h = first_heap; h; h = h->next) | |
1244 { | |
1245 if (h->bloc_start <= b->data && b->data + b->size <= h->end) | |
1246 break; | |
1247 ph = h; | |
1248 } | |
1249 | |
1250 assert (h); | |
1251 | |
1252 if (pb && pb->data + pb->size != b->data) | |
1253 { | |
1254 assert (ph && b->data == h->bloc_start); | |
1255 while (ph) | |
1256 { | |
1257 if (ph->bloc_start <= pb->data | |
1258 && pb->data + pb->size <= ph->end) | |
1259 { | |
1260 assert (pb->data + pb->size + b->size > ph->end); | |
1261 break; | |
1262 } | |
1263 else | |
1264 { | |
1265 assert (ph->bloc_start + b->size > ph->end); | |
1266 } | |
1267 ph = ph->prev; | |
1268 } | |
1269 } | |
1270 pb = b; | |
1271 } | |
1272 | |
1273 assert (last_bloc == pb); | |
1274 | |
1275 if (last_bloc) | |
1276 assert (last_bloc->data + last_bloc->size == break_value); | |
1277 else | |
1278 assert (first_heap->bloc_start == break_value); | |
1279 } | |
1280 #endif /* DEBUG */ | |
1281 #endif /* 0 */ | |
1282 | |
1283 #endif | |
1284 | |
1285 #else /* HAVE_MMAP */ | |
1286 | |
1287 /* | |
1288 A relocating allocator built using the mmap(2) facility available | |
1289 in some OSes. Based on another version written by Paul Flinders, | |
1290 from which code (and comments) are snarfed. | |
1291 | |
1292 The OS should support mmap() with MAP_ANONYMOUS attribute, or have | |
1293 /dev/zero. It should support private memory mapping. | |
1294 | |
1295 Paul Flinders wrote a version which works well for systems that | |
1296 allow callers to specify (virtual) addresses to mmap(). | |
1297 Unfortunately, such a scheme doesn't work for certain systems like | |
1298 HP-UX that have a system-wide virtual->real address map, and | |
1299 consequently impose restrictions on the virtual address values | |
1300 permitted. | |
1301 | |
1302 NB: The mapping scheme in HP-UX is motivated by the inverted page | |
1303 table design in some HP processors. | |
1304 | |
1305 This alternate implementation allows for the addresses to be | |
1306 optionally chosen by the system. Fortunately, buffer allocation | |
1307 doesn't insist upon contiguous memory which Flinders' scheme | |
1308 provides, and this one doesn't. | |
1309 | |
1310 We don't really provide for hysteresis here, but add some metering | |
1311 to monitor how poorly the allocator actually works. See the | |
1312 documentation for `mmap-hysteresis'. | |
1313 | |
1314 This implementation actually cycles through the blocks allocated | |
1315 via mmap() and only sends it to free() if it wasn't one of them. | |
1316 Unfortunately, this is O(n) in the number of mmapped blocks. (Not | |
1317 really, as we have a hash table which tries to reduce the cost.) | |
1318 Also, this dereferences the pointer passed, so it would cause a | |
1319 segfault if garbage was passed to it. */ | |
1320 | |
1321 #include <fcntl.h> | |
1322 #include <sys/mman.h> | |
1323 #include <stdio.h> | |
1324 | |
1325 typedef void *VM_ADDR; /* VM addresses */ | |
442 | 1326 static const VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */ |
428 | 1327 |
1328 /* Configuration for relocating allocator. */ | |
1329 | |
1330 /* #define MMAP_GENERATE_ADDRESSES */ | |
1331 /* Define this if you want Emacs to manage the address table. | |
1332 It is not recommended unless you have major problems with the | |
1333 default scheme, which allows the OS to pick addresses. */ | |
1334 | |
1335 /* USELESS_LOWER_ADDRESS_BITS defines the number of bits which can be | |
1336 discarded while computing the hash, as they're always zero. The | |
1337 default is appropriate for a page size of 4096 bytes. */ | |
1338 | |
1339 #define USELESS_LOWER_ADDRESS_BITS 12 | |
1340 | |
1341 | |
1342 /* Size of hash table for inverted VM_ADDR->MMAP_HANDLE lookup */ | |
1343 | |
1344 #define MHASH_PRIME 89 | |
1345 | |
1346 | |
1347 /* Whether we want to enable metering of some ralloc performance. | |
1348 This incurs a constant penalty for each mmap operation. */ | |
1349 | |
1350 #define MMAP_METERING | |
1351 | |
1352 | |
1353 /* Rename the following to protect against a some smartness elsewhere. | |
1354 We need access to the allocator used for non-mmap allocation | |
1355 elsewhere, in case we get passed a handle that we didn't allocate | |
1356 ourselves. Currently, this default allocator is also used to | |
1357 maintain local structures for relocatable blocks. */ | |
1358 | |
1359 #define UNDERLYING_MALLOC malloc | |
1360 #define UNDERLYING_FREE free | |
1361 #define UNDERLYING_REALLOC realloc | |
1362 | |
1363 /* MAP_ADDRCHOICE_FLAG is set to MAP_FIXED if MMAP_GENERATE_ADDRESSES | |
1364 is defined, and MAP_VARIABLE otherwise. Some losing systems don't | |
1365 define the _FIXED/_VARIABLE flags, in which case it is set to 0 */ | |
1366 | |
1367 #ifdef MMAP_GENERATE_ADDRESSES | |
1368 # ifdef MAP_FIXED | |
1369 # define MAP_ADDRCHOICE_FLAG MAP_FIXED | |
1370 # endif | |
1371 #else /* !MMAP_GENERATE_ADDRESSES */ | |
1372 # ifdef MAP_VARIABLE | |
1373 # define MAP_ADDRCHOICE_FLAG MAP_VARIABLE | |
1374 # endif | |
1375 #endif /* MMAP_GENERATE_ADDRESSES */ | |
1376 | |
1377 /* Default case. */ | |
1378 #ifndef MAP_ADDRCHOICE_FLAG | |
1379 # define MAP_ADDRCHOICE_FLAG 0 | |
1380 #endif /* MAP_ADDRCHOICE_FLAG */ | |
1381 | |
1382 #ifdef MAP_ANONYMOUS | |
1383 # define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG | MAP_ANONYMOUS) | |
1384 #else | |
1385 # define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG) | |
1386 #endif /* MAP_ANONYMOUS */ | |
1387 | |
1388 | |
1389 /* (ptf): A flag to indicate whether we have initialized ralloc yet. For | |
1390 Emacs's sake, please do not make this local to malloc_init; on some | |
1391 machines, the dumping procedure makes all static variables | |
1392 read-only. On these machines, the word static is #defined to be | |
1393 the empty string, meaning that r_alloc_initialized becomes an | |
1394 automatic variable, and loses its value each time Emacs is started up. | |
1395 | |
1396 If we're using mmap this flag has three possible values | |
1397 0 - initial value | |
1398 1 - Normal value when running temacs. In this case buffers | |
1399 are allocated using malloc so that any data that they | |
1400 contain becomes part of the undumped executable. | |
1401 2 - Normal value when running emacs */ | |
1402 static int r_alloc_initialized = 0; | |
1403 | |
1404 /* (ptf): Macros for rounding. Note that rounding to any value is possible | |
1405 by changing the definition of PAGE. */ | |
1406 #define PAGE (getpagesize ()) | |
1407 #define PAGES_FOR(size) (((unsigned long int) (size) + page_size - 1)/page_size) | |
1408 #define ROUNDUP(size) ((unsigned long int)PAGES_FOR(size)*page_size) | |
1409 | |
1410 | |
1411 /* DEV_ZERO_FD is -1 normally, but for systems without MAP_ANONYMOUS | |
1412 points to a file descriptor opened on /dev/zero */ | |
1413 | |
1414 static int DEV_ZERO_FD = -1; | |
1415 | |
1416 | |
1417 /* We actually need a data structure that can be usefully structured | |
1418 based on the VM address, and allows an ~O(1) lookup on an arbitrary | |
1419 address, i.e. a hash table. Maybe the XEmacs hash table can be | |
1420 coaxed enough. At the moment, we use lookup on a hash table to | |
1421 decide whether to do an O(n) search on the malloced block list. | |
1422 Addresses are hashed to a bucket modulo MHASH_PRIME. */ | |
1423 | |
1424 | |
1425 /* We settle for a standard doubly-linked-list. The dynarr type isn't | |
1426 very amenable to deletion of items in the middle, so we conjure up | |
1427 yet another stupid datastructure. The structure is maintained as a | |
1428 ring, and the singleton ring has the sole element as its left and | |
1429 right neighbours. */ | |
1430 | |
1431 static void init_MHASH_table (void); /* Forward reference */ | |
1432 | |
1433 typedef struct alloc_dll | |
1434 { | |
1435 size_t size; /* #bytes currently in use */ | |
1436 size_t space_for; /* #bytes we really have */ | |
1437 POINTER* aliased_address; /* Address of aliased variable, to tweak if relocating */ | |
1438 VM_ADDR vm_addr; /* VM address returned by mmap */ | |
1439 struct alloc_dll *left; /* Left link in circular doubly linked list */ | |
1440 struct alloc_dll *right; | |
1441 } *MMAP_HANDLE; | |
1442 | |
1443 static MMAP_HANDLE mmap_start = 0; /* Head of linked list */ | |
1444 static size_t page_size = 0; /* Size of VM pages */ | |
458 | 1445 static Fixnum mmap_hysteresis; /* Logically a "size_t" */ |
428 | 1446 |
1447 /* Get a new handle for a fresh block. */ | |
1448 static MMAP_HANDLE | |
1449 new_mmap_handle (size_t nsiz) | |
1450 { | |
1451 MMAP_HANDLE h = (MMAP_HANDLE) UNDERLYING_MALLOC( sizeof (struct alloc_dll)); | |
1452 if ( h == 0) return 0; | |
1453 h->size = nsiz; | |
1454 if (mmap_start == 0) | |
1455 { | |
1456 init_MHASH_table (); | |
1457 mmap_start = h; mmap_start->left = h; mmap_start->right = h; | |
1458 } | |
1459 { | |
1460 MMAP_HANDLE prev = mmap_start->left; | |
1461 MMAP_HANDLE nex = mmap_start; | |
1462 | |
1463 /* Four pointers need fixing. */ | |
1464 h->right = nex; | |
1465 h->left = prev; | |
1466 prev->right = h; | |
1467 nex->left = h; | |
1468 } | |
1469 return h; | |
1470 } | |
1471 | |
1472 /* Find a handle given the aliased address using linear search. */ | |
1473 static MMAP_HANDLE | |
1474 find_mmap_handle_lsearch (POINTER *alias) | |
1475 { | |
1476 MMAP_HANDLE h = mmap_start; | |
1477 if (h == 0) return 0; | |
1478 do { | |
1479 if (h->aliased_address == alias && *alias == h->vm_addr) | |
1480 return h; | |
1481 h = h->right; | |
1482 } while( h != mmap_start ); | |
1483 return 0; /* Bogus alias passed. */ | |
1484 } | |
1485 | |
1486 /* Free a handle. */ | |
1487 static void | |
1488 free_mmap_handle (MMAP_HANDLE h) | |
1489 { | |
1490 MMAP_HANDLE prev = h->left; | |
1491 MMAP_HANDLE nex = h->right; | |
1492 if (prev == h || nex == h) /* In fact, this should be && */ | |
1493 { /* We're the singleton dll */ | |
1494 UNDERLYING_FREE( h ); /* Free the sole item */ | |
1495 mmap_start = 0; return; | |
1496 } | |
1497 else if (h == mmap_start) | |
1498 { | |
1499 mmap_start = nex; /* Make sure mmap_start isn't bogus. */ | |
1500 } | |
1501 prev->right = nex; | |
1502 nex->left = prev; | |
1503 UNDERLYING_FREE( h ); | |
1504 } | |
1505 | |
1506 /* A simple hash table to speed up the inverted lookup of | |
1507 VM_ADDR->MMAP_HANDLE. We maintain the number of hits for a | |
1508 particular bucket. We invalidate a hash table entry during block | |
1509 deletion if the hash has cached the deleted block's address. */ | |
1510 | |
1511 /* Simple hash check. */ | |
1512 struct { | |
1513 int n_hits; /* How many addresses map to this? */ | |
1514 MMAP_HANDLE handle; /* What is the current handle? */ | |
1515 VM_ADDR addr; /* What is its VM address? */ | |
1516 } MHASH_HITS[ MHASH_PRIME ]; | |
1517 | |
1518 static void | |
1519 init_MHASH_table (void) | |
1520 { | |
1521 int i = 0; | |
1522 for (; i < MHASH_PRIME; i++) | |
1523 { | |
1524 MHASH_HITS[i].n_hits = 0; | |
1525 MHASH_HITS[i].addr = 0; | |
1526 MHASH_HITS[i].handle = 0; | |
1527 } | |
1528 } | |
1529 | |
1530 /* Compute the hash value for an address. */ | |
1531 static int | |
1532 MHASH (VM_ADDR addr) | |
1533 { | |
1534 #if (LONGBITS == 64) | |
1535 unsigned long int addr_shift = (unsigned long int)(addr) >> USELESS_LOWER_ADDRESS_BITS; | |
1536 #else | |
1537 unsigned int addr_shift = (unsigned int)(addr) >> USELESS_LOWER_ADDRESS_BITS; | |
1538 #endif | |
1539 int hval = addr_shift % MHASH_PRIME; /* We could have addresses which are -ve | |
1540 when converted to signed ints */ | |
1541 return ((hval >= 0) ? hval : MHASH_PRIME + hval); | |
1542 } | |
1543 | |
1544 /* Add a VM address with its corresponding handle to the table. */ | |
1545 static void | |
1546 MHASH_ADD (VM_ADDR addr, MMAP_HANDLE h) | |
1547 { | |
1548 int kVal = MHASH( addr ); | |
1549 if (MHASH_HITS[kVal].n_hits++ == 0) | |
1550 { /* Only overwrite the table if there were no hits so far. */ | |
1551 MHASH_HITS[kVal].addr = addr; | |
1552 MHASH_HITS[kVal].handle = h; | |
1553 } | |
1554 } | |
1555 | |
1556 /* Delete a VM address entry from the hash table. */ | |
1557 static void | |
1558 MHASH_DEL (VM_ADDR addr) | |
1559 { | |
1560 int kVal = MHASH( addr ); | |
1561 MHASH_HITS[kVal].n_hits--; | |
1562 if (addr == MHASH_HITS[kVal].addr) | |
1563 { | |
1564 MHASH_HITS[kVal].addr = 0; /* Invalidate cache. */ | |
1565 MHASH_HITS[kVal].handle = 0; | |
1566 } | |
1567 } | |
1568 | |
1569 /* End of hash buckets */ | |
1570 | |
1571 /* Metering malloc performance. */ | |
1572 #ifdef MMAP_METERING | |
1573 /* If we're metering, we introduce some extra symbols to aid the noble | |
1574 cause of bloating XEmacs core size. */ | |
1575 | |
1576 static Lisp_Object Qmmap_times_mapped; | |
1577 static Lisp_Object Qmmap_pages_mapped; | |
1578 static Lisp_Object Qmmap_times_unmapped; | |
1579 static Lisp_Object Qmmap_times_remapped; | |
1580 static Lisp_Object Qmmap_didnt_copy; | |
1581 static Lisp_Object Qmmap_pages_copied; | |
1582 static Lisp_Object Qmmap_average_bumpval; | |
1583 static Lisp_Object Qmmap_wastage; | |
1584 static Lisp_Object Qmmap_live_pages; | |
1585 static Lisp_Object Qmmap_addr_looked_up; | |
1586 static Lisp_Object Qmmap_hash_worked; | |
1587 static Lisp_Object Qmmap_addrlist_size; | |
1588 | |
1589 #define M_Map 0 /* How many times allocated? */ | |
1590 #define M_Pages_Map 1 /* How many pages allocated? */ | |
1591 #define M_Unmap 2 /* How many times freed? */ | |
1592 #define M_Remap 3 /* How many times increased in size? */ | |
1593 #define M_Didnt_Copy 4 /* How many times didn't need to copy? */ | |
1594 #define M_Copy_Pages 5 /* Total # pages copied */ | |
1595 #define M_Average_Bumpval 6 /* Average bump value */ | |
1596 #define M_Wastage 7 /* Remaining (unused space) */ | |
1597 #define M_Live_Pages 8 /* #live pages */ | |
1598 #define M_Address_Lookup 9 /* How many times did we need to check if an addr is in the block? */ | |
1599 #define M_Hash_Worked 10 /* How many times did the simple hash check work? */ | |
1600 #define M_Addrlist_Size 11 /* What is the size of the XEmacs memory map? */ | |
1601 | |
1602 #define N_Meterables 12 /* Total number of meterables */ | |
1603 #define MEMMETER(x) {x;} | |
1604 #define MVAL(x) (meter[x]) | |
1605 #define MLVAL(x) (make_int (meter[x])) | |
1606 static int meter[N_Meterables]; | |
1607 | |
1608 DEFUN ("mmap-allocator-status", Fmmap_allocator_status, 0, 0, 0, /* | |
1609 Return some information about mmap-based allocator. | |
1610 | |
1611 mmap-times-mapped: number of times r_alloc was called. | |
1612 mmap-pages-mapped: number of pages mapped by r_alloc calls only. | |
1613 mmap-times-unmapped: number of times r_free was called. | |
1614 mmap-times-remapped: number of times r_re_alloc was called. | |
1615 mmap-didnt-copy: number of times re-alloc did NOT have to move the block. | |
1616 mmap-pages-copied: total number of pages copied. | |
1617 mmap-average-bumpval: average increase in size demanded to re-alloc. | |
1618 mmap-wastage: total number of bytes allocated, but not currently in use. | |
1619 mmap-live-pages: total number of pages live. | |
1620 mmap-addr-looked-up: total number of times needed to check if addr is in block. | |
1621 mmap-hash-worked: total number of times the simple hash check worked. | |
1622 mmap-addrlist-size: number of entries in address picking list. | |
1623 */ | |
1624 ()) | |
1625 { | |
1626 Lisp_Object result = Qnil; | |
1627 | |
1628 result = cons3 (Qmmap_addrlist_size, MLVAL (M_Addrlist_Size), result); | |
1629 result = cons3 (Qmmap_hash_worked, MLVAL (M_Hash_Worked), result); | |
1630 result = cons3 (Qmmap_addr_looked_up, MLVAL (M_Address_Lookup), result); | |
1631 result = cons3 (Qmmap_live_pages, MLVAL (M_Live_Pages), result); | |
1632 result = cons3 (Qmmap_wastage, MLVAL (M_Wastage), result); | |
1633 result = cons3 (Qmmap_average_bumpval,MLVAL (M_Average_Bumpval), result); | |
1634 result = cons3 (Qmmap_pages_copied, MLVAL (M_Copy_Pages), result); | |
1635 result = cons3 (Qmmap_didnt_copy, MLVAL (M_Didnt_Copy), result); | |
1636 result = cons3 (Qmmap_times_remapped, MLVAL (M_Remap), result); | |
1637 result = cons3 (Qmmap_times_unmapped, MLVAL (M_Unmap), result); | |
1638 result = cons3 (Qmmap_pages_mapped, MLVAL (M_Pages_Map), result); | |
1639 result = cons3 (Qmmap_times_mapped, MLVAL (M_Map), result); | |
1640 | |
1641 return result; | |
1642 } | |
1643 | |
1644 #else /* !MMAP_METERING */ | |
1645 | |
1646 #define MEMMETER(x) | |
1647 #define MVAL(x) | |
1648 | |
1649 #endif /* MMAP_METERING */ | |
1650 | |
1651 static MMAP_HANDLE | |
1652 find_mmap_handle (POINTER *alias) | |
1653 { | |
1654 int kval = MHASH( *alias ); | |
1655 MEMMETER( MVAL(M_Address_Lookup)++ ) | |
1656 switch( MHASH_HITS[kval].n_hits) | |
1657 { | |
1658 case 0: | |
1659 MEMMETER( MVAL( M_Hash_Worked )++ ) | |
1660 return 0; | |
1661 | |
1662 case 1: | |
1663 if (*alias == MHASH_HITS[kval].addr) | |
1664 { | |
1665 MEMMETER( MVAL( M_Hash_Worked) ++ ); | |
1666 return MHASH_HITS[kval].handle; | |
1667 } | |
1668 /* FALL THROUGH */ | |
1669 default: | |
1670 return find_mmap_handle_lsearch( alias ); | |
1671 } /* switch */ | |
1672 } | |
1673 | |
1674 /* | |
1675 Some kernels don't like being asked to pick addresses for mapping | |
1676 themselves---IRIX is known to become extremely slow if mmap is | |
1677 passed a ZERO as the first argument. In such cases, we use an | |
1678 address map which is managed local to the XEmacs process. The | |
1679 address map maintains an ordered linked list of (address, size, | |
1680 occupancy) triples ordered by the absolute address. Initially, a | |
1681 large address area is marked as being empty. The address picking | |
1682 scheme takes bites off the first block which is still empty and | |
1683 large enough. If mmap with the specified address fails, it is | |
1684 marked unavailable and not attempted thereafter. The scheme will | |
1685 keep fragmenting the large empty block until it finds an address | |
1686 which can be successfully mmapped, or until there are no free | |
1687 blocks of the given size left. | |
1688 | |
1689 Note that this scheme, given its first-fit strategy, is prone to | |
1690 fragmentation of the first part of memory earmarked for this | |
1691 purpose. [ACP Vol I]. We can't use the workaround of using a | |
1692 randomized first fit because we don't want to presume too much | |
1693 about the memory map. Instead, we try to coalesce empty or | |
1694 unavailable blocks at any available opportunity. */ | |
1695 | |
1696 /* Initialization procedure for address picking scheme */ | |
1697 static void Addr_Block_initialize(void); | |
1698 | |
1699 /* Get a suitable VM_ADDR via mmap */ | |
440 | 1700 static VM_ADDR New_Addr_Block (size_t sz); |
428 | 1701 |
1702 /* Free a VM_ADDR allocated via New_Addr_Block */ | |
440 | 1703 static void Free_Addr_Block (VM_ADDR addr, size_t sz); |
428 | 1704 |
1705 #ifdef MMAP_GENERATE_ADDRESSES | |
1706 /* Implementation of the three calls for address picking when XEmacs is incharge */ | |
1707 | |
1708 /* The enum denotes the status of the following block. */ | |
1709 typedef enum { empty = 0, occupied, unavailable } addr_status; | |
1710 | |
1711 typedef struct addr_chain | |
1712 { | |
1713 POINTER addr; | |
440 | 1714 size_t sz; |
428 | 1715 addr_status flag; |
1716 struct addr_chain *next; | |
1717 } ADDRESS_BLOCK, *ADDRESS_CHAIN; | |
1718 /* NB: empty and unavailable blocks are concatenated. */ | |
1719 | |
1720 static ADDRESS_CHAIN addr_chain = 0; | |
1721 /* Start off the address block chain with a humongous address block | |
1722 which is empty to start with. Note that addr_chain is invariant | |
1723 WRT the addition/deletion of address blocks because of the assert | |
1724 in Coalesce() and the strict ordering of blocks by their address | |
1725 */ | |
440 | 1726 static void |
1727 Addr_Block_initialize (void) | |
428 | 1728 { |
1729 MEMMETER( MVAL( M_Addrlist_Size )++) | |
1730 addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK )); | |
1731 addr_chain->next = 0; /* Last block in chain */ | |
1732 addr_chain->sz = 0x0c000000; /* Size */ | |
442 | 1733 addr_chain->addr = (POINTER) (0x04000000); |
428 | 1734 addr_chain->flag = empty; |
1735 } | |
1736 | |
1737 /* Coalesce address blocks if they are contiguous. Only empty and | |
1738 unavailable slots are coalesced. */ | |
440 | 1739 static void |
1740 Coalesce_Addr_Blocks (void) | |
428 | 1741 { |
1742 ADDRESS_CHAIN p; | |
1743 for (p = addr_chain; p; p = p->next) | |
1744 { | |
1745 while (p->next && p->flag == p->next->flag) | |
1746 { | |
1747 ADDRESS_CHAIN np; | |
1748 np = p->next; | |
1749 | |
1750 if (p->flag == occupied) break; /* No cigar */ | |
1751 | |
1752 /* Check if the addresses are contiguous. */ | |
1753 if (p->addr + p->sz != np->addr) break; | |
1754 | |
1755 MEMMETER( MVAL( M_Addrlist_Size )--) | |
1756 /* We can coalesce these two. */ | |
1757 p->sz += np->sz; | |
1758 p->next = np->next; | |
1759 assert( np != addr_chain ); /* We're not freeing the head of the list. */ | |
1760 UNDERLYING_FREE( np ); | |
1761 } | |
1762 } /* for all p */ | |
1763 } | |
1764 | |
1765 /* Get an empty address block of specified size. */ | |
440 | 1766 static VM_ADDR |
1767 New_Addr_Block (size_t sz) | |
428 | 1768 { |
1769 ADDRESS_CHAIN p = addr_chain; | |
1770 VM_ADDR new_addr = VM_FAILURE_ADDR; | |
1771 for (; p; p = p->next) | |
1772 { | |
1773 if (p->flag == empty && p->sz > sz) | |
1774 { | |
1775 /* Create a new entry following p which is empty. */ | |
1776 ADDRESS_CHAIN remainder = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ) ); | |
1777 remainder->next = p->next; | |
1778 remainder->flag = empty; | |
1779 remainder->addr = p->addr + sz; | |
1780 remainder->sz = p->sz - sz; | |
1781 | |
1782 MEMMETER( MVAL( M_Addrlist_Size )++) | |
1783 | |
1784 /* Now make p become an occupied block with the appropriate size */ | |
1785 p->next = remainder; | |
1786 p->sz = sz; | |
1787 new_addr = mmap( (VM_ADDR) p->addr, p->sz, PROT_READ|PROT_WRITE, | |
1788 MAP_FLAGS, DEV_ZERO_FD, 0 ); | |
1789 if (new_addr == VM_FAILURE_ADDR) | |
1790 { | |
1791 p->flag = unavailable; | |
1792 continue; | |
1793 } | |
1794 p->flag = occupied; | |
1795 break; | |
1796 } | |
1797 } | |
1798 Coalesce_Addr_Blocks(); | |
1799 return new_addr; | |
1800 } | |
1801 | |
1802 /* Free an address block. We mark the block as being empty, and attempt to | |
1803 do any coalescing that may have resulted from this. */ | |
440 | 1804 static void |
1805 Free_Addr_Block (VM_ADDR addr, size_t sz) | |
428 | 1806 { |
1807 ADDRESS_CHAIN p = addr_chain; | |
1808 for (; p; p = p->next ) | |
1809 { | |
1810 if (p->addr == addr) | |
1811 { | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
1812 assert (p->sz == sz); /* ACK! Shouldn't happen at all. */ |
428 | 1813 munmap( (VM_ADDR) p->addr, p->sz ); |
1814 p->flag = empty; | |
1815 break; | |
1816 } | |
1817 } | |
5050
6f2158fa75ed
Fix quick-build, use asserts() in place of ABORT()
Ben Wing <ben@xemacs.org>
parents:
3263
diff
changeset
|
1818 assert (p); /* Can't happen... we've got a block to free which is not in |
428 | 1819 the address list. */ |
1820 Coalesce_Addr_Blocks(); | |
1821 } | |
1822 #else /* !MMAP_GENERATE_ADDRESSES */ | |
1823 /* This is an alternate (simpler) implementation in cases where the | |
1824 address is picked by the kernel. */ | |
1825 | |
440 | 1826 static void |
1827 Addr_Block_initialize (void) | |
428 | 1828 { |
1829 /* Nothing. */ | |
1830 } | |
1831 | |
440 | 1832 static VM_ADDR |
1833 New_Addr_Block (size_t sz) | |
428 | 1834 { |
1835 return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS, | |
1836 DEV_ZERO_FD, 0 ); | |
1837 } | |
1838 | |
440 | 1839 static void |
1840 Free_Addr_Block (VM_ADDR addr, size_t sz) | |
428 | 1841 { |
1842 munmap ((caddr_t) addr, sz ); | |
1843 } | |
1844 | |
1845 #endif /* MMAP_GENERATE_ADDRESSES */ | |
1846 | |
1847 | |
1848 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */ | |
1849 | |
1850 /* | |
440 | 1851 r_alloc (POINTER, SIZE): Allocate a relocatable area with the start |
428 | 1852 address aliased to the first parameter. |
1853 */ | |
1854 | |
440 | 1855 POINTER r_alloc (POINTER *ptr, size_t size); |
428 | 1856 POINTER |
440 | 1857 r_alloc (POINTER *ptr, size_t size) |
428 | 1858 { |
1859 MMAP_HANDLE mh; | |
1860 | |
1333 | 1861 REGEX_MALLOC_CHECK (); |
1862 | |
428 | 1863 switch(r_alloc_initialized) |
1864 { | |
1865 case 0: | |
2500 | 1866 ABORT(); |
428 | 1867 case 1: |
1868 *ptr = (POINTER) UNDERLYING_MALLOC(size); | |
1869 break; | |
1870 default: | |
1871 mh = new_mmap_handle( size ); | |
1872 if (mh) | |
1873 { | |
440 | 1874 size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); |
1875 size_t mmapped_size = ROUNDUP( size + hysteresis ); | |
428 | 1876 MEMMETER( MVAL(M_Map)++ ) |
1877 MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) ) | |
1878 MEMMETER( MVAL(M_Wastage) += mmapped_size - size ) | |
1879 MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) ) | |
1880 mh->vm_addr = New_Addr_Block( mmapped_size ); | |
1881 if (mh->vm_addr == VM_FAILURE_ADDR) { | |
1882 free_mmap_handle( mh ); /* Free the loser */ | |
1883 *ptr = 0; | |
1884 return 0; /* ralloc failed due to mmap() failure. */ | |
1885 } | |
1886 MHASH_ADD( mh->vm_addr, mh ); | |
1887 mh->space_for = mmapped_size; | |
1888 mh->aliased_address = ptr; | |
1889 *ptr = (POINTER) mh->vm_addr; | |
1890 } | |
1891 else | |
1892 *ptr = 0; /* Malloc of block failed */ | |
1893 break; | |
1894 } | |
1895 return *ptr; | |
1896 } | |
1897 | |
1898 /* Free a bloc of relocatable storage whose data is pointed to by PTR. | |
1899 Store 0 in *PTR to show there's no block allocated. */ | |
1900 | |
1901 void r_alloc_free (POINTER *ptr); | |
1902 void | |
1903 r_alloc_free (POINTER *ptr) | |
1904 { | |
1333 | 1905 REGEX_MALLOC_CHECK (); |
1906 | |
428 | 1907 switch( r_alloc_initialized) { |
1908 case 0: | |
2500 | 1909 ABORT(); |
428 | 1910 |
1911 case 1: | |
1912 UNDERLYING_FREE( *ptr ); /* Certain this is from the heap. */ | |
1913 break; | |
1914 | |
1915 default: | |
1916 { | |
1917 MMAP_HANDLE dead_handle = find_mmap_handle( ptr ); | |
1918 /* Check if we've got it. */ | |
1919 if (dead_handle == 0) /* Didn't find it in the list of mmap handles */ | |
1920 { | |
1921 UNDERLYING_FREE( *ptr ); | |
1922 } | |
1923 else | |
1924 { | |
1925 MEMMETER( MVAL( M_Wastage ) -= (dead_handle->space_for - dead_handle->size) ) | |
1926 MEMMETER( MVAL( M_Live_Pages ) -= (dead_handle->space_for / page_size )) | |
1927 MEMMETER(MVAL(M_Unmap)++) | |
1928 MHASH_DEL( dead_handle->vm_addr ); | |
1929 Free_Addr_Block( dead_handle->vm_addr, dead_handle->space_for ); | |
1930 free_mmap_handle (dead_handle); | |
1931 } | |
1932 } | |
1933 break; | |
1934 } /* r_alloc_initialized */ | |
1935 *ptr = 0; /* Zap the pointer's contents. */ | |
1936 } | |
1937 | |
1938 /* Given a pointer at address PTR to relocatable data, resize it to SIZE. | |
1939 | |
1940 Change *PTR to reflect the new bloc, and return this value. | |
1941 | |
1942 If more memory cannot be allocated, then leave *PTR unchanged, and | |
1943 return zero. */ | |
1944 | |
440 | 1945 POINTER r_re_alloc (POINTER *ptr, size_t sz); |
428 | 1946 POINTER |
440 | 1947 r_re_alloc (POINTER *ptr, size_t sz) |
428 | 1948 { |
1333 | 1949 REGEX_MALLOC_CHECK (); |
1950 | |
428 | 1951 if (r_alloc_initialized == 0) |
1952 { | |
2500 | 1953 ABORT (); |
428 | 1954 return 0; /* suppress compiler warning */ |
1955 } | |
1956 else if (r_alloc_initialized == 1) | |
1957 { | |
1958 POINTER tmp = (POINTER) realloc(*ptr, sz); | |
1959 if (tmp) | |
1960 *ptr = tmp; | |
1961 return tmp; | |
1962 } | |
1963 else | |
1964 { | |
440 | 1965 size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); |
1966 size_t actual_sz = ROUNDUP( sz + hysteresis ); | |
428 | 1967 MMAP_HANDLE h = find_mmap_handle( ptr ); |
1968 VM_ADDR new_vm_addr; | |
1969 | |
1970 if ( h == 0 ) /* Was allocated using malloc. */ | |
1971 { | |
1972 POINTER tmp = (POINTER) UNDERLYING_REALLOC(*ptr, sz); | |
1973 if (tmp) | |
1974 *ptr = tmp; | |
1975 return tmp; | |
1976 } | |
1977 | |
1978 MEMMETER( | |
1979 MVAL(M_Average_Bumpval) = | |
1980 (((double) MVAL(M_Remap) * MVAL(M_Average_Bumpval)) + (sz - h->size)) | |
1981 / (double) (MVAL(M_Remap) + 1)) | |
1982 MEMMETER(MVAL(M_Remap)++) | |
1983 if (h->space_for > sz) /* We've got some more room */ | |
1984 { /* Also, if a shrinkage was asked for. */ | |
1985 MEMMETER( MVAL(M_Didnt_Copy)++ ) | |
1986 MEMMETER( MVAL(M_Wastage) -= (sz - h->size)) | |
1987 /* We're pretty dumb at handling shrinkage. We should check for | |
1988 a larger gap than the standard hysteresis allowable, and if so, | |
1989 shrink the number of pages. Right now, we simply reset the size | |
1990 component and return. */ | |
1991 h->size = sz; | |
1992 return *ptr; | |
1993 } | |
1994 | |
1995 new_vm_addr = New_Addr_Block( actual_sz ); | |
1996 if (new_vm_addr == VM_FAILURE_ADDR) | |
1997 {/* Failed to realloc. */ | |
1998 /* *ptr = 0; */ | |
1999 return 0; | |
2000 } | |
2001 | |
2002 MHASH_ADD( new_vm_addr, h ); | |
2003 /* We got a block OK: now we should move the old contents to the | |
2004 new address. We use the old size of this block. */ | |
2005 memmove(new_vm_addr, h->vm_addr, h->size); | |
2006 MHASH_DEL( h->vm_addr ); | |
2007 Free_Addr_Block( h->vm_addr, h->space_for ); /* Unmap old area. */ | |
2008 | |
2009 MEMMETER( MVAL( M_Copy_Pages ) += (h->space_for/page_size) ) | |
2010 MEMMETER( MVAL( M_Live_Pages ) -= (h->space_for / page_size)) | |
2011 MEMMETER( MVAL( M_Live_Pages ) += (actual_sz / page_size)) | |
2012 MEMMETER( MVAL( M_Wastage ) -= (h->space_for - h->size)) | |
2013 MEMMETER( MVAL( M_Wastage ) += (actual_sz - sz) ) | |
2014 | |
2015 /* Update block datastructure. */ | |
2016 h->space_for = actual_sz; /* New total space */ | |
2017 h->size = sz; /* New (requested) size */ | |
2018 h->vm_addr = new_vm_addr; /* New VM start address */ | |
2019 h->aliased_address = ptr; /* Change alias to reflect block relocation. */ | |
2020 *ptr = (POINTER) h->vm_addr; | |
2021 return *ptr; | |
2022 } | |
2023 } | |
2024 | |
2025 | |
2026 /* Initialize various things for memory allocation. | |
2027 */ | |
2028 void | |
2029 init_ralloc (void) | |
2030 { | |
2031 int i = 0; | |
2032 if (r_alloc_initialized > 1) | |
2033 return; /* used to return 1 */ | |
2034 | |
909 | 2035 #ifdef PDUMP |
2036 /* Under pdump, we need to activate ralloc on the first go. */ | |
2037 ++r_alloc_initialized; | |
2038 #endif | |
428 | 2039 if (++r_alloc_initialized == 1) |
2040 return; /* used to return 1 */ | |
2041 | |
2042 Addr_Block_initialize(); /* Initialize the address picker, if required. */ | |
2043 page_size = PAGE; | |
2044 assert( page_size > 0 ); /* getpagesize() bogosity check. */ | |
2045 | |
2046 #ifndef MAP_ANONYMOUS | |
2047 DEV_ZERO_FD = open( "/dev/zero", O_RDWR ); | |
2048 if (DEV_ZERO_FD < 0) | |
2049 /* Failed. Perhaps we should abort here? */ | |
2050 return; /* used to return 0 */ | |
2051 #endif | |
2052 | |
2053 #ifdef MMAP_METERING | |
2054 for(i = 0; i < N_Meterables; i++ ) | |
2055 { | |
2056 meter[i] = 0; | |
2057 } | |
2058 #endif /* MMAP_METERING */ | |
2059 } | |
2060 | |
2061 void | |
2062 syms_of_ralloc (void) | |
2063 { | |
2064 #ifdef MMAP_METERING | |
563 | 2065 DEFSYMBOL (Qmmap_times_mapped); |
2066 DEFSYMBOL (Qmmap_pages_mapped); | |
2067 DEFSYMBOL (Qmmap_times_unmapped); | |
2068 DEFSYMBOL (Qmmap_times_remapped); | |
2069 DEFSYMBOL (Qmmap_didnt_copy); | |
2070 DEFSYMBOL (Qmmap_pages_copied); | |
2071 DEFSYMBOL (Qmmap_average_bumpval); | |
2072 DEFSYMBOL (Qmmap_wastage); | |
2073 DEFSYMBOL (Qmmap_live_pages); | |
2074 DEFSYMBOL (Qmmap_addr_looked_up); | |
2075 DEFSYMBOL (Qmmap_hash_worked); | |
2076 DEFSYMBOL (Qmmap_addrlist_size); | |
428 | 2077 DEFSUBR (Fmmap_allocator_status); |
2078 #endif /* MMAP_METERING */ | |
2079 } | |
2080 | |
2081 void | |
2082 vars_of_ralloc (void) | |
2083 { | |
2084 DEFVAR_INT ("mmap-hysteresis", &mmap_hysteresis /* | |
2085 Extra room left at the end of an allocated arena, | |
2086 so that a re-alloc requesting extra space smaller than this | |
2087 does not actually cause a new arena to be allocated. | |
2088 | |
2089 A negative value is considered equal to zero. This is the | |
2090 minimum amount of space guaranteed to be left at the end of | |
2091 the arena. Because allocation happens in multiples of the OS | |
2092 page size, it is possible for more space to be left unused. | |
2093 */ ); | |
2094 mmap_hysteresis = 0; | |
2095 } | |
2096 | |
2097 #endif /* HAVE_MMAP */ |