comparison: src/ralloc.c @ 428:3ecd8885ac67 (tag r21-2-22)

Import from CVS: tag r21-2-22

author:    cvs
date:      Mon, 13 Aug 2007 11:28:15 +0200
parents:
children:  8de8e3f6228a
compared revisions: 427:0a0253eac470 -> 428:3ecd8885ac67
1 /* Block-relocating memory allocator. | |
2 Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc. | |
3 | |
4 This file is part of XEmacs. | |
5 | |
6 XEmacs is free software; you can redistribute it and/or modify it | |
7 under the terms of the GNU General Public License as published by the | |
8 Free Software Foundation; either version 2, or (at your option) any | |
9 later version. | |
10 | |
11 XEmacs is distributed in the hope that it will be useful, but WITHOUT | |
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 for more details. | |
15 | |
16 You should have received a copy of the GNU General Public License | |
17 along with GNU Emacs; see the file COPYING. If not, write to | |
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
19 Boston, MA 02111-1307, USA. | |
20 | |
21 Synched Up with: FSF 20.2 (non-mmap portion only) | |
22 */ | |
23 | |
24 /* NOTES: | |
25 | |
26 Only relocate the blocs necessary for SIZE in r_alloc_sbrk, | |
27 rather than all of them. This means allowing for a possible | |
28 hole between the first bloc and the end of malloc storage. */ | |
29 | |
30 #ifdef HAVE_CONFIG_H | |
31 #include <config.h> | |
32 #endif | |
33 | |
34 #ifdef HAVE_UNISTD_H | |
35 #include <unistd.h> /* for getpagesize() */ | |
36 #endif | |
37 | |
38 #ifdef emacs | |
39 | |
40 #include "lisp.h" | |
41 | |
42 /* The important properties of this type are that 1) it's a pointer, and | |
   2) arithmetic on it should work as if the object pointed to has a
   size of 1.  */
45 #if 0 /* Arithmetic on void* is a GCC extension. */ | |
46 #ifdef __STDC__ | |
47 typedef void *POINTER; | |
48 #else | |
49 typedef unsigned char *POINTER; | |
50 #endif | |
51 #endif /* 0 */ | |
52 | |
53 /* Unconditionally use unsigned char * for this. */ | |
54 typedef unsigned char *POINTER; | |
55 | |
56 typedef unsigned long SIZE; | |
57 | |
58 #ifdef DOUG_LEA_MALLOC | |
59 #define M_TOP_PAD -2 | |
60 #include <malloc.h> | |
61 #endif | |
62 | |
63 #include "getpagesize.h" | |
64 | |
65 #include <string.h> | |
66 void refill_memory_reserve (void); | |
67 | |
68 #else /* Not emacs. */ | |
69 | |
70 #include <stddef.h> | |
71 | |
72 typedef size_t SIZE; | |
73 typedef void *POINTER; | |
74 | |
75 #include <unistd.h> | |
76 #include <malloc.h> | |
77 #include <string.h> | |
78 | |
79 #endif /* emacs. */ | |
80 | |
81 void init_ralloc (void); | |
82 #define safe_bcopy(x, y, z) memmove (y, x, z) | |
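/* Note that safe_bcopy keeps bcopy's argument order: safe_bcopy (source,
   dest, nbytes) expands to memmove (dest, source, nbytes), so overlapping
   source and destination regions are handled correctly.  */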
83 | |
84 #define NIL ((POINTER) 0) | |
85 | |
86 | |
87 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC) | |
88 | |
89 /* A flag to indicate whether we have initialized ralloc yet. For | |
90 Emacs's sake, please do not make this local to malloc_init; on some | |
91 machines, the dumping procedure makes all static variables | |
92 read-only. On these machines, the word static is #defined to be | |
93 the empty string, meaning that r_alloc_initialized becomes an | |
94 automatic variable, and loses its value each time Emacs is started up. */ | |
95 static int r_alloc_initialized = 0; | |
96 | |
97 | |
98 /* Declarations for working with the malloc, ralloc, and system breaks. */ | |
99 | |
100 /* Function to set the real break value. */ | |
101 static POINTER (*real_morecore) (ptrdiff_t size); | |
102 | |
103 /* The break value, as seen by malloc (). */ | |
104 static POINTER virtual_break_value; | |
105 | |
106 /* The break value, viewed by the relocatable blocs. */ | |
107 static POINTER break_value; | |
108 | |
109 /* This is the size of a page. We round memory requests to this boundary. */ | |
110 static int page_size; | |
111 | |
112 /* Whenever we get memory from the system, get this many extra bytes. This | |
113 must be a multiple of page_size. */ | |
114 static int extra_bytes; | |
115 | |
116 /* Macros for rounding. Note that rounding to any value is possible | |
117 by changing the definition of PAGE. */ | |
118 #define PAGE (getpagesize ()) | |
119 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0) | |
120 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \ | |
121 & ~(page_size - 1)) | |
122 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1))) | |
123 | |
124 #define MEM_ALIGN sizeof(double) | |
125 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \ | |
126 & ~(MEM_ALIGN - 1)) | |
127 | |
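/* Illustrative sketch of the rounding macros above (added for exposition,
   kept under #if 0 so it is never compiled).  It assumes a 4096-byte page
   and an 8-byte double; the function name is purely hypothetical.  */
#if 0
static void
example_rounding (void)
{
  page_size = 4096;				/* assumed for the example */
  if (ROUNDUP (1) != 4096) abort ();		/* sizes round up to whole pages */
  if (ROUNDUP (4097) != 8192) abort ();
  if (ROUND_TO_PAGE (0x2fff) != 0x2000) abort ();
  if (! ALIGNED ((POINTER) 0x2000)) abort ();	/* page-aligned address */
  if (MEM_ROUNDUP (9) != 16) abort ();		/* MEM_ALIGN == sizeof (double), assumed 8 */
}
#endif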
128 /* Data structures of heaps and blocs. */ | |
129 | |
130 /* The relocatable objects, or blocs, and the malloc data | |
131 both reside within one or more heaps. | |
132 Each heap contains malloc data, running from `start' to `bloc_start', | |
133 and relocatable objects, running from `bloc_start' to `free'. | |
134 | |
135 Relocatable objects may relocate within the same heap | |
136 or may move into another heap; the heaps themselves may grow | |
137 but they never move. | |
138 | |
139 We try to make just one heap and make it larger as necessary. | |
140 But sometimes we can't do that, because we can't get contiguous | |
141 space to add onto the heap. When that happens, we start a new heap. */ | |
142 | |
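/* Illustrative sketch of one heap's layout, following the description
   above (added for exposition):

      start          bloc_start              free                end
        |  malloc data   |  relocatable blocs  |  unused space    |

   start <= bloc_start <= free <= end holds for every heap.  */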
143 typedef struct heap | |
144 { | |
145 struct heap *next; | |
146 struct heap *prev; | |
147 /* Start of memory range of this heap. */ | |
148 POINTER start; | |
149 /* End of memory range of this heap. */ | |
150 POINTER end; | |
151 /* Start of relocatable data in this heap. */ | |
152 POINTER bloc_start; | |
153 /* Start of unused space in this heap. */ | |
154 POINTER free; | |
155 /* First bloc in this heap. */ | |
156 struct bp *first_bloc; | |
157 /* Last bloc in this heap. */ | |
158 struct bp *last_bloc; | |
159 } *heap_ptr; | |
160 | |
161 #define NIL_HEAP ((heap_ptr) 0) | |
162 #define HEAP_PTR_SIZE (sizeof (struct heap)) | |
163 | |
164 /* This is the first heap object. | |
165 If we need additional heap objects, each one resides at the beginning of | |
166 the space it covers. */ | |
167 static struct heap heap_base; | |
168 | |
169 /* Head and tail of the list of heaps. */ | |
170 static heap_ptr first_heap, last_heap; | |
171 | |
172 /* These structures are allocated in the malloc arena. | |
173 The linked list is kept in order of increasing '.data' members. | |
174 The data blocks abut each other; if b->next is non-nil, then | |
175 b->data + b->size == b->next->data. | |
176 | |
   An element with variable==NIL denotes a freed bloc, which has not yet
   been collected.  Such blocs may only appear while r_alloc_freeze_level > 0,
   and are freed when the arena is thawed.  Currently these blocs cannot be
   reused while the arena is frozen, which is very inefficient.  */
181 | |
182 typedef struct bp | |
183 { | |
184 struct bp *next; | |
185 struct bp *prev; | |
186 POINTER *variable; | |
187 POINTER data; | |
188 SIZE size; | |
189 POINTER new_data; /* temporarily used for relocation */ | |
190 struct heap *heap; /* Heap this bloc is in. */ | |
191 } *bloc_ptr; | |
192 | |
193 #define NIL_BLOC ((bloc_ptr) 0) | |
194 #define BLOC_PTR_SIZE (sizeof (struct bp)) | |
195 | |
196 /* Head and tail of the list of relocatable blocs. */ | |
197 static bloc_ptr first_bloc, last_bloc; | |
198 | |
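/* Illustrative sketch (added for exposition, kept under #if 0): within a
   single heap consecutive blocs abut, as the comment above states, so a
   walk over the list can verify the invariant.  The function name is
   hypothetical.  */
#if 0
static void
example_check_bloc_invariant (void)
{
  bloc_ptr b;
  for (b = first_bloc; b != NIL_BLOC && b->next != NIL_BLOC; b = b->next)
    if (b->heap == b->next->heap && b->data + b->size != b->next->data)
      abort ();			/* blocs within one heap must abut */
}
#endif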
199 static int use_relocatable_buffers; | |
200 | |
201 /* If >0, no relocation whatsoever takes place. */ | |
202 static int r_alloc_freeze_level; | |
203 | |
204 /* Obtain SIZE bytes of space. If enough space is not presently available | |
205 in our process reserve, (i.e., (page_break_value - break_value)), | |
206 this means getting more page-aligned space from the system. | |
207 | |
208 Return non-zero if all went well, or zero if we couldn't allocate | |
209 the memory. */ | |
210 | |
211 /* Functions to get and return memory from the system. */ | |
212 | |
213 /* Find the heap that ADDRESS falls within. */ | |
214 | |
215 static heap_ptr | |
216 find_heap (POINTER address) | |
217 { | |
218 heap_ptr heap; | |
219 | |
220 for (heap = last_heap; heap; heap = heap->prev) | |
221 { | |
222 if (heap->start <= address && address <= heap->end) | |
223 return heap; | |
224 } | |
225 | |
226 return NIL_HEAP; | |
227 } | |
228 | |
229 /* Find SIZE bytes of space in a heap. | |
230 Try to get them at ADDRESS (which must fall within some heap's range) | |
231 if we can get that many within one heap. | |
232 | |
   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.
243 | |
244 Return the address of the space if all went well, or zero if we couldn't | |
245 allocate the memory. */ | |
246 | |
247 static POINTER | |
248 obtain (POINTER address, SIZE size) | |
249 { | |
250 heap_ptr heap; | |
251 SIZE already_available; | |
252 | |
253 /* Find the heap that ADDRESS falls within. */ | |
254 for (heap = last_heap; heap; heap = heap->prev) | |
255 { | |
256 if (heap->start <= address && address <= heap->end) | |
257 break; | |
258 } | |
259 | |
260 if (! heap) | |
261 abort (); | |
262 | |
263 /* If we can't fit SIZE bytes in that heap, | |
264 try successive later heaps. */ | |
265 while (heap && address + size > heap->end) | |
266 { | |
267 heap = heap->next; | |
268 if (heap == NIL_HEAP) | |
269 break; | |
270 address = heap->bloc_start; | |
271 } | |
272 | |
273 /* If we can't fit them within any existing heap, | |
274 get more space. */ | |
275 if (heap == NIL_HEAP) | |
276 { | |
277 POINTER new = (*real_morecore)(0); | |
278 SIZE get; | |
279 | |
280 already_available = (char *)last_heap->end - (char *)address; | |
281 | |
282 if (new != last_heap->end) | |
283 { | |
284 /* Someone else called sbrk. Make a new heap. */ | |
285 | |
286 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new); | |
287 POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1)); | |
288 | |
289 if ((*real_morecore) (bloc_start - new) != new) | |
290 return 0; | |
291 | |
292 new_heap->start = new; | |
293 new_heap->end = bloc_start; | |
294 new_heap->bloc_start = bloc_start; | |
295 new_heap->free = bloc_start; | |
296 new_heap->next = NIL_HEAP; | |
297 new_heap->prev = last_heap; | |
298 new_heap->first_bloc = NIL_BLOC; | |
299 new_heap->last_bloc = NIL_BLOC; | |
300 last_heap->next = new_heap; | |
301 last_heap = new_heap; | |
302 | |
303 address = bloc_start; | |
304 already_available = 0; | |
305 } | |
306 | |
307 /* Add space to the last heap (which we may have just created). | |
308 Get some extra, so we can come here less often. */ | |
309 | |
310 get = size + extra_bytes - already_available; | |
311 get = (char *) ROUNDUP ((char *)last_heap->end + get) | |
312 - (char *) last_heap->end; | |
313 | |
314 if ((*real_morecore) (get) != last_heap->end) | |
315 return 0; | |
316 | |
317 last_heap->end += get; | |
318 } | |
319 | |
320 return address; | |
321 } | |
322 | |
323 #if 0 | |
324 /* Obtain SIZE bytes of space and return a pointer to the new area. | |
325 If we could not allocate the space, return zero. */ | |
326 | |
327 static POINTER | |
328 get_more_space (SIZE size) | |
329 { | |
330 POINTER ptr = break_value; | |
331 if (obtain (size)) | |
332 return ptr; | |
333 else | |
334 return 0; | |
335 } | |
336 #endif | |
337 | |
/* Return unused heap space at the end of the address space to the system,
   but only when the excess beyond break_value amounts to more than twice
   extra_bytes; always keep extra_bytes worth of space in reserve.  */
340 | |
341 static void | |
342 relinquish (void) | |
343 { | |
344 register heap_ptr h; | |
345 int excess = 0; | |
346 | |
347 /* Add the amount of space beyond break_value | |
   in all heaps which extend beyond break_value at all.  */
349 | |
350 for (h = last_heap; h && break_value < h->end; h = h->prev) | |
351 { | |
352 excess += (char *) h->end - (char *) ((break_value < h->bloc_start) | |
353 ? h->bloc_start : break_value); | |
354 } | |
355 | |
356 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end) | |
357 { | |
358 /* Keep extra_bytes worth of empty space. | |
359 And don't free anything unless we can free at least extra_bytes. */ | |
360 excess -= extra_bytes; | |
361 | |
362 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess) | |
363 { | |
364 /* This heap should have no blocs in it. */ | |
365 if (last_heap->first_bloc != NIL_BLOC | |
366 || last_heap->last_bloc != NIL_BLOC) | |
367 abort (); | |
368 | |
369 /* Return the last heap, with its header, to the system. */ | |
370 excess = (char *)last_heap->end - (char *)last_heap->start; | |
371 last_heap = last_heap->prev; | |
372 last_heap->next = NIL_HEAP; | |
373 } | |
374 else | |
375 { | |
376 excess = (char *) last_heap->end | |
377 - (char *) ROUNDUP ((char *)last_heap->end - excess); | |
378 last_heap->end -= excess; | |
379 } | |
380 | |
381 if ((*real_morecore) (- excess) == 0) | |
382 abort (); | |
383 } | |
384 } | |
385 | |
386 /* Return the total size in use by relocating allocator, | |
387 above where malloc gets space. */ | |
388 | |
389 long r_alloc_size_in_use (void); | |
390 long | |
391 r_alloc_size_in_use () | |
392 { | |
393 return break_value - virtual_break_value; | |
394 } | |
395 | |
396 /* The meat - allocating, freeing, and relocating blocs. */ | |
397 | |
398 | |
/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc, or NIL_BLOC if there is no such bloc.  */
401 | |
402 static bloc_ptr | |
403 find_bloc (POINTER *ptr) | |
404 { | |
405 register bloc_ptr p = first_bloc; | |
406 | |
407 while (p != NIL_BLOC) | |
408 { | |
409 if (p->variable == ptr && p->data == *ptr) | |
410 return p; | |
411 | |
412 p = p->next; | |
413 } | |
414 | |
415 return p; | |
416 } | |
417 | |
418 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs. | |
419 Returns a pointer to the new bloc, or zero if we couldn't allocate | |
   memory for the new bloc.  */
421 | |
422 static bloc_ptr | |
423 get_bloc (SIZE size) | |
424 { | |
425 register bloc_ptr new_bloc; | |
426 register heap_ptr heap; | |
427 | |
428 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) | |
429 || ! (new_bloc->data = obtain (break_value, size))) | |
430 { | |
431 if (new_bloc) | |
432 free (new_bloc); | |
433 | |
434 return 0; | |
435 } | |
436 | |
437 break_value = new_bloc->data + size; | |
438 | |
439 new_bloc->size = size; | |
440 new_bloc->next = NIL_BLOC; | |
441 new_bloc->variable = (POINTER *) NIL; | |
442 new_bloc->new_data = 0; | |
443 | |
444 /* Record in the heap that this space is in use. */ | |
445 heap = find_heap (new_bloc->data); | |
446 heap->free = break_value; | |
447 | |
448 /* Maintain the correspondence between heaps and blocs. */ | |
449 new_bloc->heap = heap; | |
450 heap->last_bloc = new_bloc; | |
451 if (heap->first_bloc == NIL_BLOC) | |
452 heap->first_bloc = new_bloc; | |
453 | |
454 /* Put this bloc on the doubly-linked list of blocs. */ | |
455 if (first_bloc) | |
456 { | |
457 new_bloc->prev = last_bloc; | |
458 last_bloc->next = new_bloc; | |
459 last_bloc = new_bloc; | |
460 } | |
461 else | |
462 { | |
463 first_bloc = last_bloc = new_bloc; | |
464 new_bloc->prev = NIL_BLOC; | |
465 } | |
466 | |
467 return new_bloc; | |
468 } | |
469 | |
470 /* Calculate new locations of blocs in the list beginning with BLOC, | |
471 relocating it to start at ADDRESS, in heap HEAP. If enough space is | |
472 not presently available in our reserve, call obtain for | |
473 more space. | |
474 | |
475 Store the new location of each bloc in its new_data field. | |
476 Do not touch the contents of blocs or break_value. */ | |
477 | |
478 static int | |
479 relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address) | |
480 { | |
481 register bloc_ptr b = bloc; | |
482 | |
483 /* No need to ever call this if arena is frozen, bug somewhere! */ | |
484 if (r_alloc_freeze_level) | |
485 abort(); | |
486 | |
487 while (b) | |
488 { | |
489 /* If bloc B won't fit within HEAP, | |
490 move to the next heap and try again. */ | |
491 while (heap && address + b->size > heap->end) | |
492 { | |
493 heap = heap->next; | |
494 if (heap == NIL_HEAP) | |
495 break; | |
496 address = heap->bloc_start; | |
497 } | |
498 | |
499 /* If BLOC won't fit in any heap, | |
500 get enough new space to hold BLOC and all following blocs. */ | |
501 if (heap == NIL_HEAP) | |
502 { | |
503 register bloc_ptr tb = b; | |
504 register SIZE s = 0; | |
505 | |
506 /* Add up the size of all the following blocs. */ | |
507 while (tb != NIL_BLOC) | |
508 { | |
509 if (tb->variable) | |
510 s += tb->size; | |
511 | |
512 tb = tb->next; | |
513 } | |
514 | |
515 /* Get that space. */ | |
516 address = obtain (address, s); | |
517 if (address == 0) | |
518 return 0; | |
519 | |
520 heap = last_heap; | |
521 } | |
522 | |
523 /* Record the new address of this bloc | |
524 and update where the next bloc can start. */ | |
525 b->new_data = address; | |
526 if (b->variable) | |
527 address += b->size; | |
528 b = b->next; | |
529 } | |
530 | |
531 return 1; | |
532 } | |
533 | |
534 #if 0 /* unused */ | |
535 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list. | |
536 This is necessary if we put the memory of space of BLOC | |
537 before that of BEFORE. */ | |
538 | |
539 static void | |
540 reorder_bloc (bloc_ptr bloc, bloc_ptr before) | |
541 { | |
542 bloc_ptr prev, next; | |
543 | |
544 /* Splice BLOC out from where it is. */ | |
545 prev = bloc->prev; | |
546 next = bloc->next; | |
547 | |
548 if (prev) | |
549 prev->next = next; | |
550 if (next) | |
551 next->prev = prev; | |
552 | |
553 /* Splice it in before BEFORE. */ | |
554 prev = before->prev; | |
555 | |
556 if (prev) | |
557 prev->next = bloc; | |
558 bloc->prev = prev; | |
559 | |
560 before->prev = bloc; | |
561 bloc->next = before; | |
562 } | |
563 #endif /* unused */ | |
564 | |
565 /* Update the records of which heaps contain which blocs, starting | |
566 with heap HEAP and bloc BLOC. */ | |
567 | |
568 static void | |
569 update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap) | |
570 { | |
571 register bloc_ptr b; | |
572 | |
573 /* Initialize HEAP's status to reflect blocs before BLOC. */ | |
574 if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap) | |
575 { | |
576 /* The previous bloc is in HEAP. */ | |
577 heap->last_bloc = bloc->prev; | |
578 heap->free = bloc->prev->data + bloc->prev->size; | |
579 } | |
580 else | |
581 { | |
582 /* HEAP contains no blocs before BLOC. */ | |
583 heap->first_bloc = NIL_BLOC; | |
584 heap->last_bloc = NIL_BLOC; | |
585 heap->free = heap->bloc_start; | |
586 } | |
587 | |
588 /* Advance through blocs one by one. */ | |
589 for (b = bloc; b != NIL_BLOC; b = b->next) | |
590 { | |
591 /* Advance through heaps, marking them empty, | |
592 till we get to the one that B is in. */ | |
593 while (heap) | |
594 { | |
595 if (heap->bloc_start <= b->data && b->data <= heap->end) | |
596 break; | |
597 heap = heap->next; | |
598 /* We know HEAP is not null now, | |
599 because there has to be space for bloc B. */ | |
600 heap->first_bloc = NIL_BLOC; | |
601 heap->last_bloc = NIL_BLOC; | |
602 heap->free = heap->bloc_start; | |
603 } | |
604 | |
605 /* Update HEAP's status for bloc B. */ | |
606 heap->free = b->data + b->size; | |
607 heap->last_bloc = b; | |
608 if (heap->first_bloc == NIL_BLOC) | |
609 heap->first_bloc = b; | |
610 | |
611 /* Record that B is in HEAP. */ | |
612 b->heap = heap; | |
613 } | |
614 | |
615 /* If there are any remaining heaps and no blocs left, | |
616 mark those heaps as empty. */ | |
617 heap = heap->next; | |
618 while (heap) | |
619 { | |
620 heap->first_bloc = NIL_BLOC; | |
621 heap->last_bloc = NIL_BLOC; | |
622 heap->free = heap->bloc_start; | |
623 heap = heap->next; | |
624 } | |
625 } | |
626 | |
627 /* Resize BLOC to SIZE bytes. This relocates the blocs | |
628 that come after BLOC in memory. */ | |
629 | |
630 static int | |
631 resize_bloc (bloc_ptr bloc, SIZE size) | |
632 { | |
633 register bloc_ptr b; | |
634 heap_ptr heap; | |
635 POINTER address; | |
636 SIZE old_size; | |
637 | |
638 /* No need to ever call this if arena is frozen, bug somewhere! */ | |
639 if (r_alloc_freeze_level) | |
640 abort(); | |
641 | |
642 if (bloc == NIL_BLOC || size == bloc->size) | |
643 return 1; | |
644 | |
645 for (heap = first_heap; heap != NIL_HEAP; heap = heap->next) | |
646 { | |
647 if (heap->bloc_start <= bloc->data && bloc->data <= heap->end) | |
648 break; | |
649 } | |
650 | |
651 if (heap == NIL_HEAP) | |
652 abort (); | |
653 | |
654 old_size = bloc->size; | |
655 bloc->size = size; | |
656 | |
657 /* Note that bloc could be moved into the previous heap. */ | |
658 address = (bloc->prev ? bloc->prev->data + bloc->prev->size | |
659 : first_heap->bloc_start); | |
660 while (heap) | |
661 { | |
662 if (heap->bloc_start <= address && address <= heap->end) | |
663 break; | |
664 heap = heap->prev; | |
665 } | |
666 | |
667 if (! relocate_blocs (bloc, heap, address)) | |
668 { | |
669 bloc->size = old_size; | |
670 return 0; | |
671 } | |
672 | |
673 if (size > old_size) | |
674 { | |
675 for (b = last_bloc; b != bloc; b = b->prev) | |
676 { | |
677 if (!b->variable) | |
678 { | |
679 b->size = 0; | |
680 b->data = b->new_data; | |
681 } | |
682 else | |
683 { | |
684 safe_bcopy (b->data, b->new_data, b->size); | |
685 *b->variable = b->data = b->new_data; | |
686 } | |
687 } | |
688 if (!bloc->variable) | |
689 { | |
690 bloc->size = 0; | |
691 bloc->data = bloc->new_data; | |
692 } | |
693 else | |
694 { | |
695 safe_bcopy (bloc->data, bloc->new_data, old_size); | |
696 memset (bloc->new_data + old_size, 0, size - old_size); | |
697 *bloc->variable = bloc->data = bloc->new_data; | |
698 } | |
699 } | |
700 else | |
701 { | |
702 for (b = bloc; b != NIL_BLOC; b = b->next) | |
703 { | |
704 if (!b->variable) | |
705 { | |
706 b->size = 0; | |
707 b->data = b->new_data; | |
708 } | |
709 else | |
710 { | |
711 safe_bcopy (b->data, b->new_data, b->size); | |
712 *b->variable = b->data = b->new_data; | |
713 } | |
714 } | |
715 } | |
716 | |
717 update_heap_bloc_correspondence (bloc, heap); | |
718 | |
719 break_value = (last_bloc ? last_bloc->data + last_bloc->size | |
720 : first_heap->bloc_start); | |
721 return 1; | |
722 } | |
723 | |
724 /* Free BLOC from the chain of blocs, relocating any blocs above it | |
725 and returning BLOC->size bytes to the free area. */ | |
726 | |
727 static void | |
728 free_bloc (bloc_ptr bloc) | |
729 { | |
730 heap_ptr heap = bloc->heap; | |
731 | |
732 if (r_alloc_freeze_level) | |
733 { | |
734 bloc->variable = (POINTER *) NIL; | |
735 return; | |
736 } | |
737 | |
738 resize_bloc (bloc, 0); | |
739 | |
740 if (bloc == first_bloc && bloc == last_bloc) | |
741 { | |
742 first_bloc = last_bloc = NIL_BLOC; | |
743 } | |
744 else if (bloc == last_bloc) | |
745 { | |
746 last_bloc = bloc->prev; | |
747 last_bloc->next = NIL_BLOC; | |
748 } | |
749 else if (bloc == first_bloc) | |
750 { | |
751 first_bloc = bloc->next; | |
752 first_bloc->prev = NIL_BLOC; | |
753 } | |
754 else | |
755 { | |
756 bloc->next->prev = bloc->prev; | |
757 bloc->prev->next = bloc->next; | |
758 } | |
759 | |
760 /* Update the records of which blocs are in HEAP. */ | |
761 if (heap->first_bloc == bloc) | |
762 { | |
763 if (bloc->next != 0 && bloc->next->heap == heap) | |
764 heap->first_bloc = bloc->next; | |
765 else | |
766 heap->first_bloc = heap->last_bloc = NIL_BLOC; | |
767 } | |
768 if (heap->last_bloc == bloc) | |
769 { | |
770 if (bloc->prev != 0 && bloc->prev->heap == heap) | |
771 heap->last_bloc = bloc->prev; | |
772 else | |
773 heap->first_bloc = heap->last_bloc = NIL_BLOC; | |
774 } | |
775 | |
776 relinquish (); | |
777 free (bloc); | |
778 } | |
779 | |
780 /* Interface routines. */ | |
781 | |
782 /* Obtain SIZE bytes of storage from the free pool, or the system, as | |
783 necessary. If relocatable blocs are in use, this means relocating | |
784 them. This function gets plugged into the GNU malloc's __morecore | |
785 hook. | |
786 | |
787 We provide hysteresis, never relocating by less than extra_bytes. | |
788 | |
789 If we're out of memory, we should return zero, to imitate the other | |
790 __morecore hook values - in particular, __default_morecore in the | |
791 GNU malloc package. */ | |
792 | |
793 POINTER r_alloc_sbrk (ptrdiff_t size); | |
794 POINTER | |
795 r_alloc_sbrk (ptrdiff_t size) | |
796 { | |
797 register bloc_ptr b; | |
798 POINTER address; | |
799 | |
800 if (! r_alloc_initialized) | |
801 init_ralloc (); | |
802 | |
803 if (! use_relocatable_buffers) | |
804 return (*real_morecore) (size); | |
805 | |
806 if (size == 0) | |
807 return virtual_break_value; | |
808 | |
809 if (size > 0) | |
810 { | |
811 /* Allocate a page-aligned space. GNU malloc would reclaim an | |
812 extra space if we passed an unaligned one. But we could | |
813 not always find a space which is contiguous to the previous. */ | |
814 POINTER new_bloc_start; | |
815 heap_ptr h = first_heap; | |
816 SIZE get = ROUNDUP (size); | |
817 | |
818 address = (POINTER) ROUNDUP (virtual_break_value); | |
819 | |
820 /* Search the list upward for a heap which is large enough. */ | |
821 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get)) | |
822 { | |
823 h = h->next; | |
824 if (h == NIL_HEAP) | |
825 break; | |
826 address = (POINTER) ROUNDUP (h->start); | |
827 } | |
828 | |
829 /* If not found, obtain more space. */ | |
830 if (h == NIL_HEAP) | |
831 { | |
832 get += extra_bytes + page_size; | |
833 | |
834 if (! obtain (address, get)) | |
835 return 0; | |
836 | |
837 if (first_heap == last_heap) | |
838 address = (POINTER) ROUNDUP (virtual_break_value); | |
839 else | |
840 address = (POINTER) ROUNDUP (last_heap->start); | |
841 h = last_heap; | |
842 } | |
843 | |
844 new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get); | |
845 | |
846 if (first_heap->bloc_start < new_bloc_start) | |
847 { | |
848 /* This is no clean solution - no idea how to do it better. */ | |
849 if (r_alloc_freeze_level) | |
850 return NIL; | |
851 | |
852 /* There is a bug here: if the above obtain call succeeded, but the | |
853 relocate_blocs call below does not succeed, we need to free | |
854 the memory that we got with obtain. */ | |
855 | |
856 /* Move all blocs upward. */ | |
857 if (! relocate_blocs (first_bloc, h, new_bloc_start)) | |
858 return 0; | |
859 | |
860 /* Note that (POINTER)(h+1) <= new_bloc_start since | |
861 get >= page_size, so the following does not destroy the heap | |
862 header. */ | |
863 for (b = last_bloc; b != NIL_BLOC; b = b->prev) | |
864 { | |
865 safe_bcopy (b->data, b->new_data, b->size); | |
866 *b->variable = b->data = b->new_data; | |
867 } | |
868 | |
869 h->bloc_start = new_bloc_start; | |
870 | |
871 update_heap_bloc_correspondence (first_bloc, h); | |
872 } | |
873 if (h != first_heap) | |
874 { | |
875 /* Give up managing heaps below the one the new | |
876 virtual_break_value points to. */ | |
877 first_heap->prev = NIL_HEAP; | |
878 first_heap->next = h->next; | |
879 first_heap->start = h->start; | |
880 first_heap->end = h->end; | |
881 first_heap->free = h->free; | |
882 first_heap->first_bloc = h->first_bloc; | |
883 first_heap->last_bloc = h->last_bloc; | |
884 first_heap->bloc_start = h->bloc_start; | |
885 | |
886 if (first_heap->next) | |
887 first_heap->next->prev = first_heap; | |
888 else | |
889 last_heap = first_heap; | |
890 } | |
891 | |
892 memset (address, 0, size); | |
893 } | |
894 else /* size < 0 */ | |
895 { | |
896 SIZE excess = (char *)first_heap->bloc_start | |
897 - ((char *)virtual_break_value + size); | |
898 | |
899 address = virtual_break_value; | |
900 | |
901 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes) | |
902 { | |
903 excess -= extra_bytes; | |
904 first_heap->bloc_start | |
905 = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess); | |
906 | |
907 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start); | |
908 | |
909 for (b = first_bloc; b != NIL_BLOC; b = b->next) | |
910 { | |
911 safe_bcopy (b->data, b->new_data, b->size); | |
912 *b->variable = b->data = b->new_data; | |
913 } | |
914 } | |
915 | |
916 if ((char *)virtual_break_value + size < (char *)first_heap->start) | |
917 { | |
918 /* We found an additional space below the first heap */ | |
919 first_heap->start = (POINTER) ((char *)virtual_break_value + size); | |
920 } | |
921 } | |
922 | |
923 virtual_break_value = (POINTER) ((char *)address + size); | |
924 break_value = (last_bloc | |
925 ? last_bloc->data + last_bloc->size | |
926 : first_heap->bloc_start); | |
927 if (size < 0) | |
928 relinquish (); | |
929 | |
930 return address; | |
931 } | |
932 | |
933 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to | |
934 the data is returned in *PTR. PTR is thus the address of some variable | |
935 which will use the data area. | |
936 | |
937 The allocation of 0 bytes is valid. | |
938 In case r_alloc_freeze is set, a best fit of unused blocs could be done | |
939 before allocating a new area. Not yet done. | |
940 | |
941 If we can't allocate the necessary memory, set *PTR to zero, and | |
942 return zero. */ | |
943 | |
944 POINTER r_alloc (POINTER *ptr, SIZE size); | |
945 POINTER | |
946 r_alloc (POINTER *ptr, SIZE size) | |
947 { | |
948 bloc_ptr new_bloc; | |
949 | |
950 if (! r_alloc_initialized) | |
951 init_ralloc (); | |
952 | |
953 new_bloc = get_bloc (size); | |
954 if (new_bloc) | |
955 { | |
956 new_bloc->variable = ptr; | |
957 *ptr = new_bloc->data; | |
958 } | |
959 else | |
960 *ptr = 0; | |
961 | |
962 return *ptr; | |
963 } | |
964 | |
965 /* Free a bloc of relocatable storage whose data is pointed to by PTR. | |
966 Store 0 in *PTR to show there's no block allocated. */ | |
967 | |
968 void r_alloc_free (POINTER *ptr); | |
969 void | |
970 r_alloc_free (POINTER *ptr) | |
971 { | |
972 register bloc_ptr dead_bloc; | |
973 | |
974 if (! r_alloc_initialized) | |
975 init_ralloc (); | |
976 | |
977 dead_bloc = find_bloc (ptr); | |
978 if (dead_bloc == NIL_BLOC) | |
979 abort (); | |
980 | |
981 free_bloc (dead_bloc); | |
982 *ptr = 0; | |
983 | |
984 #ifdef emacs | |
985 refill_memory_reserve (); | |
986 #endif | |
987 } | |
988 | |
989 /* Given a pointer at address PTR to relocatable data, resize it to SIZE. | |
990 Do this by shifting all blocks above this one up in memory, unless | |
991 SIZE is less than or equal to the current bloc size, in which case | |
992 do nothing. | |
993 | |
994 In case r_alloc_freeze is set, a new bloc is allocated, and the | |
995 memory copied to it. Not very efficient. We could traverse the | |
996 bloc_list for a best fit of free blocs first. | |
997 | |
998 Change *PTR to reflect the new bloc, and return this value. | |
999 | |
1000 If more memory cannot be allocated, then leave *PTR unchanged, and | |
1001 return zero. */ | |
1002 | |
1003 POINTER r_re_alloc (POINTER *ptr, SIZE size); | |
1004 POINTER | |
1005 r_re_alloc (POINTER *ptr, SIZE size) | |
1006 { | |
1007 register bloc_ptr bloc; | |
1008 | |
1009 if (! r_alloc_initialized) | |
1010 init_ralloc (); | |
1011 | |
1012 if (!*ptr) | |
1013 return r_alloc (ptr, size); | |
1014 if (!size) | |
1015 { | |
1016 r_alloc_free (ptr); | |
1017 return r_alloc (ptr, 0); | |
1018 } | |
1019 | |
1020 bloc = find_bloc (ptr); | |
1021 if (bloc == NIL_BLOC) | |
1022 abort (); | |
1023 | |
1024 if (size < bloc->size) | |
1025 { | |
1026 /* Wouldn't it be useful to actually resize the bloc here? */ | |
1027 /* I think so too, but not if it's too expensive... */ | |
1028 if ((bloc->size - MEM_ROUNDUP (size) >= page_size) | |
1029 && r_alloc_freeze_level == 0) | |
1030 { | |
1031 resize_bloc (bloc, MEM_ROUNDUP (size)); | |
1032 /* Never mind if this fails, just do nothing... */ | |
1033 /* It *should* be infallible! */ | |
1034 } | |
1035 } | |
1036 else if (size > bloc->size) | |
1037 { | |
1038 if (r_alloc_freeze_level) | |
1039 { | |
1040 bloc_ptr new_bloc; | |
1041 new_bloc = get_bloc (MEM_ROUNDUP (size)); | |
1042 if (new_bloc) | |
1043 { | |
1044 new_bloc->variable = ptr; | |
1045 *ptr = new_bloc->data; | |
1046 bloc->variable = (POINTER *) NIL; | |
1047 } | |
1048 else | |
1049 return NIL; | |
1050 } | |
1051 else | |
1052 { | |
1053 if (! resize_bloc (bloc, MEM_ROUNDUP (size))) | |
1054 return NIL; | |
1055 } | |
1056 } | |
1057 return *ptr; | |
1058 } | |
1059 | |
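/* Illustrative sketch of the calling convention (added for exposition,
   kept under #if 0): the caller hands r_alloc the address of its own
   pointer variable, and the allocator updates that variable whenever the
   bloc is relocated.  The function name is hypothetical.  */
#if 0
static void
example_use_relocatable_storage (void)
{
  POINTER buf;

  if (r_alloc (&buf, 1000) == 0)	/* allocate a 1000-byte bloc */
    return;				/* allocation failed; buf is 0 */

  buf[0] = 'x';				/* use it like ordinary storage */

  if (r_re_alloc (&buf, 4000) == 0)	/* grow it; buf may be relocated */
    ;					/* on failure the old bloc is intact */

  r_alloc_free (&buf);			/* release it; buf is set to 0 */
}
#endif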
1060 /* Disable relocations, after making room for at least SIZE bytes | |
1061 of non-relocatable heap if possible. The relocatable blocs are | |
1062 guaranteed to hold still until thawed, even if this means that | |
1063 malloc must return a null pointer. */ | |
1064 | |
1065 void r_alloc_freeze (long size); | |
1066 void | |
1067 r_alloc_freeze (long size) | |
1068 { | |
1069 if (! r_alloc_initialized) | |
1070 init_ralloc (); | |
1071 | |
1072 /* If already frozen, we can't make any more room, so don't try. */ | |
1073 if (r_alloc_freeze_level > 0) | |
1074 size = 0; | |
1075 /* If we can't get the amount requested, half is better than nothing. */ | |
1076 while (size > 0 && r_alloc_sbrk (size) == 0) | |
1077 size /= 2; | |
1078 ++r_alloc_freeze_level; | |
1079 if (size > 0) | |
1080 r_alloc_sbrk (-size); | |
1081 } | |
1082 | |
1083 void r_alloc_thaw (void); | |
1084 void | |
1085 r_alloc_thaw (void) | |
1086 { | |
1087 | |
1088 if (! r_alloc_initialized) | |
1089 init_ralloc (); | |
1090 | |
1091 if (--r_alloc_freeze_level < 0) | |
1092 abort (); | |
1093 | |
1094 /* This frees all unused blocs. It is not too inefficient, as the resize | |
1095 and bcopy is done only once. Afterwards, all unreferenced blocs are | |
1096 already shrunk to zero size. */ | |
1097 if (!r_alloc_freeze_level) | |
1098 { | |
1099 bloc_ptr *b = &first_bloc; | |
1100 while (*b) | |
1101 if (!(*b)->variable) | |
1102 free_bloc (*b); | |
1103 else | |
1104 b = &(*b)->next; | |
1105 } | |
1106 } | |
1107 | |
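/* Illustrative sketch (added for exposition, kept under #if 0): freeze and
   thaw calls must be balanced.  While the freeze level is positive no bloc
   is relocated, so raw pointers into relocatable storage stay valid.  */
#if 0
static void
example_freeze_thaw (void)
{
  r_alloc_freeze (8192);	/* try to leave ~8K of non-relocatable heap */
  /* ... code that keeps raw pointers into relocatable blocs ... */
  r_alloc_thaw ();		/* blocs freed while frozen are reclaimed
				   once the level drops back to zero */
}
#endif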
1108 | |
1109 /* The hook `malloc' uses for the function which gets more space | |
1110 from the system. */ | |
1111 #ifndef DOUG_LEA_MALLOC | |
1112 extern POINTER (*__morecore) (ptrdiff_t size); | |
1113 #endif | |
1114 | |
1115 /* Initialize various things for memory allocation. */ | |
1116 | |
1117 void | |
1118 init_ralloc (void) | |
1119 { | |
1120 if (r_alloc_initialized) | |
1121 return; | |
1122 | |
1123 r_alloc_initialized = 1; | |
1124 real_morecore = (POINTER (*) (ptrdiff_t)) __morecore; | |
1125 __morecore = | |
1126 #ifdef __GNUC__ | |
1127 (__typeof__ (__morecore)) | |
1128 #endif | |
1129 r_alloc_sbrk; | |
1130 | |
1131 first_heap = last_heap = &heap_base; | |
1132 first_heap->next = first_heap->prev = NIL_HEAP; | |
1133 first_heap->start = first_heap->bloc_start | |
1134 = virtual_break_value = break_value = (*real_morecore) (0); | |
1135 if (break_value == NIL) | |
1136 abort (); | |
1137 | |
1138 page_size = PAGE; | |
1139 extra_bytes = ROUNDUP (50000); | |
1140 | |
1141 #ifdef DOUG_LEA_MALLOC | |
1142 mallopt (M_TOP_PAD, 64 * 4096); | |
1143 #else | |
1144 #if 0 /* Hasn't been synched yet */ | |
1145 /* Give GNU malloc's morecore some hysteresis | |
1146 so that we move all the relocatable blocks much less often. */ | |
1147 __malloc_extra_blocks = 64; | |
1148 #endif | |
1149 #endif | |
1150 | |
1151 first_heap->end = (POINTER) ROUNDUP (first_heap->start); | |
1152 | |
1153 /* The extra call to real_morecore guarantees that the end of the | |
1154 address space is a multiple of page_size, even if page_size is | |
1155 not really the page size of the system running the binary in | |
1156 which page_size is stored. This allows a binary to be built on a | |
1157 system with one page size and run on a system with a smaller page | |
1158 size. */ | |
1159 (*real_morecore) (first_heap->end - first_heap->start); | |
1160 | |
1161 /* Clear the rest of the last page; this memory is in our address space | |
1162 even though it is after the sbrk value. */ | |
1163 /* Doubly true, with the additional call that explicitly adds the | |
1164 rest of that page to the address space. */ | |
1165 memset (first_heap->start, 0, first_heap->end - first_heap->start); | |
1166 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end; | |
1167 use_relocatable_buffers = 1; | |
1168 } | |
1169 | |
1170 #if defined (emacs) && defined (DOUG_LEA_MALLOC) | |
1171 | |
1172 /* Reinitialize the morecore hook variables after restarting a dumped | |
1173 Emacs. This is needed when using Doug Lea's malloc from GNU libc. */ | |
1174 void r_alloc_reinit (void); | |
1175 void | |
1176 r_alloc_reinit (void) | |
1177 { | |
1178 /* Only do this if the hook has been reset, so that we don't get an | |
1179 infinite loop, in case Emacs was linked statically. */ | |
1180 if ( (POINTER (*) (ptrdiff_t)) __morecore != r_alloc_sbrk) | |
1181 { | |
1182 real_morecore = (POINTER (*) (ptrdiff_t)) __morecore; | |
1183 __morecore = | |
1184 #ifdef __GNUC__ | |
1185 (__typeof__ (__morecore)) | |
1186 #endif | |
1187 r_alloc_sbrk; | |
1188 } | |
1189 } | |
1190 #if 0 | |
1191 #ifdef DEBUG | |
1192 | |
1193 void | |
1194 r_alloc_check (void) | |
1195 { | |
1196 int found = 0; | |
1197 heap_ptr h, ph = 0; | |
1198 bloc_ptr b, pb = 0; | |
1199 | |
1200 if (!r_alloc_initialized) | |
1201 return; | |
1202 | |
1203 assert (first_heap); | |
1204 assert (last_heap->end <= (POINTER) sbrk (0)); | |
1205 assert ((POINTER) first_heap < first_heap->start); | |
1206 assert (first_heap->start <= virtual_break_value); | |
1207 assert (virtual_break_value <= first_heap->end); | |
1208 | |
1209 for (h = first_heap; h; h = h->next) | |
1210 { | |
1211 assert (h->prev == ph); | |
1212 assert ((POINTER) ROUNDUP (h->end) == h->end); | |
1213 #if 0 /* ??? The code in ralloc.c does not really try to ensure | |
1214 the heap start has any sort of alignment. | |
1215 Perhaps it should. */ | |
1216 assert ((POINTER) MEM_ROUNDUP (h->start) == h->start); | |
1217 #endif | |
1218 assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start); | |
1219 assert (h->start <= h->bloc_start && h->bloc_start <= h->end); | |
1220 | |
1221 if (ph) | |
1222 { | |
1223 assert (ph->end < h->start); | |
1224 assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start); | |
1225 } | |
1226 | |
1227 if (h->bloc_start <= break_value && break_value <= h->end) | |
1228 found = 1; | |
1229 | |
1230 ph = h; | |
1231 } | |
1232 | |
1233 assert (found); | |
1234 assert (last_heap == ph); | |
1235 | |
1236 for (b = first_bloc; b; b = b->next) | |
1237 { | |
1238 assert (b->prev == pb); | |
1239 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data); | |
1240 assert ((SIZE) MEM_ROUNDUP (b->size) == b->size); | |
1241 | |
1242 ph = 0; | |
1243 for (h = first_heap; h; h = h->next) | |
1244 { | |
1245 if (h->bloc_start <= b->data && b->data + b->size <= h->end) | |
1246 break; | |
1247 ph = h; | |
1248 } | |
1249 | |
1250 assert (h); | |
1251 | |
1252 if (pb && pb->data + pb->size != b->data) | |
1253 { | |
1254 assert (ph && b->data == h->bloc_start); | |
1255 while (ph) | |
1256 { | |
1257 if (ph->bloc_start <= pb->data | |
1258 && pb->data + pb->size <= ph->end) | |
1259 { | |
1260 assert (pb->data + pb->size + b->size > ph->end); | |
1261 break; | |
1262 } | |
1263 else | |
1264 { | |
1265 assert (ph->bloc_start + b->size > ph->end); | |
1266 } | |
1267 ph = ph->prev; | |
1268 } | |
1269 } | |
1270 pb = b; | |
1271 } | |
1272 | |
1273 assert (last_bloc == pb); | |
1274 | |
1275 if (last_bloc) | |
1276 assert (last_bloc->data + last_bloc->size == break_value); | |
1277 else | |
1278 assert (first_heap->bloc_start == break_value); | |
1279 } | |
1280 #endif /* DEBUG */ | |
1281 #endif /* 0 */ | |
1282 | |
1283 #endif | |
1284 | |
1285 #else /* HAVE_MMAP */ | |
1286 | |
1287 /* | |
1288 A relocating allocator built using the mmap(2) facility available | |
1289 in some OSes. Based on another version written by Paul Flinders, | |
1290 from which code (and comments) are snarfed. | |
1291 | |
1292 The OS should support mmap() with MAP_ANONYMOUS attribute, or have | |
1293 /dev/zero. It should support private memory mapping. | |
1294 | |
1295 Paul Flinders wrote a version which works well for systems that | |
1296 allow callers to specify (virtual) addresses to mmap(). | |
1297 Unfortunately, such a scheme doesn't work for certain systems like | |
1298 HP-UX that have a system-wide virtual->real address map, and | |
1299 consequently impose restrictions on the virtual address values | |
1300 permitted. | |
1301 | |
1302 NB: The mapping scheme in HP-UX is motivated by the inverted page | |
1303 table design in some HP processors. | |
1304 | |
   This alternate implementation allows the addresses to be chosen by
   the system instead.  Flinders' scheme provides contiguous memory and
   this one doesn't; fortunately, buffer allocation doesn't insist upon
   contiguous memory.
1309 | |
1310 We don't really provide for hysteresis here, but add some metering | |
1311 to monitor how poorly the allocator actually works. See the | |
1312 documentation for `mmap-hysteresis'. | |
1313 | |
   When asked to release storage, this implementation cycles through the
   blocks allocated via mmap() and only hands the pointer to free() if it
   wasn't one of them.  Unfortunately, this is O(n) in the number of
   mmapped blocks.  (Not really, as we have a hash table which tries to
   reduce the cost.)  Also, this dereferences the pointer passed, so it
   would cause a segfault if garbage were passed to it.  */
1320 | |
1321 #include <fcntl.h> | |
1322 #include <sys/mman.h> | |
1323 #include <stdio.h> | |
1324 | |
1325 typedef void *VM_ADDR; /* VM addresses */ | |
1326 static CONST VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */ | |
1327 | |
1328 /* Configuration for relocating allocator. */ | |
1329 | |
1330 /* #define MMAP_GENERATE_ADDRESSES */ | |
1331 /* Define this if you want Emacs to manage the address table. | |
1332 It is not recommended unless you have major problems with the | |
1333 default scheme, which allows the OS to pick addresses. */ | |
1334 | |
1335 /* USELESS_LOWER_ADDRESS_BITS defines the number of bits which can be | |
1336 discarded while computing the hash, as they're always zero. The | |
1337 default is appropriate for a page size of 4096 bytes. */ | |
1338 | |
1339 #define USELESS_LOWER_ADDRESS_BITS 12 | |
1340 | |
1341 | |
1342 /* Size of hash table for inverted VM_ADDR->MMAP_HANDLE lookup */ | |
1343 | |
1344 #define MHASH_PRIME 89 | |
1345 | |
1346 | |
1347 /* Whether we want to enable metering of some ralloc performance. | |
1348 This incurs a constant penalty for each mmap operation. */ | |
1349 | |
1350 #define MMAP_METERING | |
1351 | |
1352 | |
/* Rename the following to protect against some smartness elsewhere.
1354 We need access to the allocator used for non-mmap allocation | |
1355 elsewhere, in case we get passed a handle that we didn't allocate | |
1356 ourselves. Currently, this default allocator is also used to | |
1357 maintain local structures for relocatable blocks. */ | |
1358 | |
1359 #define UNDERLYING_MALLOC malloc | |
1360 #define UNDERLYING_FREE free | |
1361 #define UNDERLYING_REALLOC realloc | |
1362 | |
1363 /* MAP_ADDRCHOICE_FLAG is set to MAP_FIXED if MMAP_GENERATE_ADDRESSES | |
1364 is defined, and MAP_VARIABLE otherwise. Some losing systems don't | |
1365 define the _FIXED/_VARIABLE flags, in which case it is set to 0 */ | |
1366 | |
1367 #ifdef MMAP_GENERATE_ADDRESSES | |
1368 # ifdef MAP_FIXED | |
1369 # define MAP_ADDRCHOICE_FLAG MAP_FIXED | |
1370 # endif | |
1371 #else /* !MMAP_GENERATE_ADDRESSES */ | |
1372 # ifdef MAP_VARIABLE | |
1373 # define MAP_ADDRCHOICE_FLAG MAP_VARIABLE | |
1374 # endif | |
1375 #endif /* MMAP_GENERATE_ADDRESSES */ | |
1376 | |
1377 /* Default case. */ | |
1378 #ifndef MAP_ADDRCHOICE_FLAG | |
1379 # define MAP_ADDRCHOICE_FLAG 0 | |
1380 #endif /* MAP_ADDRCHOICE_FLAG */ | |
1381 | |
1382 #ifdef MAP_ANONYMOUS | |
1383 # define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG | MAP_ANONYMOUS) | |
1384 #else | |
1385 # define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG) | |
1386 #endif /* MAP_ANONYMOUS */ | |
1387 | |
1388 | |
1389 /* (ptf): A flag to indicate whether we have initialized ralloc yet. For | |
1390 Emacs's sake, please do not make this local to malloc_init; on some | |
1391 machines, the dumping procedure makes all static variables | |
1392 read-only. On these machines, the word static is #defined to be | |
1393 the empty string, meaning that r_alloc_initialized becomes an | |
1394 automatic variable, and loses its value each time Emacs is started up. | |
1395 | |
1396 If we're using mmap this flag has three possible values | |
1397 0 - initial value | |
1398 1 - Normal value when running temacs. In this case buffers | |
1399 are allocated using malloc so that any data that they | |
1400 contain becomes part of the undumped executable. | |
1401 2 - Normal value when running emacs */ | |
1402 static int r_alloc_initialized = 0; | |
1403 | |
1404 /* (ptf): Macros for rounding. Note that rounding to any value is possible | |
1405 by changing the definition of PAGE. */ | |
1406 #define PAGE (getpagesize ()) | |
1407 #define PAGES_FOR(size) (((unsigned long int) (size) + page_size - 1)/page_size) | |
1408 #define ROUNDUP(size) ((unsigned long int)PAGES_FOR(size)*page_size) | |
1409 | |
1410 | |
1411 /* DEV_ZERO_FD is -1 normally, but for systems without MAP_ANONYMOUS | |
1412 points to a file descriptor opened on /dev/zero */ | |
1413 | |
1414 static int DEV_ZERO_FD = -1; | |
1415 | |
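/* Illustrative sketch (added for exposition, kept under #if 0) of how an
   anonymous, private mapping is typically requested with the MAP_FLAGS
   defined above in the default configuration (addresses chosen by the OS);
   the real call is made by New_Addr_Block further below.  With
   MAP_ANONYMOUS the fd argument is ignored (-1); otherwise DEV_ZERO_FD
   holds a descriptor opened on /dev/zero.  The function name is
   hypothetical.  */
#if 0
static VM_ADDR
example_map_pages (SIZE len)
{
  VM_ADDR addr = (VM_ADDR) mmap (0, len, PROT_READ | PROT_WRITE,
				 MAP_FLAGS, DEV_ZERO_FD, 0);
  return (addr == VM_FAILURE_ADDR) ? 0 : addr;
}
#endif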
1416 | |
1417 /* We actually need a data structure that can be usefully structured | |
1418 based on the VM address, and allows an ~O(1) lookup on an arbitrary | |
1419 address, i.e. a hash table. Maybe the XEmacs hash table can be | |
1420 coaxed enough. At the moment, we use lookup on a hash table to | |
1421 decide whether to do an O(n) search on the malloced block list. | |
1422 Addresses are hashed to a bucket modulo MHASH_PRIME. */ | |
1423 | |
1424 | |
1425 /* We settle for a standard doubly-linked-list. The dynarr type isn't | |
1426 very amenable to deletion of items in the middle, so we conjure up | |
   yet another stupid data structure.  The structure is maintained as a
1428 ring, and the singleton ring has the sole element as its left and | |
1429 right neighbours. */ | |
1430 | |
1431 static void init_MHASH_table (void); /* Forward reference */ | |
1432 | |
1433 typedef struct alloc_dll | |
1434 { | |
1435 size_t size; /* #bytes currently in use */ | |
1436 size_t space_for; /* #bytes we really have */ | |
1437 POINTER* aliased_address; /* Address of aliased variable, to tweak if relocating */ | |
1438 VM_ADDR vm_addr; /* VM address returned by mmap */ | |
1439 struct alloc_dll *left; /* Left link in circular doubly linked list */ | |
1440 struct alloc_dll *right; | |
1441 } *MMAP_HANDLE; | |
1442 | |
1443 static MMAP_HANDLE mmap_start = 0; /* Head of linked list */ | |
1444 static size_t page_size = 0; /* Size of VM pages */ | |
1445 static int mmap_hysteresis; /* Should be size_t, really. */ | |
1446 | |
1447 /* Get a new handle for a fresh block. */ | |
1448 static MMAP_HANDLE | |
1449 new_mmap_handle (size_t nsiz) | |
1450 { | |
1451 MMAP_HANDLE h = (MMAP_HANDLE) UNDERLYING_MALLOC( sizeof (struct alloc_dll)); | |
1452 if ( h == 0) return 0; | |
1453 h->size = nsiz; | |
1454 if (mmap_start == 0) | |
1455 { | |
1456 init_MHASH_table (); | |
1457 mmap_start = h; mmap_start->left = h; mmap_start->right = h; | |
1458 } | |
1459 { | |
1460 MMAP_HANDLE prev = mmap_start->left; | |
1461 MMAP_HANDLE nex = mmap_start; | |
1462 | |
1463 /* Four pointers need fixing. */ | |
1464 h->right = nex; | |
1465 h->left = prev; | |
1466 prev->right = h; | |
1467 nex->left = h; | |
1468 } | |
1469 return h; | |
1470 } | |
1471 | |
1472 /* Find a handle given the aliased address using linear search. */ | |
1473 static MMAP_HANDLE | |
1474 find_mmap_handle_lsearch (POINTER *alias) | |
1475 { | |
1476 MMAP_HANDLE h = mmap_start; | |
1477 if (h == 0) return 0; | |
1478 do { | |
1479 if (h->aliased_address == alias && *alias == h->vm_addr) | |
1480 return h; | |
1481 h = h->right; | |
1482 } while( h != mmap_start ); | |
1483 return 0; /* Bogus alias passed. */ | |
1484 } | |
1485 | |
1486 /* Free a handle. */ | |
1487 static void | |
1488 free_mmap_handle (MMAP_HANDLE h) | |
1489 { | |
1490 MMAP_HANDLE prev = h->left; | |
1491 MMAP_HANDLE nex = h->right; | |
1492 if (prev == h || nex == h) /* In fact, this should be && */ | |
1493 { /* We're the singleton dll */ | |
1494 UNDERLYING_FREE( h ); /* Free the sole item */ | |
1495 mmap_start = 0; return; | |
1496 } | |
1497 else if (h == mmap_start) | |
1498 { | |
1499 mmap_start = nex; /* Make sure mmap_start isn't bogus. */ | |
1500 } | |
1501 prev->right = nex; | |
1502 nex->left = prev; | |
1503 UNDERLYING_FREE( h ); | |
1504 } | |
1505 | |
1506 /* A simple hash table to speed up the inverted lookup of | |
1507 VM_ADDR->MMAP_HANDLE. We maintain the number of hits for a | |
1508 particular bucket. We invalidate a hash table entry during block | |
1509 deletion if the hash has cached the deleted block's address. */ | |
1510 | |
1511 /* Simple hash check. */ | |
1512 struct { | |
1513 int n_hits; /* How many addresses map to this? */ | |
1514 MMAP_HANDLE handle; /* What is the current handle? */ | |
1515 VM_ADDR addr; /* What is its VM address? */ | |
1516 } MHASH_HITS[ MHASH_PRIME ]; | |
1517 | |
1518 static void | |
1519 init_MHASH_table (void) | |
1520 { | |
1521 int i = 0; | |
1522 for (; i < MHASH_PRIME; i++) | |
1523 { | |
1524 MHASH_HITS[i].n_hits = 0; | |
1525 MHASH_HITS[i].addr = 0; | |
1526 MHASH_HITS[i].handle = 0; | |
1527 } | |
1528 } | |
1529 | |
1530 /* Compute the hash value for an address. */ | |
1531 static int | |
1532 MHASH (VM_ADDR addr) | |
1533 { | |
1534 #if (LONGBITS == 64) | |
1535 unsigned long int addr_shift = (unsigned long int)(addr) >> USELESS_LOWER_ADDRESS_BITS; | |
1536 #else | |
1537 unsigned int addr_shift = (unsigned int)(addr) >> USELESS_LOWER_ADDRESS_BITS; | |
1538 #endif | |
1539 int hval = addr_shift % MHASH_PRIME; /* We could have addresses which are -ve | |
1540 when converted to signed ints */ | |
1541 return ((hval >= 0) ? hval : MHASH_PRIME + hval); | |
1542 } | |
1543 | |
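/* Illustrative sketch (added for exposition, kept under #if 0): with
   4096-byte pages the low 12 bits of an mmap()ed address are always zero,
   so MHASH discards them and reduces the rest modulo MHASH_PRIME to get a
   bucket index in [0, MHASH_PRIME).  The function name and address are
   hypothetical.  */
#if 0
static void
example_mhash (void)
{
  VM_ADDR a = (VM_ADDR) 0x40003000;	/* hypothetical page-aligned address */
  int bucket = MHASH (a);		/* (0x40003000 >> 12) % 89 == 42 */
  if (bucket < 0 || bucket >= MHASH_PRIME)
    abort ();
}
#endif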
1544 /* Add a VM address with its corresponding handle to the table. */ | |
1545 static void | |
1546 MHASH_ADD (VM_ADDR addr, MMAP_HANDLE h) | |
1547 { | |
1548 int kVal = MHASH( addr ); | |
1549 if (MHASH_HITS[kVal].n_hits++ == 0) | |
1550 { /* Only overwrite the table if there were no hits so far. */ | |
1551 MHASH_HITS[kVal].addr = addr; | |
1552 MHASH_HITS[kVal].handle = h; | |
1553 } | |
1554 } | |
1555 | |
1556 /* Delete a VM address entry from the hash table. */ | |
1557 static void | |
1558 MHASH_DEL (VM_ADDR addr) | |
1559 { | |
1560 int kVal = MHASH( addr ); | |
1561 MHASH_HITS[kVal].n_hits--; | |
1562 if (addr == MHASH_HITS[kVal].addr) | |
1563 { | |
1564 MHASH_HITS[kVal].addr = 0; /* Invalidate cache. */ | |
1565 MHASH_HITS[kVal].handle = 0; | |
1566 } | |
1567 } | |
1568 | |
1569 /* End of hash buckets */ | |
1570 | |
1571 /* Metering malloc performance. */ | |
1572 #ifdef MMAP_METERING | |
1573 /* If we're metering, we introduce some extra symbols to aid the noble | |
1574 cause of bloating XEmacs core size. */ | |
1575 | |
1576 static Lisp_Object Qmmap_times_mapped; | |
1577 static Lisp_Object Qmmap_pages_mapped; | |
1578 static Lisp_Object Qmmap_times_unmapped; | |
1579 static Lisp_Object Qmmap_times_remapped; | |
1580 static Lisp_Object Qmmap_didnt_copy; | |
1581 static Lisp_Object Qmmap_pages_copied; | |
1582 static Lisp_Object Qmmap_average_bumpval; | |
1583 static Lisp_Object Qmmap_wastage; | |
1584 static Lisp_Object Qmmap_live_pages; | |
1585 static Lisp_Object Qmmap_addr_looked_up; | |
1586 static Lisp_Object Qmmap_hash_worked; | |
1587 static Lisp_Object Qmmap_addrlist_size; | |
1588 | |
1589 #define M_Map 0 /* How many times allocated? */ | |
1590 #define M_Pages_Map 1 /* How many pages allocated? */ | |
1591 #define M_Unmap 2 /* How many times freed? */ | |
1592 #define M_Remap 3 /* How many times increased in size? */ | |
1593 #define M_Didnt_Copy 4 /* How many times didn't need to copy? */ | |
1594 #define M_Copy_Pages 5 /* Total # pages copied */ | |
1595 #define M_Average_Bumpval 6 /* Average bump value */ | |
1596 #define M_Wastage 7 /* Remaining (unused space) */ | |
1597 #define M_Live_Pages 8 /* #live pages */ | |
1598 #define M_Address_Lookup 9 /* How many times did we need to check if an addr is in the block? */ | |
1599 #define M_Hash_Worked 10 /* How many times did the simple hash check work? */ | |
1600 #define M_Addrlist_Size 11 /* What is the size of the XEmacs memory map? */ | |
1601 | |
1602 #define N_Meterables 12 /* Total number of meterables */ | |
1603 #define MEMMETER(x) {x;} | |
1604 #define MVAL(x) (meter[x]) | |
1605 #define MLVAL(x) (make_int (meter[x])) | |
1606 static int meter[N_Meterables]; | |
1607 | |
1608 DEFUN ("mmap-allocator-status", Fmmap_allocator_status, 0, 0, 0, /* | |
1609 Return some information about mmap-based allocator. | |
1610 | |
1611 mmap-times-mapped: number of times r_alloc was called. | |
1612 mmap-pages-mapped: number of pages mapped by r_alloc calls only. | |
1613 mmap-times-unmapped: number of times r_free was called. | |
1614 mmap-times-remapped: number of times r_re_alloc was called. | |
1615 mmap-didnt-copy: number of times re-alloc did NOT have to move the block. | |
1616 mmap-pages-copied: total number of pages copied. | |
1617 mmap-average-bumpval: average increase in size demanded to re-alloc. | |
1618 mmap-wastage: total number of bytes allocated, but not currently in use. | |
1619 mmap-live-pages: total number of pages live. | |
1620 mmap-addr-looked-up: total number of times needed to check if addr is in block. | |
1621 mmap-hash-worked: total number of times the simple hash check worked. | |
1622 mmap-addrlist-size: number of entries in address picking list. | |
1623 */ | |
1624 ()) | |
1625 { | |
1626 Lisp_Object result = Qnil; | |
1627 | |
1628 result = cons3 (Qmmap_addrlist_size, MLVAL (M_Addrlist_Size), result); | |
1629 result = cons3 (Qmmap_hash_worked, MLVAL (M_Hash_Worked), result); | |
1630 result = cons3 (Qmmap_addr_looked_up, MLVAL (M_Address_Lookup), result); | |
1631 result = cons3 (Qmmap_live_pages, MLVAL (M_Live_Pages), result); | |
1632 result = cons3 (Qmmap_wastage, MLVAL (M_Wastage), result); | |
1633 result = cons3 (Qmmap_average_bumpval,MLVAL (M_Average_Bumpval), result); | |
1634 result = cons3 (Qmmap_pages_copied, MLVAL (M_Copy_Pages), result); | |
1635 result = cons3 (Qmmap_didnt_copy, MLVAL (M_Didnt_Copy), result); | |
1636 result = cons3 (Qmmap_times_remapped, MLVAL (M_Remap), result); | |
1637 result = cons3 (Qmmap_times_unmapped, MLVAL (M_Unmap), result); | |
1638 result = cons3 (Qmmap_pages_mapped, MLVAL (M_Pages_Map), result); | |
1639 result = cons3 (Qmmap_times_mapped, MLVAL (M_Map), result); | |
1640 | |
1641 return result; | |
1642 } | |
1643 | |
1644 #else /* !MMAP_METERING */ | |
1645 | |
1646 #define MEMMETER(x) | |
1647 #define MVAL(x) | |
1648 | |
1649 #endif /* MMAP_METERING */ | |
1650 | |
1651 static MMAP_HANDLE | |
1652 find_mmap_handle (POINTER *alias) | |
1653 { | |
1654 int kval = MHASH( *alias ); | |
1655 MEMMETER( MVAL(M_Address_Lookup)++ ) | |
1656 switch( MHASH_HITS[kval].n_hits) | |
1657 { | |
1658 case 0: | |
1659 MEMMETER( MVAL( M_Hash_Worked )++ ) | |
1660 return 0; | |
1661 | |
1662 case 1: | |
1663 if (*alias == MHASH_HITS[kval].addr) | |
1664 { | |
1665 MEMMETER( MVAL( M_Hash_Worked) ++ ); | |
1666 return MHASH_HITS[kval].handle; | |
1667 } | |
1668 /* FALL THROUGH */ | |
1669 default: | |
1670 return find_mmap_handle_lsearch( alias ); | |
1671 } /* switch */ | |
1672 } | |
1673 | |
1674 /* | |
1675 Some kernels don't like being asked to pick addresses for mapping | |
1676 themselves---IRIX is known to become extremely slow if mmap is | |
1677 passed a ZERO as the first argument. In such cases, we use an | |
1678 address map which is managed local to the XEmacs process. The | |
1679 address map maintains an ordered linked list of (address, size, | |
1680 occupancy) triples ordered by the absolute address. Initially, a | |
large address area is marked as being empty. The address picking
scheme carves each request out of the first empty block that is
large enough. If mmap at the requested address fails, the block is
marked unavailable and is never attempted again. The scheme will
1685 keep fragmenting the large empty block until it finds an address | |
1686 which can be successfully mmapped, or until there are no free | |
1687 blocks of the given size left. | |
1688 | |
Note that this scheme, given its first-fit strategy, is prone to
fragmentation of the first part of the memory earmarked for this
purpose [ACP Vol I]. We can't use the standard workaround of a
randomized first fit, because we don't want to presume too much
about the memory map. Instead, we try to coalesce empty or
unavailable blocks at every available opportunity. */
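
/* Illustrative sketch (hypothetical request sizes) of how New_Addr_Block
   carves the first suitable empty block, and how Free_Addr_Block later
   hands space back to be coalesced:

     initially:    [ empty 192MB ]
     alloc 2MB:    [ occupied 2MB ][ empty 190MB ]
     alloc 1MB:    [ occupied 2MB ][ occupied 1MB ][ empty 189MB ]
     free  2MB:    [ empty 2MB ][ occupied 1MB ][ empty 189MB ]
     free  1MB:    [ empty 192MB ]   (adjacent empty blocks coalesced)
*/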
1695 | |
1696 /* Initialization procedure for address picking scheme */ | |
1697 static void Addr_Block_initialize(void); | |
1698 | |
1699 /* Get a suitable VM_ADDR via mmap */ | |
1700 static VM_ADDR New_Addr_Block( SIZE sz ); | |
1701 | |
1702 /* Free a VM_ADDR allocated via New_Addr_Block */ | |
1703 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ); | |
1704 | |
1705 #ifdef MMAP_GENERATE_ADDRESSES | |
/* Implementation of the three calls for address picking when XEmacs is in charge. */
1707 | |
1708 /* The enum denotes the status of the following block. */ | |
1709 typedef enum { empty = 0, occupied, unavailable } addr_status; | |
1710 | |
1711 typedef struct addr_chain | |
1712 { | |
1713 POINTER addr; | |
1714 SIZE sz; | |
1715 addr_status flag; | |
1716 struct addr_chain *next; | |
1717 } ADDRESS_BLOCK, *ADDRESS_CHAIN; | |
/* NB: adjacent blocks that are both empty or both unavailable get coalesced. */
1719 | |
1720 static ADDRESS_CHAIN addr_chain = 0; | |
/* Start off the address block chain with a humongous address block
   which is empty to start with. Note that addr_chain is invariant
   with respect to the addition/deletion of address blocks, because of
   the assert in Coalesce() and the strict ordering of blocks by their
   addresses. */
1726 static void Addr_Block_initialize() | |
1727 { | |
1728 MEMMETER( MVAL( M_Addrlist_Size )++) | |
1729 addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK )); | |
1730 addr_chain->next = 0; /* Last block in chain */ | |
  addr_chain->sz = 0x0c000000; /* Size: 192 MB of address space to carve from */
  addr_chain->addr = (POINTER) (0x04000000 | DATA_SEG_BITS); /* Start 64 MB in, with the data segment bits ORed in */
  addr_chain->flag = empty; /* Nothing handed out yet */
1734 } | |
1735 | |
1736 /* Coalesce address blocks if they are contiguous. Only empty and | |
1737 unavailable slots are coalesced. */ | |
1738 static void Coalesce_Addr_Blocks() | |
1739 { | |
1740 ADDRESS_CHAIN p; | |
1741 for (p = addr_chain; p; p = p->next) | |
1742 { | |
1743 while (p->next && p->flag == p->next->flag) | |
1744 { | |
1745 ADDRESS_CHAIN np; | |
1746 np = p->next; | |
1747 | |
1748 if (p->flag == occupied) break; /* No cigar */ | |
1749 | |
1750 /* Check if the addresses are contiguous. */ | |
1751 if (p->addr + p->sz != np->addr) break; | |
1752 | |
1753 MEMMETER( MVAL( M_Addrlist_Size )--) | |
1754 /* We can coalesce these two. */ | |
1755 p->sz += np->sz; | |
1756 p->next = np->next; | |
1757 assert( np != addr_chain ); /* We're not freeing the head of the list. */ | |
1758 UNDERLYING_FREE( np ); | |
1759 } | |
1760 } /* for all p */ | |
1761 } | |
1762 | |
1763 /* Get an empty address block of specified size. */ | |
1764 static VM_ADDR New_Addr_Block( SIZE sz ) | |
1765 { | |
1766 ADDRESS_CHAIN p = addr_chain; | |
1767 VM_ADDR new_addr = VM_FAILURE_ADDR; | |
1768 for (; p; p = p->next) | |
1769 { | |
1770 if (p->flag == empty && p->sz > sz) | |
1771 { | |
1772 /* Create a new entry following p which is empty. */ | |
1773 ADDRESS_CHAIN remainder = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ) ); | |
1774 remainder->next = p->next; | |
1775 remainder->flag = empty; | |
1776 remainder->addr = p->addr + sz; | |
1777 remainder->sz = p->sz - sz; | |
1778 | |
1779 MEMMETER( MVAL( M_Addrlist_Size )++) | |
1780 | |
1781 /* Now make p become an occupied block with the appropriate size */ | |
1782 p->next = remainder; | |
1783 p->sz = sz; | |
1784 new_addr = mmap( (VM_ADDR) p->addr, p->sz, PROT_READ|PROT_WRITE, | |
1785 MAP_FLAGS, DEV_ZERO_FD, 0 ); | |
1786 if (new_addr == VM_FAILURE_ADDR) | |
1787 { | |
1788 p->flag = unavailable; | |
1789 continue; | |
1790 } | |
1791 p->flag = occupied; | |
1792 break; | |
1793 } | |
1794 } | |
1795 Coalesce_Addr_Blocks(); | |
1796 return new_addr; | |
1797 } | |
1798 | |
1799 /* Free an address block. We mark the block as being empty, and attempt to | |
1800 do any coalescing that may have resulted from this. */ | |
1801 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ) | |
1802 { | |
1803 ADDRESS_CHAIN p = addr_chain; | |
1804 for (; p; p = p->next ) | |
1805 { | |
1806 if (p->addr == addr) | |
1807 { | |
1808 if (p->sz != sz) abort(); /* ACK! Shouldn't happen at all. */ | |
1809 munmap( (VM_ADDR) p->addr, p->sz ); | |
1810 p->flag = empty; | |
1811 break; | |
1812 } | |
1813 } | |
1814 if (!p) abort(); /* Can't happen... we've got a block to free which is not in | |
1815 the address list. */ | |
1816 Coalesce_Addr_Blocks(); | |
1817 } | |
1818 #else /* !MMAP_GENERATE_ADDRESSES */ | |
1819 /* This is an alternate (simpler) implementation in cases where the | |
1820 address is picked by the kernel. */ | |
1821 | |
1822 static void Addr_Block_initialize(void) | |
1823 { | |
1824 /* Nothing. */ | |
1825 } | |
1826 | |
1827 static VM_ADDR New_Addr_Block( SIZE sz ) | |
1828 { | |
1829 return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS, | |
1830 DEV_ZERO_FD, 0 ); | |
1831 } | |
1832 | |
1833 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ) | |
1834 { | |
1835 munmap ((caddr_t) addr, sz ); | |
1836 } | |
1837 | |
1838 #endif /* MMAP_GENERATE_ADDRESSES */ | |
1839 | |
1840 | |
1841 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */ | |
1842 | |
1843 /* | |
1844 r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start | |
1845 address aliased to the first parameter. | |
1846 */ | |
1847 | |
1848 POINTER r_alloc (POINTER *ptr, SIZE size); | |
1849 POINTER | |
1850 r_alloc (POINTER *ptr, SIZE size) | |
1851 { | |
1852 MMAP_HANDLE mh; | |
1853 | |
1854 switch(r_alloc_initialized) | |
1855 { | |
1856 case 0: | |
1857 abort(); | |
1858 case 1: | |
1859 *ptr = (POINTER) UNDERLYING_MALLOC(size); | |
1860 break; | |
1861 default: | |
1862 mh = new_mmap_handle( size ); | |
1863 if (mh) | |
1864 { | |
1865 SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); | |
1866 SIZE mmapped_size = ROUNDUP( size + hysteresis ); | |
1867 MEMMETER( MVAL(M_Map)++ ) | |
1868 MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) ) | |
1869 MEMMETER( MVAL(M_Wastage) += mmapped_size - size ) | |
1870 MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) ) | |
1871 mh->vm_addr = New_Addr_Block( mmapped_size ); | |
if (mh->vm_addr == VM_FAILURE_ADDR)
  {
    free_mmap_handle( mh ); /* Free the loser */
    *ptr = 0;
    return 0; /* ralloc failed due to mmap() failure. */
  }
1877 MHASH_ADD( mh->vm_addr, mh ); | |
1878 mh->space_for = mmapped_size; | |
1879 mh->aliased_address = ptr; | |
1880 *ptr = (POINTER) mh->vm_addr; | |
1881 } | |
1882 else | |
1883 *ptr = 0; /* Malloc of block failed */ | |
1884 break; | |
1885 } | |
1886 return *ptr; | |
1887 } | |
1888 | |
1889 /* Free a bloc of relocatable storage whose data is pointed to by PTR. | |
1890 Store 0 in *PTR to show there's no block allocated. */ | |
1891 | |
1892 void r_alloc_free (POINTER *ptr); | |
1893 void | |
1894 r_alloc_free (POINTER *ptr) | |
1895 { | |
1896 switch( r_alloc_initialized) { | |
1897 case 0: | |
1898 abort(); | |
1899 | |
1900 case 1: | |
1901 UNDERLYING_FREE( *ptr ); /* Certain this is from the heap. */ | |
1902 break; | |
1903 | |
1904 default: | |
1905 { | |
1906 MMAP_HANDLE dead_handle = find_mmap_handle( ptr ); | |
1907 /* Check if we've got it. */ | |
1908 if (dead_handle == 0) /* Didn't find it in the list of mmap handles */ | |
1909 { | |
1910 UNDERLYING_FREE( *ptr ); | |
1911 } | |
1912 else | |
1913 { | |
1914 MEMMETER( MVAL( M_Wastage ) -= (dead_handle->space_for - dead_handle->size) ) | |
1915 MEMMETER( MVAL( M_Live_Pages ) -= (dead_handle->space_for / page_size )) | |
1916 MEMMETER(MVAL(M_Unmap)++) | |
1917 MHASH_DEL( dead_handle->vm_addr ); | |
1918 Free_Addr_Block( dead_handle->vm_addr, dead_handle->space_for ); | |
1919 free_mmap_handle (dead_handle); | |
1920 } | |
1921 } | |
1922 break; | |
1923 } /* r_alloc_initialized */ | |
1924 *ptr = 0; /* Zap the pointer's contents. */ | |
1925 } | |
1926 | |
1927 /* Given a pointer at address PTR to relocatable data, resize it to SIZE. | |
1928 | |
1929 Change *PTR to reflect the new bloc, and return this value. | |
1930 | |
1931 If more memory cannot be allocated, then leave *PTR unchanged, and | |
1932 return zero. */ | |
1933 | |
1934 POINTER r_re_alloc (POINTER *ptr, SIZE sz); | |
1935 POINTER | |
1936 r_re_alloc (POINTER *ptr, SIZE sz) | |
1937 { | |
1938 if (r_alloc_initialized == 0) | |
1939 { | |
1940 abort (); | |
1941 return 0; /* suppress compiler warning */ | |
1942 } | |
1943 else if (r_alloc_initialized == 1) | |
1944 { | |
1945 POINTER tmp = (POINTER) realloc(*ptr, sz); | |
1946 if (tmp) | |
1947 *ptr = tmp; | |
1948 return tmp; | |
1949 } | |
1950 else | |
1951 { | |
1952 SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); | |
1953 SIZE actual_sz = ROUNDUP( sz + hysteresis ); | |
1954 MMAP_HANDLE h = find_mmap_handle( ptr ); | |
1955 VM_ADDR new_vm_addr; | |
1956 | |
1957 if ( h == 0 ) /* Was allocated using malloc. */ | |
1958 { | |
1959 POINTER tmp = (POINTER) UNDERLYING_REALLOC(*ptr, sz); | |
1960 if (tmp) | |
1961 *ptr = tmp; | |
1962 return tmp; | |
1963 } | |
1964 | |
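/* Metering only: fold this request's growth (sz - h->size) into the
   running average, new_avg = (n * old_avg + growth) / (n + 1), where n
   is the number of remaps counted so far. */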
1965 MEMMETER( | |
1966 MVAL(M_Average_Bumpval) = | |
1967 (((double) MVAL(M_Remap) * MVAL(M_Average_Bumpval)) + (sz - h->size)) | |
1968 / (double) (MVAL(M_Remap) + 1)) | |
1969 MEMMETER(MVAL(M_Remap)++) | |
if (h->space_for > sz) /* Enough spare room already, or a shrink was asked for. */
  {
1972 MEMMETER( MVAL(M_Didnt_Copy)++ ) | |
1973 MEMMETER( MVAL(M_Wastage) -= (sz - h->size)) | |
1974 /* We're pretty dumb at handling shrinkage. We should check for | |
1975 a larger gap than the standard hysteresis allowable, and if so, | |
1976 shrink the number of pages. Right now, we simply reset the size | |
1977 component and return. */ | |
1978 h->size = sz; | |
1979 return *ptr; | |
1980 } | |
1981 | |
1982 new_vm_addr = New_Addr_Block( actual_sz ); | |
1983 if (new_vm_addr == VM_FAILURE_ADDR) | |
1984 {/* Failed to realloc. */ | |
1985 /* *ptr = 0; */ | |
1986 return 0; | |
1987 } | |
1988 | |
1989 MHASH_ADD( new_vm_addr, h ); | |
1990 /* We got a block OK: now we should move the old contents to the | |
1991 new address. We use the old size of this block. */ | |
1992 memmove(new_vm_addr, h->vm_addr, h->size); | |
1993 MHASH_DEL( h->vm_addr ); | |
1994 Free_Addr_Block( h->vm_addr, h->space_for ); /* Unmap old area. */ | |
1995 | |
1996 MEMMETER( MVAL( M_Copy_Pages ) += (h->space_for/page_size) ) | |
1997 MEMMETER( MVAL( M_Live_Pages ) -= (h->space_for / page_size)) | |
1998 MEMMETER( MVAL( M_Live_Pages ) += (actual_sz / page_size)) | |
1999 MEMMETER( MVAL( M_Wastage ) -= (h->space_for - h->size)) | |
2000 MEMMETER( MVAL( M_Wastage ) += (actual_sz - sz) ) | |
2001 | |
2002 /* Update block datastructure. */ | |
2003 h->space_for = actual_sz; /* New total space */ | |
2004 h->size = sz; /* New (requested) size */ | |
2005 h->vm_addr = new_vm_addr; /* New VM start address */ | |
2006 h->aliased_address = ptr; /* Change alias to reflect block relocation. */ | |
2007 *ptr = (POINTER) h->vm_addr; | |
2008 return *ptr; | |
2009 } | |
2010 } | |
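
#if 0
/* Minimal usage sketch, not compiled: how a client of this relocating
   interface is expected to drive r_alloc, r_re_alloc and r_alloc_free
   through a single aliased pointer. The variable name is hypothetical. */
static void
example_client (void)
{
  POINTER text = 0;

  if (r_alloc (&text, 8192) == 0)      /* map a fresh 8K bloc */
    return;                            /* allocation failed */

  if (r_re_alloc (&text, 16384) == 0)  /* grow; on success the bloc may
                                          move and the alias is updated */
    {
      r_alloc_free (&text);            /* growth failed; give the bloc back */
      return;
    }

  r_alloc_free (&text);                /* unmap the bloc and zero the alias */
}
#endif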
2011 | |
2012 | |
/* Initialize various things for memory allocation. This is called twice:
   the first call only bumps r_alloc_initialized to 1, so that early blocs
   are served by the underlying malloc; the second call performs the real
   setup (address picker, page size, and, where needed, the /dev/zero fd
   and the metering counters). */
2015 void | |
2016 init_ralloc (void) | |
2017 { | |
2018 int i = 0; | |
2019 if (r_alloc_initialized > 1) | |
2020 return; /* used to return 1 */ | |
2021 | |
2022 if (++r_alloc_initialized == 1) | |
2023 return; /* used to return 1 */ | |
2024 | |
2025 Addr_Block_initialize(); /* Initialize the address picker, if required. */ | |
2026 page_size = PAGE; | |
2027 assert( page_size > 0 ); /* getpagesize() bogosity check. */ | |
2028 | |
2029 #ifndef MAP_ANONYMOUS | |
2030 DEV_ZERO_FD = open( "/dev/zero", O_RDWR ); | |
2031 if (DEV_ZERO_FD < 0) | |
2032 /* Failed. Perhaps we should abort here? */ | |
2033 return; /* used to return 0 */ | |
2034 #endif | |
2035 | |
2036 #ifdef MMAP_METERING | |
2037 for(i = 0; i < N_Meterables; i++ ) | |
2038 { | |
2039 meter[i] = 0; | |
2040 } | |
2041 #endif /* MMAP_METERING */ | |
2042 } | |
2043 | |
2044 void | |
2045 syms_of_ralloc (void) | |
2046 { | |
2047 #ifdef MMAP_METERING | |
2048 defsymbol (&Qmmap_times_mapped, "mmap-times-mapped"); | |
2049 defsymbol (&Qmmap_pages_mapped, "mmap-pages-mapped"); | |
2050 defsymbol (&Qmmap_times_unmapped, "mmap-times-unmapped"); | |
2051 defsymbol (&Qmmap_times_remapped, "mmap-times-remapped"); | |
2052 defsymbol (&Qmmap_didnt_copy, "mmap-didnt-copy"); | |
2053 defsymbol (&Qmmap_pages_copied, "mmap-pages-copied"); | |
2054 defsymbol (&Qmmap_average_bumpval, "mmap-average-bumpval"); | |
2055 defsymbol (&Qmmap_wastage, "mmap-wastage"); | |
2056 defsymbol (&Qmmap_live_pages, "mmap-live-pages"); | |
2057 defsymbol (&Qmmap_addr_looked_up, "mmap-addr-looked-up"); | |
2058 defsymbol (&Qmmap_hash_worked, "mmap-hash-worked"); | |
2059 defsymbol (&Qmmap_addrlist_size, "mmap-addrlist-size"); | |
2060 DEFSUBR (Fmmap_allocator_status); | |
2061 #endif /* MMAP_METERING */ | |
2062 } | |
2063 | |
2064 void | |
2065 vars_of_ralloc (void) | |
2066 { | |
2067 DEFVAR_INT ("mmap-hysteresis", &mmap_hysteresis /* | |
2068 Extra room left at the end of an allocated arena, | |
2069 so that a re-alloc requesting extra space smaller than this | |
2070 does not actually cause a new arena to be allocated. | |
2071 | |
2072 A negative value is considered equal to zero. This is the | |
2073 minimum amount of space guaranteed to be left at the end of | |
2074 the arena. Because allocation happens in multiples of the OS | |
2075 page size, it is possible for more space to be left unused. | |
2076 */ ); | |
2077 mmap_hysteresis = 0; | |
2078 } | |
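
/* For illustration only (hypothetical value): raising the slack from Lisp,
   e.g. (setq mmap-hysteresis 65536), guarantees at least 64 KB of spare
   room at the end of each newly mapped or remapped arena (rounded up to
   whole pages), so small growth requests can be satisfied in place. */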
2079 | |
2080 #endif /* HAVE_MMAP */ |