src/ralloc.c @ 0:376386a54a3c (tag r19-14)
Import from CVS: tag r19-14
author: cvs
date:   Mon, 13 Aug 2007 08:45:50 +0200
children: ac2d302a0011

/* Block-relocating memory allocator.
   Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS. */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the object pointed to has a
   size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else
typedef unsigned char *POINTER;
#endif
#endif /* 0 */

/* Unconditionally use unsigned char * for this.  */
typedef unsigned char *POINTER;

typedef unsigned long SIZE;

#include "getpagesize.h"

#include <string.h>

#else /* Not emacs.  */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#endif /* emacs.  */

#define safe_bcopy(x, y, z) memmove (y, x, z)

#define NIL ((POINTER) 0)


#ifndef HAVE_MMAP

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;


/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc ().  */
static POINTER virtual_break_value;

/* The break value, viewed by the relocatable blocs.  */
static POINTER break_value;

/* The REAL (i.e., page aligned) break value of the process.  */
static POINTER page_break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
                       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) ((addr) & (~(page_size - 1)))
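
/* A worked example of the macros above (illustrative comment only,
   assuming page_size == 4096 == 0x1000):
     ROUNDUP (5000)         => (5000 + 4095) & ~4095 == 8192
     ALIGNED (0x2000)       => 1;  ALIGNED (0x2010) => 0
     ROUND_TO_PAGE (0x2010) => 0x2000  */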

/* Functions to get and return memory from the system.  */

/* Obtain SIZE bytes of space.  If enough space is not presently available
   in our process reserve, (i.e., (page_break_value - break_value)),
   this means getting more page-aligned space from the system.

   Return non-zero if all went well, or zero if we couldn't allocate
   the memory.  */
static int
obtain (SIZE size)
{
  SIZE already_available = page_break_value - break_value;

  if (already_available < size)
    {
      SIZE get = ROUNDUP (size - already_available);
      /* Get some extra, so we can come here less often.  */
      get += extra_bytes;

      if ((*real_morecore) (get) == 0)
        return 0;

      page_break_value += get;
    }

  break_value += size;

  return 1;
}
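
/* An illustrative trace of obtain (), as a comment only; the concrete
   numbers are hypothetical.  Suppose page_size is 4096, extra_bytes is
   53248, break_value == 0x10000 and page_break_value == 0x11000, so the
   reserve is 0x1000 bytes.  Then obtain (0x1800) finds 0x1000 already
   available, rounds the missing 0x800 up to 0x1000, asks real_morecore
   for 0x1000 + 53248 bytes, and advances break_value by exactly the
   0x1800 requested.  */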

/* Obtain SIZE bytes of space and return a pointer to the new area.
   If we could not allocate the space, return zero.  */

static POINTER
get_more_space (SIZE size)
{
  POINTER ptr = break_value;
  if (obtain (size))
    return ptr;
  else
    return 0;
}

/* Note that SIZE bytes of space have been relinquished by the process.
   If SIZE is more than a page, return the space to the system.  */

static void
relinquish (SIZE size)
{
  POINTER new_page_break;
  int excess;

  break_value -= size;
  new_page_break = (POINTER) ROUNDUP (break_value);
  excess = (char *) page_break_value - (char *) new_page_break;

  if (excess > extra_bytes * 2)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes.  */
      if ((*real_morecore) (extra_bytes - excess) == 0)
        abort ();

      page_break_value += extra_bytes - excess;
    }

  /* Zero the space from the end of the "official" break to the actual
     break, so that bugs show up faster.  */
  memset (break_value, 0, ((char *) page_break_value - (char *) break_value));
}

/* The meat - allocating, freeing, and relocating blocs.  */

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.  */
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;
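
/* A minimal sketch (disabled, for illustration only) of a checker for
   the abutment invariant documented above: each bloc's data ends
   exactly where the next bloc's data begins.  Nothing in this file
   calls it; it merely restates the invariant as code.  */
#if 0
static void
check_bloc_abutment (void)
{
  bloc_ptr b;
  for (b = first_bloc; b != NIL_BLOC && b->next != NIL_BLOC; b = b->next)
    if (b->data + b->size != b->next->data)
      abort ();
}
#endif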

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  bloc_ptr new_bloc;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = get_more_space (size)))
    {
      if (new_bloc)
        free (new_bloc);

      return 0;
    }

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;

  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}

/* Relocate all blocs from BLOC on upward in the list to the zone
   indicated by ADDRESS.  Direction of relocation is determined by
   the position of ADDRESS relative to BLOC->data.

   If BLOC is NIL_BLOC, nothing is done.

   Note that ordering of blocs is not affected by this function.  */

static void
relocate_some_blocs (bloc_ptr bloc, POINTER address)
{
  if (bloc != NIL_BLOC)
    {
      SIZE offset = address - bloc->data;
      SIZE data_size = 0;
      bloc_ptr b;

      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          data_size += b->size;
          b->data += offset;
          *b->variable = b->data;
        }

      memmove (address, address - offset, data_size);
    }
}
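
/* For example (comment only): with blocs B1 (data == D, size == s1)
   and B2 (data == D + s1, size == s2), relocate_some_blocs (B1, D + k)
   computes offset == k, bumps both data pointers and the variables
   aliased to them by k, and then moves the s1 + s2 contiguous bytes
   from D to D + k with a single memmove.  */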

/* Free BLOC from the chain of blocs, relocating any blocs above it
   and returning BLOC->size bytes to the free area.  */

static void
free_bloc (bloc_ptr bloc)
{
  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  relocate_some_blocs (bloc->next, bloc->data);
  relinquish (bloc->size);
  free (bloc);
}

/* Interface routines.  */

static int use_relocatable_buffers;

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (long size)
{
  /* This is the first address not currently available for the heap.  */
  POINTER top;
  /* Amount of empty space below that.  */
  /* It is not correct to use SIZE here, because that is usually unsigned.
     ptrdiff_t would be okay, but is not always available.
     `long' will work in all cases, in practice.  */
  long already_available;
  POINTER ptr;

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  top = first_bloc ? first_bloc->data : page_break_value;
  already_available = (char *) top - (char *) virtual_break_value;

  /* Do we not have enough gap already?  */
  if (size > 0 && already_available < size)
    {
      /* Get what we need, plus some extra so we can come here less often.  */
      SIZE get = size - already_available + extra_bytes;

      if (! obtain (get))
        return 0;

      if (first_bloc)
        relocate_some_blocs (first_bloc, first_bloc->data + get);

      /* Zero out the space we just allocated, to help catch bugs
         quickly.  */
      memset (virtual_break_value, 0, get);
    }
  /* Can we keep extra_bytes of gap while freeing at least extra_bytes?  */
  else if (size < 0 && already_available - size > 2 * extra_bytes)
    {
      /* Ok, do so.  This is how many to free.  */
      SIZE give_back = already_available - size - extra_bytes;

      if (first_bloc)
        relocate_some_blocs (first_bloc, first_bloc->data - give_back);
      relinquish (give_back);
    }

  ptr = virtual_break_value;
  virtual_break_value += size;

  return ptr;
}
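
/* A worked illustration of the hysteresis above, as a comment only.
   With 4096-byte pages, extra_bytes is ROUNDUP (50000) == 53248.  A
   growing request that exceeds the current gap relocates the blocs
   upward by (size - already_available + extra_bytes), so the next
   53248 bytes of growth cost no relocation at all.  A shrinking
   request relocates downward only when the resulting gap would exceed
   2 * extra_bytes, and even then leaves extra_bytes of gap behind.  */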

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    init_ralloc ();

  new_bloc = get_bloc (size);
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
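
/* A disabled usage sketch of the interface above (illustrative only;
   the variable and function names are hypothetical).  The caller
   passes the *address* of the variable that holds the data pointer,
   so the allocator can rewrite that variable whenever the bloc is
   relocated behind the caller's back.  */
#if 0
static POINTER my_buffer_text;

static void
example_relocatable_use (void)
{
  if (r_alloc (&my_buffer_text, 4096) == 0)
    abort ();                     /* allocation failed */
  my_buffer_text[0] = 'x';        /* valid now; the bloc may move later */
  if (r_re_alloc (&my_buffer_text, 8192) == 0)
    abort ();                     /* grow failed; my_buffer_text unchanged */
  r_alloc_free (&my_buffer_text); /* frees the bloc, zeroes the pointer */
}
#endif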

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (POINTER *ptr)
{
  bloc_ptr dead_bloc;

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  bloc_ptr bloc;

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size <= bloc->size)
    /* Wouldn't it be useful to actually resize the bloc here?  */
    return *ptr;

  if (! obtain (size - bloc->size))
    return 0;

  relocate_some_blocs (bloc->next, bloc->data + size);

  /* Zero out the new space in the bloc, to help catch bugs faster.  */
  memset (bloc->data + bloc->size, 0, size - bloc->size);

  /* Indicate that this block has a new size.  */
  bloc->size = size;

  return *ptr;
}

/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

void
init_ralloc (void)
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  page_break_value = (POINTER) ROUNDUP (break_value);

  /* From eirik@elf.IThaca.ny.US (Eirik Fuller):
     The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  (Such as compiling on a Sun 4/260 4.1.3 and running on a
     Sun 4/65 4.1.3: 8k pages at compile time, 4k pages at run time.)
   */
  (*real_morecore) (page_break_value - break_value);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */

  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (break_value, 0, (page_break_value - break_value));
  /* Also from eirik@elf.IThaca.ny.US */
  virtual_break_value = break_value = page_break_value;
  use_relocatable_buffers = 1;
}
#else /* HAVE_MMAP */

/*
   A relocating allocator built using the mmap(2) facility available
   in some OSes.  Based on another version written by Paul Flinders,
   from which code (and comments) are snarfed.

   The OS should support mmap() with MAP_ANONYMOUS attribute, or have
   /dev/zero.  It should support private memory mapping.

   Paul Flinders wrote a version which works well for systems that
   allow callers to specify (virtual) addresses to mmap().
   Unfortunately, such a scheme doesn't work for certain systems like
   HP-UX that have a system-wide virtual->real address map, and
   consequently impose restrictions on the virtual address values
   permitted.

   NB: The mapping scheme in HP-UX is motivated by the inverted page
   table design in some HP processors.

   This alternate implementation allows for the addresses to be
   optionally chosen by the system.  Fortunately, buffer allocation
   doesn't insist upon the contiguous memory which Flinders' scheme
   provides, and this one doesn't provide it.

   We don't really provide for hysteresis here, but add some metering
   to monitor how poorly the allocator actually works.  See the
   documentation for `mmap-hysteresis'.

   This implementation actually cycles through the blocks allocated
   via mmap() and only sends a block to free() if it wasn't one of
   them.  Unfortunately, this is O(n) in the number of mmapped blocks.
   (Not really, as we have a hash table which tries to reduce the
   cost.)  Also, this dereferences the pointer passed, so it would
   cause a segfault if garbage was passed to it.  */

#include <fcntl.h>
#include <sys/mman.h>
#include <stdio.h>

typedef void *VM_ADDR;		/* VM addresses */
static CONST VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails.  */

/* Configuration for relocating allocator.  */

/* #define MMAP_GENERATE_ADDRESSES */
/* Define this if you want Emacs to manage the address table.
   It is not recommended unless you have major problems with the
   default scheme, which allows the OS to pick addresses.  */

/* USELESS_LOWER_ADDRESS_BITS defines the number of bits which can be
   discarded while computing the hash, as they're always zero.  The
   default is appropriate for a page size of 4096 bytes.  */

#define USELESS_LOWER_ADDRESS_BITS 12


/* Size of hash table for inverted VM_ADDR->MMAP_HANDLE lookup */

#define MHASH_PRIME 89


/* Whether we want to enable metering of some ralloc performance.
   This incurs a constant penalty for each mmap operation.  */

#define MMAP_METERING


/* Rename the following to protect against some smartness elsewhere.
   We need access to the allocator used for non-mmap allocation
   elsewhere, in case we get passed a handle that we didn't allocate
   ourselves.  Currently, this default allocator is also used to
   maintain local structures for relocatable blocks.  */

#define UNDERLYING_MALLOC malloc
#define UNDERLYING_FREE free
#define UNDERLYING_REALLOC realloc

/* MAP_ADDRCHOICE_FLAG is set to MAP_FIXED if MMAP_GENERATE_ADDRESSES
   is defined, and MAP_VARIABLE otherwise.  Some losing systems don't
   define the _FIXED/_VARIABLE flags, in which case it is set to 0.  */

#ifdef MMAP_GENERATE_ADDRESSES
# ifdef MAP_FIXED
#  define MAP_ADDRCHOICE_FLAG MAP_FIXED
# endif
#else /* !MMAP_GENERATE_ADDRESSES */
# ifdef MAP_VARIABLE
#  define MAP_ADDRCHOICE_FLAG MAP_VARIABLE
# endif
#endif /* MMAP_GENERATE_ADDRESSES */

/* Default case.  */
#ifndef MAP_ADDRCHOICE_FLAG
# define MAP_ADDRCHOICE_FLAG 0
#endif /* MAP_ADDRCHOICE_FLAG */

#ifdef MAP_ANONYMOUS
# define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG | MAP_ANONYMOUS)
#else
# define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG)
#endif /* MAP_ANONYMOUS */


/* (ptf): A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.

   If we're using mmap this flag has three possible values:
   0 - initial value
   1 - Normal value when running temacs.  In this case buffers
       are allocated using malloc so that any data that they
       contain becomes part of the undumped executable.
   2 - Normal value when running emacs */
static int r_alloc_initialized = 0;

/* (ptf): Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define PAGES_FOR(size) (((unsigned long int) (size) + page_size - 1)/page_size)
#define ROUNDUP(size) ((unsigned long int)PAGES_FOR(size)*page_size)
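
/* For instance (illustrative comment only), with page_size == 4096:
   PAGES_FOR (1) == 1, PAGES_FOR (4096) == 1, PAGES_FOR (4097) == 2,
   so ROUNDUP (4097) == 8192.  */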


/* DEV_ZERO_FD is -1 normally, but for systems without MAP_ANONYMOUS
   points to a file descriptor opened on /dev/zero */

static int DEV_ZERO_FD = -1;


/* We actually need a datastructure that can be usefully structured
   based on the VM address, and allows an ~O(1) lookup on an arbitrary
   address, i.e. a hash-table.  Maybe the XEmacs hash table can be
   coaxed enough.  At the moment, we use lookup on a hash-table to
   decide whether to do an O(n) search on the malloced block list.
   Addresses are hashed to a bucket modulo MHASH_PRIME.  */


/* We settle for a standard doubly-linked-list.  The dynarr type isn't
   very amenable to deletion of items in the middle, so we conjure up
   yet another stupid datastructure.  The structure is maintained as a
   ring, and the singleton ring has the sole element as its left and
   right neighbours.  */

static void init_MHASH_table (void); /* Forward reference */

typedef struct alloc_dll
{
  size_t size;			/* #bytes currently in use */
  size_t space_for;		/* #bytes we really have */
  POINTER* aliased_address;	/* Address of aliased variable, to tweak if relocating */
  VM_ADDR vm_addr;		/* VM address returned by mmap */
  struct alloc_dll *left;	/* Left link in circular doubly linked list */
  struct alloc_dll *right;
} *MMAP_HANDLE;

static MMAP_HANDLE mmap_start = 0; /* Head of linked list */
static size_t page_size = 0;	/* Size of VM pages */
static int mmap_hysteresis;	/* Should be size_t, really. */

/* Get a new handle for a fresh block.  */
static MMAP_HANDLE
new_mmap_handle (size_t nsiz)
{
  MMAP_HANDLE h = UNDERLYING_MALLOC( sizeof( struct alloc_dll ) );
  if ( h == 0) return 0;
  h->size = nsiz;
  if (mmap_start == 0)
    {
      init_MHASH_table ();
      mmap_start = h; mmap_start->left = h; mmap_start->right = h;
    }
  {
    MMAP_HANDLE prev = mmap_start->left;
    MMAP_HANDLE nex = mmap_start;

    /* Four pointers need fixing.  */
    h->right = nex;
    h->left = prev;
    prev->right = h;
    nex->left = h;
  }
  return h;
}

/* Find a handle given the aliased address using linear search.  */
static MMAP_HANDLE
find_mmap_handle_lsearch (POINTER *alias)
{
  MMAP_HANDLE h = mmap_start;
  if (h == 0) return 0;
  do {
    if (h->aliased_address == alias && *alias == h->vm_addr)
      return h;
    h = h->right;
  } while( h != mmap_start );
  return 0;			/* Bogus alias passed.  */
}

/* Free a handle.  */
static void
free_mmap_handle (MMAP_HANDLE h)
{
  MMAP_HANDLE prev = h->left;
  MMAP_HANDLE nex = h->right;
  if (prev == h || nex == h)	/* In fact, this should be && */
    {				/* We're the singleton dll */
      UNDERLYING_FREE( h );	/* Free the sole item */
      mmap_start = 0; return;
    }
  else if (h == mmap_start)
    {
      mmap_start = nex;		/* Make sure mmap_start isn't bogus.  */
    }
  prev->right = nex;
  nex->left = prev;
  UNDERLYING_FREE( h );
}

/* A simple hash table to speed up the inverted lookup of
   VM_ADDR->MMAP_HANDLE.  We maintain the number of hits for a
   particular bucket.  We invalidate a hash table entry during block
   deletion if the hash has cached the deleted block's address.  */

/* Simple hash check.  */
struct {
  int n_hits;			/* How many addresses map to this? */
  MMAP_HANDLE handle;		/* What is the current handle? */
  VM_ADDR addr;			/* What is its VM address? */
} MHASH_HITS[ MHASH_PRIME ];

static void
init_MHASH_table (void)
{
  int i = 0;
  for (; i < MHASH_PRIME; i++)
    {
      MHASH_HITS[i].n_hits = 0;
      MHASH_HITS[i].addr = 0;
      MHASH_HITS[i].handle = 0;
    }
}

/* Compute the hash value for an address.  */
static int
MHASH (VM_ADDR addr)
{
  unsigned int addr_shift = (unsigned int)(addr) >> USELESS_LOWER_ADDRESS_BITS;
  int hval = addr_shift % MHASH_PRIME; /* We could have addresses which are
					  negative when converted to signed
					  ints */
  return ((hval >= 0) ? hval : MHASH_PRIME + hval);
}
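
/* A worked example of the hash (illustrative comment only): for the
   address 0x40321000, discarding the 12 useless low bits gives
   0x40321 == 262945, and 262945 % 89 == 39, so the handle is cached
   in MHASH_HITS[39].  */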

/* Add a VM address with its corresponding handle to the table.  */
static void
MHASH_ADD (VM_ADDR addr, MMAP_HANDLE h)
{
  int kVal = MHASH( addr );
  if (MHASH_HITS[kVal].n_hits++ == 0)
    { /* Only overwrite the table if there were no hits so far. */
      MHASH_HITS[kVal].addr = addr;
      MHASH_HITS[kVal].handle = h;
    }
}

/* Delete a VM address entry from the hash table.  */
static void
MHASH_DEL (VM_ADDR addr)
{
  int kVal = MHASH( addr );
  MHASH_HITS[kVal].n_hits--;
  if (addr == MHASH_HITS[kVal].addr)
    {
      MHASH_HITS[kVal].addr = 0; /* Invalidate cache. */
      MHASH_HITS[kVal].handle = 0;
    }
}
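
/* Illustration of the n_hits bookkeeping (comment only): if two live
   addresses hash to the same bucket, n_hits is 2 and the bucket's
   cached (addr, handle) pair answers only for the first of them;
   find_mmap_handle () below then falls back to the linear search
   whenever n_hits is not exactly 1 or the cached address doesn't
   match.  */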

/* End of hash buckets */

/* Metering malloc performance. */
#ifdef MMAP_METERING
/* If we're metering, we introduce some extra symbols to aid the noble
   cause of bloating XEmacs core size. */

Lisp_Object Qmm_times_mapped;
Lisp_Object Qmm_pages_mapped;
Lisp_Object Qmm_times_unmapped;
Lisp_Object Qmm_times_remapped;
Lisp_Object Qmm_didnt_copy;
Lisp_Object Qmm_pages_copied;
Lisp_Object Qmm_average_bumpval;
Lisp_Object Qmm_wastage;
Lisp_Object Qmm_live_pages;
Lisp_Object Qmm_addr_looked_up;
Lisp_Object Qmm_hash_worked;
Lisp_Object Qmm_addrlist_size;

#define M_Map			0 /* How many times allocated? */
#define M_Pages_Map		1 /* How many pages allocated? */
#define M_Unmap			2 /* How many times freed? */
#define M_Remap			3 /* How many times increased in size? */
#define M_Didnt_Copy		4 /* How many times didn't need to copy? */
#define M_Copy_Pages		5 /* Total # pages copied */
#define M_Average_Bumpval	6 /* Average bump value */
#define M_Wastage		7 /* Remaining (unused space) */
#define M_Live_Pages		8 /* #live pages */
#define M_Address_Lookup	9 /* How many times did we need to check if an addr is in the block? */
#define M_Hash_Worked		10 /* How many times did the simple hash check work? */
#define M_Addrlist_Size		11 /* What is the size of the XEmacs memory map? */

#define N_Meterables		12 /* Total number of meterables */
#define MEMMETER(x) {x;}
#define MVAL(x) (meter[x])
#define MLVAL(x) (make_int (meter[x]))
static int meter[N_Meterables];

DEFUN ("mmap-allocator-status", Fmmap_allocator_status,
       Smmap_allocator_status, 0, 0, 0 /*
Return some information about mmap-based allocator.

mmap-addrlist-size: number of entries in address picking list.
mmap-times-mapped: number of times r_alloc was called.
mmap-pages-mapped: number of pages mapped by r_alloc calls only.
mmap-times-unmapped: number of times r_free was called.
mmap-times-remapped: number of times r_re_alloc was called.
mmap-didnt-copy: number of times re-alloc didn't have to move the block.
mmap-pages-copied: total number of pages copied.
mmap-average-bumpval: average increase in size demanded to re-alloc.
mmap-wastage: total number of bytes allocated, but not currently in use.
mmap-live-pages: total number of pages live.
*/ )
  ()
{
  Lisp_Object result;

  result = Fcons (Fcons (Qmm_addrlist_size, MLVAL (M_Addrlist_Size)), Qnil);
  result = Fcons (Fcons (Qmm_hash_worked, MLVAL (M_Hash_Worked)), result);
  result = Fcons (Fcons (Qmm_addr_looked_up, MLVAL (M_Address_Lookup)), result);
  result = Fcons (Fcons (Qmm_live_pages, MLVAL (M_Live_Pages)), result);
  result = Fcons (Fcons (Qmm_wastage, MLVAL (M_Wastage)), result);
  result = Fcons (Fcons (Qmm_average_bumpval, MLVAL (M_Average_Bumpval)),
		  result);
  result = Fcons (Fcons (Qmm_pages_copied, MLVAL (M_Copy_Pages)), result);
  result = Fcons (Fcons (Qmm_didnt_copy, MLVAL (M_Didnt_Copy)), result);
  result = Fcons (Fcons (Qmm_times_remapped, MLVAL (M_Remap)), result);
  result = Fcons (Fcons (Qmm_times_unmapped, MLVAL (M_Unmap)), result);
  result = Fcons (Fcons (Qmm_pages_mapped, MLVAL (M_Pages_Map)), result);
  result = Fcons (Fcons (Qmm_times_mapped, MLVAL (M_Map)), result);

  return result;
}

#else /* !MMAP_METERING */

#define MEMMETER(x)
#define MVAL(x)

#endif /* MMAP_METERING */

static MMAP_HANDLE
find_mmap_handle (POINTER *alias)
{
  int kval = MHASH( *alias );
  MEMMETER( MVAL(M_Address_Lookup)++ )
  switch( MHASH_HITS[kval].n_hits)
    {
    case 0:
      MEMMETER( MVAL( M_Hash_Worked )++ )
      return 0;

    case 1:
      if (*alias == MHASH_HITS[kval].addr)
	{
	  MEMMETER( MVAL( M_Hash_Worked) ++ );
	  return MHASH_HITS[kval].handle;
	}
      /* FALL THROUGH */
    default:
      return find_mmap_handle_lsearch( alias );
    } /* switch */
}

/*
   Some kernels don't like being asked to pick addresses for mapping
   themselves---IRIX is known to become extremely slow if mmap is
   passed a ZERO as the first argument.  In such cases, we use an
   address map which is managed local to the XEmacs process.  The
   address map maintains an ordered linked list of (address, size,
   occupancy) triples ordered by the absolute address.  Initially, a
   large address area is marked as being empty.  The address picking
   scheme takes bites off the first block which is still empty and
   large enough.  If mmap with the specified address fails, it is
   marked unavailable and not attempted thereafter.  The scheme will
   keep fragmenting the large empty block until it finds an address
   which can be successfully mmapped, or until there are no free
   blocks of the given size left.

   Note that this scheme, given its first-fit strategy, is prone to
   fragmentation of the first part of memory earmarked for this
   purpose.  [ACP Vol I].  We can't use the workaround of using a
   randomized first fit because we don't want to presume too much
   about the memory map.  Instead, we try to coalesce empty or
   unavailable blocks at any available opportunity.  */
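
/* An illustrative trace of the scheme above (comment only; the
   concrete letters are hypothetical).  Starting from one empty block

     (addr = A, sz = N, empty)

   a successful New_Addr_Block (s) splits it into

     (A, s, occupied) -> (A + s, N - s, empty)

   while a failed mmap at A instead yields

     (A, s, unavailable) -> (A + s, N - s, empty)

   and the search moves on; adjacent empty (or adjacent unavailable)
   blocks are later merged by Coalesce_Addr_Blocks ().  */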

static void Addr_Block_initialize(); /* Initialization procedure for address picking scheme */
static VM_ADDR New_Addr_Block( SIZE sz ); /* Get a suitable VM_ADDR via mmap */
static void Free_Addr_Block( VM_ADDR addr, SIZE sz ); /* Free a VM_ADDR allocated via New_Addr_Block */

#ifdef MMAP_GENERATE_ADDRESSES
/* Implementation of the three calls for address picking when XEmacs is in charge */

/* The enum denotes the status of the following block.  */
typedef enum { empty = 0, occupied, unavailable } addr_status;

typedef struct addr_chain
{
  POINTER addr;
  SIZE sz;
  addr_status flag;
  struct addr_chain *next;
} ADDRESS_BLOCK, *ADDRESS_CHAIN;
/* NB: empty and unavailable blocks are concatenated.  */

static ADDRESS_CHAIN addr_chain = 0;
/* Start off the address block chain with a humongous address block
   which is empty to start with.  Note that addr_chain is invariant
   WRT the addition/deletion of address blocks because of the assert
   in Coalesce() and the strict ordering of blocks by their address.
 */
static void Addr_Block_initialize()
{
  MEMMETER( MVAL( M_Addrlist_Size )++)
  addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ));
  addr_chain->next = 0;		/* Last block in chain */
  addr_chain->sz = 0x0c000000;	/* Size */
  addr_chain->addr = (POINTER) (0x04000000 | DATA_SEG_BITS);
  addr_chain->flag = empty;
}

/* Coalesce address blocks if they are contiguous.  Only empty and
   unavailable slots are coalesced.  */
static void Coalesce_Addr_Blocks()
{
  ADDRESS_CHAIN p;
  for (p = addr_chain; p; p = p->next)
    {
      while (p->next && p->flag == p->next->flag)
	{
	  ADDRESS_CHAIN np;
	  np = p->next;

	  if (p->flag == occupied) break; /* No cigar */

	  /* Check if the addresses are contiguous.  */
	  if (p->addr + p->sz != np->addr) break;

	  MEMMETER( MVAL( M_Addrlist_Size )--)
	  /* We can coalesce these two.  */
	  p->sz += np->sz;
	  p->next = np->next;
	  assert( np != addr_chain ); /* We're not freeing the head of the list.  */
	  UNDERLYING_FREE( np );
	}
    } /* for all p */
}

/* Get an empty address block of specified size.  */
static VM_ADDR New_Addr_Block( SIZE sz )
{
  ADDRESS_CHAIN p = addr_chain;
  VM_ADDR new_addr = VM_FAILURE_ADDR;
  for (; p; p = p->next)
    {
      if (p->flag == empty && p->sz > sz)
	{
	  /* Create a new entry following p which is empty.  */
	  ADDRESS_CHAIN remainder = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ) );
	  remainder->next = p->next;
	  remainder->flag = empty;
	  remainder->addr = p->addr + sz;
	  remainder->sz = p->sz - sz;

	  MEMMETER( MVAL( M_Addrlist_Size )++)

	  /* Now make p become an occupied block with the appropriate size */
	  p->next = remainder;
	  p->sz = sz;
	  new_addr = mmap( (VM_ADDR) p->addr, p->sz, PROT_READ|PROT_WRITE,
			   MAP_FLAGS, DEV_ZERO_FD, 0 );
	  if (new_addr == VM_FAILURE_ADDR)
	    {
	      p->flag = unavailable;
	      continue;
	    }
	  p->flag = occupied;
	  break;
	}
    }
  Coalesce_Addr_Blocks();
  return new_addr;
}

/* Free an address block.  We mark the block as being empty, and attempt to
   do any coalescing that may have resulted from this.  */
static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
{
  ADDRESS_CHAIN p = addr_chain;
  for (; p; p = p->next )
    {
      if (p->addr == addr)
	{
	  if (p->sz != sz) abort(); /* ACK! Shouldn't happen at all.  */
	  munmap( (VM_ADDR) p->addr, p->sz );
	  p->flag = empty;
	  break;
	}
    }
  if (!p) abort();		/* Can't happen... we've got a block to free
				   which is not in the address list.  */
  Coalesce_Addr_Blocks();
}
#else /* !MMAP_GENERATE_ADDRESSES */
/* This is an alternate (simpler) implementation in cases where the
   address is picked by the kernel.  */

static void Addr_Block_initialize()
{}				/* Nothing.  */

static VM_ADDR New_Addr_Block( SIZE sz )
{
  return mmap( 0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS,
	       DEV_ZERO_FD, 0 );
}

static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
{
  munmap( addr, sz );
}

#endif /* MMAP_GENERATE_ADDRESSES */


/* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */

/*
   r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start
   address aliased to the first parameter.
 */

POINTER r_alloc (POINTER *ptr, SIZE size);
POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  MMAP_HANDLE mh;

  switch(r_alloc_initialized)
    {
    case 0:
      abort();
    case 1:
      *ptr = UNDERLYING_MALLOC(size);
      break;
    default:
      mh = new_mmap_handle( size );
      if (mh)
	{
	  SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
	  SIZE mmapped_size = ROUNDUP( size + hysteresis );
	  MEMMETER( MVAL(M_Map)++ )
	  MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) )
	  MEMMETER( MVAL(M_Wastage) += mmapped_size - size )
	  MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) )
	  mh->vm_addr = New_Addr_Block( mmapped_size );
	  if (mh->vm_addr == VM_FAILURE_ADDR) {
	    free_mmap_handle( mh ); /* Free the loser */
	    *ptr = 0;
	    return 0;		/* ralloc failed due to mmap() failure.  */
	  }
	  MHASH_ADD( mh->vm_addr, mh );
	  mh->space_for = mmapped_size;
	  mh->aliased_address = ptr;
	  *ptr = mh->vm_addr;
	}
      else
	*ptr = 0;		/* Malloc of block failed */
      break;
    }
  return *ptr;
}

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void r_alloc_free (POINTER *ptr);
void
r_alloc_free (POINTER *ptr)
{
  switch( r_alloc_initialized) {
    case 0:
      abort();

    case 1:
      UNDERLYING_FREE( *ptr );	/* Certain this is from the heap.  */
      break;

    default:
      {
	MMAP_HANDLE dead_handle = find_mmap_handle( ptr );
	/* Check if we've got it.  */
	if (dead_handle == 0)	/* Didn't find it in the list of mmap handles */
	  {
	    UNDERLYING_FREE( *ptr );
	  }
	else
	  {
	    MEMMETER( MVAL( M_Wastage ) -= (dead_handle->space_for - dead_handle->size) )
	    MEMMETER( MVAL( M_Live_Pages ) -= (dead_handle->space_for / page_size ))
	    MEMMETER(MVAL(M_Unmap)++)
	    MHASH_DEL( dead_handle->vm_addr );
	    Free_Addr_Block( dead_handle->vm_addr, dead_handle->space_for );
	    free_mmap_handle (dead_handle);
	  }
      }
      break;
    } /* r_alloc_initialized */
  *ptr = 0;			/* Zap the pointer's contents.  */
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER r_re_alloc (POINTER *ptr, SIZE sz);
POINTER
r_re_alloc (POINTER *ptr, SIZE sz)
{
  if (r_alloc_initialized == 0)
    {
      abort ();
      return 0;			/* suppress compiler warning */
    }
  else if (r_alloc_initialized == 1)
    {
      POINTER tmp = realloc(*ptr, sz);
      if (tmp)
	*ptr = tmp;
      return tmp;
    }
  else
    {
      SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
      SIZE actual_sz = ROUNDUP( sz + hysteresis );
      MMAP_HANDLE h = find_mmap_handle( ptr );
      VM_ADDR new_vm_addr;

      if ( h == 0 )		/* Was allocated using malloc.  */
	{
	  POINTER tmp = UNDERLYING_REALLOC(*ptr, sz);
	  if (tmp)
	    *ptr = tmp;
	  return tmp;
	}

      MEMMETER(
	       MVAL(M_Average_Bumpval) =
	       (((double) MVAL(M_Remap) * MVAL(M_Average_Bumpval)) + (sz - h->size))
	       / (double) (MVAL(M_Remap) + 1))
      MEMMETER(MVAL(M_Remap)++)
      if (h->space_for > sz)	/* We've got some more room */
	{			/* Also, if a shrinkage was asked for.  */
	  MEMMETER( MVAL(M_Didnt_Copy)++ )
	  MEMMETER( MVAL(M_Wastage) -= (sz - h->size))
	  /* We're pretty dumb at handling shrinkage.  We should check for
	     a larger gap than the standard hysteresis allowable, and if so,
	     shrink the number of pages.  Right now, we simply reset the size
	     component and return.  */
	  h->size = sz;
	  return *ptr;
	}

      new_vm_addr = New_Addr_Block( actual_sz );
      if (new_vm_addr == VM_FAILURE_ADDR)
	{			/* Failed to realloc.  */
	  /* *ptr = 0; */
	  return 0;
	}

      MHASH_ADD( new_vm_addr, h );
      /* We got a block OK: now we should move the old contents to the
	 new address.  We use the old size of this block.  */
      memmove(new_vm_addr, h->vm_addr, h->size);
      MHASH_DEL( h->vm_addr );
      Free_Addr_Block( h->vm_addr, h->space_for ); /* Unmap old area.  */

      MEMMETER( MVAL( M_Copy_Pages ) += (h->space_for/page_size) )
      MEMMETER( MVAL( M_Live_Pages ) -= (h->space_for / page_size))
      MEMMETER( MVAL( M_Live_Pages ) += (actual_sz / page_size))
      MEMMETER( MVAL( M_Wastage ) -= (h->space_for - h->size))
      MEMMETER( MVAL( M_Wastage ) += (actual_sz - sz) )

      /* Update block datastructure.  */
      h->space_for = actual_sz;	/* New total space */
      h->size = sz;		/* New (requested) size */
      h->vm_addr = new_vm_addr;	/* New VM start address */
      h->aliased_address = ptr;	/* Change alias to reflect block relocation.  */
      *ptr = h->vm_addr;
      return *ptr;
    }
}


/* Initialize various things for memory allocation.  */
void
init_ralloc (void)
{
  int i = 0;
  if (r_alloc_initialized > 1)
    return;			/* used to return 1 */

  if (++r_alloc_initialized == 1)
    return;			/* used to return 1 */

  Addr_Block_initialize();	/* Initialize the address picker, if required.  */
  page_size = PAGE;
  assert( page_size > 0 );	/* getpagesize() bogosity check.  */

#ifndef MAP_ANONYMOUS
  DEV_ZERO_FD = open( "/dev/zero", O_RDWR );
  if (DEV_ZERO_FD < 0)
    /* Failed.  Perhaps we should abort here?  */
    return;			/* used to return 0 */
#endif

#ifdef MMAP_METERING
  for(i = 0; i < N_Meterables; i++ )
    {
      meter[i] = 0;
    }
#endif /* MMAP_METERING */
}

void
syms_of_ralloc (void)
{
#ifdef MMAP_METERING
  defsymbol( &Qmm_times_mapped, "mmap-times-mapped" );
  defsymbol( &Qmm_pages_mapped, "mmap-pages-mapped" );
  defsymbol( &Qmm_times_unmapped, "mmap-times-unmapped" );
  defsymbol( &Qmm_times_remapped, "mmap-times-remapped" );
  defsymbol( &Qmm_didnt_copy, "mmap-didnt-copy" );
  defsymbol( &Qmm_pages_copied, "mmap-pages-copied" );
  defsymbol( &Qmm_average_bumpval, "mmap-average-bumpval" );
  defsymbol( &Qmm_wastage, "mmap-wastage" );
  defsymbol( &Qmm_live_pages, "mmap-live-pages" );
  defsymbol( &Qmm_addr_looked_up, "mmap-had-to-look-up-address" );
  defsymbol( &Qmm_hash_worked, "mmap-hash-table-worked" );
  defsymbol( &Qmm_addrlist_size, "mmap-addrlist-size" );
  defsubr( &Smmap_allocator_status );
#endif /* MMAP_METERING */
}

void
vars_of_ralloc (void)
{
  DEFVAR_INT ("mmap-hysteresis", &mmap_hysteresis /*
Extra room left at the end of an allocated arena,
so that a re-alloc requesting extra space smaller than this
does not actually cause a new arena to be allocated.

A negative value is considered equal to zero.  This is the
minimum amount of space guaranteed to be left at the end of
the arena.  Because allocation happens in multiples of the OS
page size, it is possible for more space to be left unused.
*/ );
  mmap_hysteresis = 0;
}

#endif /* HAVE_MMAP */