annotate src/ralloc.c @ 5581:56144c8593a8 (xemacs-beta)
Mechanically change INT to FIXNUM in our sources.
src/ChangeLog addition:
2011-10-09 Aidan Kehoe <kehoea@parhasard.net>
[...]
Mechanically change INT (where it refers to non-bignum Lisp
integers) to FIXNUM in our sources. Done for the following
functions, enums, and macros: Lisp_Type_Int_Even,
Lisp_Type_Int_Odd, INT_GCBITS, INT_VALBITS, make_int(), INTP(),
XINT(), CHECK_INT(), XREALINT(), INT_PLUS(), INT_MINUS(),
EMACS_INT_MAX (to MOST_POSITIVE_FIXNUM), EMACS_INT_MIN (to
MOST_NEGATIVE_FIXNUM), NUMBER_FITS_IN_AN_EMACS_INT() to
NUMBER_FITS_IN_A_FIXNUM(), XFLOATINT, XCHAR_OR_INT, INT_OR_FLOAT.
The EMACS_INT typedef was not changed, since it does not describe
non-bignum Lisp integers.
The script that did the change is available at
http://mid.gmane.org/20067.17650.181273.12014@parhasard.net .
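For illustration, a typical hunk from this mechanical rewrite looks like
the following sketch (make_fixnum () appears in the annotated source
below; FIXNUMP () and XFIXNUM () as the replacements for INTP () and
XINT () are assumptions based on the naming pattern listed above):

-  if (INTP (obj))
-    return make_int (XINT (obj) + 1);
+  if (FIXNUMP (obj))
+    return make_fixnum (XFIXNUM (obj) + 1);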
modules/ChangeLog addition:
2011-10-09 Aidan Kehoe <kehoea@parhasard.net>
[...]
Mechanically change INT to FIXNUM, where the usage describes non-bignum
Lisp integers. See the src/ChangeLog entry for more details.
man/ChangeLog addition:
2011-10-09 Aidan Kehoe <kehoea@parhasard.net>
* internals/internals.texi (How Lisp Objects Are Represented in C):
* internals/internals.texi (Integers and Characters):
Mechanically change INT to FIXNUM, where the usage describes non-bignum
Lisp integers.
author: Aidan Kehoe <kehoea@parhasard.net>
date: Sun, 09 Oct 2011 09:51:57 +0100
parents: 308d34e9f07d
/* Block-relocating memory allocator.
   Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Copyright (C) 2010 Ben Wing.

This file is part of XEmacs.

XEmacs is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs.  If not, see <http://www.gnu.org/licenses/>.

Synched up with: FSF 20.2 (non-mmap portion only)
*/
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>		/* for getpagesize() */
#endif

#ifdef emacs

#include "lisp.h"

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the size of the object pointed
   to has a size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else
typedef unsigned char *POINTER;
#endif
#endif /* 0 */

/* Unconditionally use unsigned char * for this.  */
typedef unsigned char *POINTER;

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
#include <malloc.h>
#endif

#include "getpagesize.h"

#include <string.h>
#ifndef NEW_GC
void refill_memory_reserve (void);
#endif /* not NEW_GC */

#else /* Not emacs.  */

#define REGEX_MALLOC_CHECK()

#include <stddef.h>

typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#endif /* emacs.  */

void init_ralloc (void);

#define NIL ((POINTER) 0)
#if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;


/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) (ptrdiff_t size);

/* The break value, as seen by malloc ().  */
static POINTER virtual_break_value;

/* The break value, viewed by the relocatable blocs.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
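/* Worked example (editor's illustration, not in the original source):
   with page_size == 4096, ROUNDUP (5000) == 8192 and
   ROUND_TO_PAGE (5000) == 4096; with MEM_ALIGN == 8, MEM_ROUNDUP (13) == 16.
   All three rely on the power-of-two mask trick (x + n - 1) & ~(n - 1),
   so they assume page_size and MEM_ALIGN are powers of two.  */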
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
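/* Illustrative sketch of a single heap's layout (editor's addition,
   derived from the comment above):

   start            bloc_start                      free             end
     |  malloc data     |    relocatable blocs        |    unused     |
*/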
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze > 0, and will be
   freed when the arena is thawed.  Currently, these blocs are not reusable,
   while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  size_t size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
/* Obtain SIZE bytes of space.  If enough space is not presently available
   in our process reserve, (i.e., (page_break_value - break_value)),
   this means getting more page-aligned space from the system.

   Return non-zero if all went well, or zero if we couldn't allocate
   the memory.  */

/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same
   arguments, you typically get the same value.  It's the caller's
   responsibility to keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (POINTER address, size_t size)
{
  heap_ptr heap;
  size_t already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  assert (heap);

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new_ = (*real_morecore)(0);
      size_t get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new_ != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new_);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) (bloc_start - new_) != new_)
	    return 0;

	  new_heap->start = new_;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}

#if 0
/* Obtain SIZE bytes of space and return a pointer to the new area.
   If we could not allocate the space, return zero.  */

static POINTER
get_more_space (size_t size)
{
  POINTER ptr = break_value;
  if (obtain (size))	/* Note: stale; obtain () now takes (address, size).  */
    return ptr;
  else
    return 0;
}
#endif
/* Return unused heap space at the end of the heap list to the system,
   keeping at least extra_bytes of slack.  This can shrink the last heap,
   or remove it entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  assert (last_heap->first_bloc == NIL_BLOC &&
		  last_heap->last_bloc == NIL_BLOC);

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end -= excess;
	}

      if ((*real_morecore) (- excess) == 0)
	ABORT ();
    }
}

/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long r_alloc_size_in_use (void);
long
r_alloc_size_in_use (void)
{
  return break_value - virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */


/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size_t size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  assert (!r_alloc_freeze_level);

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register size_t s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address += b->size;
      b = b->next;
    }

  return 1;
}
#if 0 /* unused */
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc_ptr bloc, bloc_ptr before)
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
#endif /* unused */
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, size_t size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  size_t old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  assert (!r_alloc_freeze_level);

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  assert (heap != NIL_HEAP);

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  memmove (bloc->new_data, bloc->data, old_size);
	  memset (bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it
   and returning BLOC->size bytes to the free area.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER r_alloc_sbrk (ptrdiff_t size);
POINTER
r_alloc_sbrk (ptrdiff_t size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    init_ralloc ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      size_t get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      size_t excess = (char *)first_heap->bloc_start
	- ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER r_alloc (POINTER *ptr, size_t size);
POINTER
r_alloc (POINTER *ptr, size_t size)
{
  bloc_ptr new_bloc;

  REGEX_MALLOC_CHECK ();

  if (! r_alloc_initialized)
    init_ralloc ();

  new_bloc = get_bloc (size);
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
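/* Usage sketch (editor's illustration; the variable name is hypothetical):
   the caller passes the address of the variable that will hold the data
   pointer, so the allocator can rewrite that variable whenever the bloc
   relocates:

     POINTER buf;
     if (r_alloc (&buf, 1024) == 0)
       return;                  (allocation failed)
     ... use buf, re-reading it after anything that may call
         r_alloc_sbrk, since relocation updates buf in place ...
     r_re_alloc (&buf, 2048);
     r_alloc_free (&buf);       (also stores 0 in buf)
*/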
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void r_alloc_free (POINTER *ptr);
void
r_alloc_free (POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  REGEX_MALLOC_CHECK ();

  if (! r_alloc_initialized)
    init_ralloc ();

  dead_bloc = find_bloc (ptr);
  assert (dead_bloc != NIL_BLOC);

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
#ifndef NEW_GC
  refill_memory_reserve ();
#endif /* not NEW_GC */
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER r_re_alloc (POINTER *ptr, size_t size);
POINTER
r_re_alloc (POINTER *ptr, size_t size)
{
  register bloc_ptr bloc;

  REGEX_MALLOC_CHECK ();

  if (! r_alloc_initialized)
    init_ralloc ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  assert (bloc != NIL_BLOC);

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void r_alloc_freeze (long size);
void
r_alloc_freeze (long size)
{
  if (! r_alloc_initialized)
    init_ralloc ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void r_alloc_thaw (void);
void
r_alloc_thaw (void)
{
  if (! r_alloc_initialized)
    init_ralloc ();

  assert (--r_alloc_freeze_level >= 0);

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and memmove is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
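/* Usage sketch (editor's illustration): code that must hold raw pointers
   into relocatable data across possible allocations brackets the region:

     r_alloc_freeze (0);    (pin all blocs; no relocation until thawed)
     ... work with raw pointers into blocs ...
     r_alloc_thaw ();       (unpin; blocs freed meanwhile are reclaimed)
*/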
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
#ifndef DOUG_LEA_MALLOC
extern POINTER (*__morecore) (ptrdiff_t size);
#endif

/* Initialize various things for memory allocation.  */

void
init_ralloc (void)
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
  __morecore =
#ifdef TYPEOF
    (TYPEOF (__morecore))
#endif
    r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  assert (break_value != NIL);

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

#ifdef DOUG_LEA_MALLOC
  mallopt (M_TOP_PAD, 64 * 4096);
#else
#if 0 /* Hasn't been synched yet */
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void r_alloc_reinit (void);
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if ( (POINTER (*) (ptrdiff_t)) __morecore != r_alloc_sbrk)
    {
      real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
      __morecore =
#ifdef TYPEOF
	(TYPEOF (__morecore))
#endif
	r_alloc_sbrk;
    }
}
#if 0
#ifdef DEBUG

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((size_t) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */
#endif /* 0 */

#endif
#else /* HAVE_MMAP */

/*
   A relocating allocator built using the mmap(2) facility available
   in some OSes.  Based on another version written by Paul Flinders,
   from which code (and comments) are snarfed.

   The OS should support mmap() with MAP_ANONYMOUS attribute, or have
   /dev/zero.  It should support private memory mapping.

   Paul Flinders wrote a version which works well for systems that
   allow callers to specify (virtual) addresses to mmap().
   Unfortunately, such a scheme doesn't work for certain systems like
   HP-UX that have a system-wide virtual->real address map, and
   consequently impose restrictions on the virtual address values
   permitted.

   NB: The mapping scheme in HP-UX is motivated by the inverted page
   table design in some HP processors.

   This alternate implementation allows for the addresses to be
   optionally chosen by the system.  Fortunately, buffer allocation
   doesn't insist upon the contiguous memory which Flinders' scheme
   provides, and this one doesn't provide it.

   We don't really provide for hysteresis here, but add some metering
   to monitor how poorly the allocator actually works.  See the
   documentation for `mmap-hysteresis'.

   This implementation actually cycles through the blocks allocated
   via mmap() and only sends it to free() if it wasn't one of them.
   Unfortunately, this is O(n) in the number of mmapped blocks.  (Not
   really, as we have a hash table which tries to reduce the cost.)
   Also, this dereferences the pointer passed, so it would cause a
   segfault if garbage was passed to it.  */

#include <fcntl.h>
#include <sys/mman.h>
#include <stdio.h>

typedef void *VM_ADDR;		/* VM addresses */
static const VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails.  */

/* Configuration for relocating allocator.  */

/* #define MMAP_GENERATE_ADDRESSES */
/* Define this if you want Emacs to manage the address table.
   It is not recommended unless you have major problems with the
   default scheme, which allows the OS to pick addresses.  */

/* USELESS_LOWER_ADDRESS_BITS defines the number of bits which can be
   discarded while computing the hash, as they're always zero.  The
   default is appropriate for a page size of 4096 bytes.  */

#define USELESS_LOWER_ADDRESS_BITS 12


/* Size of hash table for inverted VM_ADDR->MMAP_HANDLE lookup */

#define MHASH_PRIME 89


/* Whether we want to enable metering of some ralloc performance.
   This incurs a constant penalty for each mmap operation.  */

#define MMAP_METERING


/* Rename the following to protect against some smartness elsewhere.
   We need access to the allocator used for non-mmap allocation
   elsewhere, in case we get passed a handle that we didn't allocate
   ourselves.  Currently, this default allocator is also used to
   maintain local structures for relocatable blocks.  */

#define UNDERLYING_MALLOC malloc
#define UNDERLYING_FREE free
#define UNDERLYING_REALLOC realloc

/* MAP_ADDRCHOICE_FLAG is set to MAP_FIXED if MMAP_GENERATE_ADDRESSES
   is defined, and MAP_VARIABLE otherwise.  Some losing systems don't
   define the _FIXED/_VARIABLE flags, in which case it is set to 0 */

#ifdef MMAP_GENERATE_ADDRESSES
# ifdef MAP_FIXED
#  define MAP_ADDRCHOICE_FLAG MAP_FIXED
# endif
#else /* !MMAP_GENERATE_ADDRESSES */
# ifdef MAP_VARIABLE
#  define MAP_ADDRCHOICE_FLAG MAP_VARIABLE
# endif
#endif /* MMAP_GENERATE_ADDRESSES */

/* Default case.  */
#ifndef MAP_ADDRCHOICE_FLAG
# define MAP_ADDRCHOICE_FLAG 0
#endif /* MAP_ADDRCHOICE_FLAG */

#ifdef MAP_ANONYMOUS
# define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG | MAP_ANONYMOUS)
#else
# define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG)
#endif /* MAP_ANONYMOUS */


/* (ptf): A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.

   If we're using mmap this flag has three possible values
   0 - initial value
   1 - Normal value when running temacs.  In this case buffers
       are allocated using malloc so that any data that they
       contain becomes part of the undumped executable.
   2 - Normal value when running emacs */
static int r_alloc_initialized = 0;

/* (ptf): Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define PAGES_FOR(size) (((unsigned long int) (size) + page_size - 1)/page_size)
#define ROUNDUP(size) ((unsigned long int)PAGES_FOR(size)*page_size)
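/* Worked example (editor's illustration, not in the original source):
   with page_size == 4096, PAGES_FOR (5000) == 2 and ROUNDUP (5000) == 8192.
   Unlike the masking ROUNDUP in the sbrk-based allocator above, this one
   rounds via a page count, so it does not require page_size to be a
   power of two.  */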
/* DEV_ZERO_FD is -1 normally, but for systems without MAP_ANONYMOUS
   points to a file descriptor opened on /dev/zero */

static int DEV_ZERO_FD = -1;


/* We actually need a data structure that can be usefully structured
   based on the VM address, and allows an ~O(1) lookup on an arbitrary
   address, i.e. a hash table.  Maybe the XEmacs hash table can be
   coaxed enough.  At the moment, we use lookup on a hash table to
   decide whether to do an O(n) search on the malloced block list.
   Addresses are hashed to a bucket modulo MHASH_PRIME.  */


/* We settle for a standard doubly-linked-list.  The dynarr type isn't
   very amenable to deletion of items in the middle, so we conjure up
   yet another stupid datastructure.  The structure is maintained as a
   ring, and the singleton ring has the sole element as its left and
   right neighbours.  */

static void init_MHASH_table (void); /* Forward reference */

typedef struct alloc_dll
{
  size_t size;			/* #bytes currently in use */
  size_t space_for;		/* #bytes we really have */
  POINTER* aliased_address;	/* Address of aliased variable, to tweak if relocating */
  VM_ADDR vm_addr;		/* VM address returned by mmap */
  struct alloc_dll *left;	/* Left link in circular doubly linked list */
  struct alloc_dll *right;
} *MMAP_HANDLE;

static MMAP_HANDLE mmap_start = 0; /* Head of linked list */
static size_t page_size = 0;	   /* Size of VM pages */
static Fixnum mmap_hysteresis;	   /* Logically a "size_t" */

/* Get a new handle for a fresh block.  */
static MMAP_HANDLE
new_mmap_handle (size_t nsiz)
{
  MMAP_HANDLE h = (MMAP_HANDLE) UNDERLYING_MALLOC (sizeof (struct alloc_dll));
  if (h == 0) return 0;
  h->size = nsiz;
  if (mmap_start == 0)
    {
      init_MHASH_table ();
      mmap_start = h; mmap_start->left = h; mmap_start->right = h;
    }
  {
    MMAP_HANDLE prev = mmap_start->left;
    MMAP_HANDLE nex = mmap_start;

    /* Four pointers need fixing.  */
    h->right = nex;
    h->left = prev;
    prev->right = h;
    nex->left = h;
  }
  return h;
}
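/* Illustrative ring states (editor's addition): a singleton ring has
   h->left == h->right == h; after inserting a second handle g at
   mmap_start's left, mmap_start->left == g, mmap_start->right == g,
   g->left == mmap_start, and g->right == mmap_start.  */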
/* Find a handle given the aliased address using linear search.  */
static MMAP_HANDLE
find_mmap_handle_lsearch (POINTER *alias)
{
  MMAP_HANDLE h = mmap_start;
  if (h == 0) return 0;
  do {
    if (h->aliased_address == alias && *alias == h->vm_addr)
      return h;
    h = h->right;
  } while (h != mmap_start);
  return 0;			/* Bogus alias passed.  */
}

/* Free a handle.  */
static void
free_mmap_handle (MMAP_HANDLE h)
{
  MMAP_HANDLE prev = h->left;
  MMAP_HANDLE nex = h->right;
  if (prev == h || nex == h)	/* In fact, this should be && */
    {				/* We're the singleton dll */
      UNDERLYING_FREE (h);	/* Free the sole item */
      mmap_start = 0; return;
    }
  else if (h == mmap_start)
    {
      mmap_start = nex;		/* Make sure mmap_start isn't bogus.  */
    }
  prev->right = nex;
  nex->left = prev;
  UNDERLYING_FREE (h);
}

/* A simple hash table to speed up the inverted lookup of
   VM_ADDR->MMAP_HANDLE.  We maintain the number of hits for a
   particular bucket.  We invalidate a hash table entry during block
   deletion if the hash has cached the deleted block's address.  */

/* Simple hash check.  */
struct {
  int n_hits;			/* How many addresses map to this?  */
  MMAP_HANDLE handle;		/* What is the current handle?  */
  VM_ADDR addr;			/* What is its VM address?  */
} MHASH_HITS[ MHASH_PRIME ];

static void
init_MHASH_table (void)
{
  int i = 0;
  for (; i < MHASH_PRIME; i++)
    {
      MHASH_HITS[i].n_hits = 0;
      MHASH_HITS[i].addr = 0;
      MHASH_HITS[i].handle = 0;
    }
}

/* Compute the hash value for an address.  */
static int
MHASH (VM_ADDR addr)
{
#if (LONGBITS == 64)
  unsigned long int addr_shift = (unsigned long int)(addr) >> USELESS_LOWER_ADDRESS_BITS;
#else
  unsigned int addr_shift = (unsigned int)(addr) >> USELESS_LOWER_ADDRESS_BITS;
#endif
  int hval = addr_shift % MHASH_PRIME; /* We could have addresses which are -ve
					  when converted to signed ints */
  return ((hval >= 0) ? hval : MHASH_PRIME + hval);
}
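/* Example (editor's illustration): with the default 12 useless low bits,
   an address of 0x0804a000 shifts down to 0x0804a == 32842, and
   32842 % 89 == 1, so that address hashes to bucket 1.  The final
   correction above handles the case where the modulus of a value that
   became negative as a signed int is itself negative.  */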
1540 | |
1541 /* Add a VM address with its corresponding handle to the table. */ | |
1542 static void | |
1543 MHASH_ADD (VM_ADDR addr, MMAP_HANDLE h) | |
1544 { | |
1545 int kVal = MHASH( addr ); | |
1546 if (MHASH_HITS[kVal].n_hits++ == 0) | |
1547 { /* Only overwrite the table if there were no hits so far. */ | |
1548 MHASH_HITS[kVal].addr = addr; | |
1549 MHASH_HITS[kVal].handle = h; | |
1550 } | |
1551 } | |
1552 | |
1553 /* Delete a VM address entry from the hash table. */ | |
1554 static void | |
1555 MHASH_DEL (VM_ADDR addr) | |
1556 { | |
1557 int kVal = MHASH( addr ); | |
1558 MHASH_HITS[kVal].n_hits--; | |
1559 if (addr == MHASH_HITS[kVal].addr) | |
1560 { | |
1561 MHASH_HITS[kVal].addr = 0; /* Invalidate cache. */ | |
1562 MHASH_HITS[kVal].handle = 0; | |
1563 } | |
1564 } | |
1565 | |
1566 /* End of hash buckets */ | |
1567 | |
1568 /* Metering malloc performance. */ | |
1569 #ifdef MMAP_METERING | |
1570 /* If we're metering, we introduce some extra symbols to aid the noble | |
1571 cause of bloating XEmacs core size. */ | |
1572 | |
1573 static Lisp_Object Qmmap_times_mapped; | |
1574 static Lisp_Object Qmmap_pages_mapped; | |
1575 static Lisp_Object Qmmap_times_unmapped; | |
1576 static Lisp_Object Qmmap_times_remapped; | |
1577 static Lisp_Object Qmmap_didnt_copy; | |
1578 static Lisp_Object Qmmap_pages_copied; | |
1579 static Lisp_Object Qmmap_average_bumpval; | |
1580 static Lisp_Object Qmmap_wastage; | |
1581 static Lisp_Object Qmmap_live_pages; | |
1582 static Lisp_Object Qmmap_addr_looked_up; | |
1583 static Lisp_Object Qmmap_hash_worked; | |
1584 static Lisp_Object Qmmap_addrlist_size; | |
1585 | |
1586 #define M_Map 0 /* How many times allocated? */ | |
1587 #define M_Pages_Map 1 /* How many pages allocated? */ | |
1588 #define M_Unmap 2 /* How many times freed? */ | |
1589 #define M_Remap 3 /* How many times increased in size? */ | |
1590 #define M_Didnt_Copy 4 /* How many times didn't need to copy? */ | |
1591 #define M_Copy_Pages 5 /* Total # pages copied */ | |
1592 #define M_Average_Bumpval 6 /* Average bump value */ | |
1593 #define M_Wastage 7 /* Remaining (unused space) */ | |
1594 #define M_Live_Pages 8 /* #live pages */ | |
1595 #define M_Address_Lookup 9 /* How many times did we need to check if an addr is in the block? */ | |
1596 #define M_Hash_Worked 10 /* How many times did the simple hash check work? */ | |
1597 #define M_Addrlist_Size 11 /* What is the size of the XEmacs memory map? */ | |
1598 | |
1599 #define N_Meterables 12 /* Total number of meterables */ | |
1600 #define MEMMETER(x) {x;} | |
1601 #define MVAL(x) (meter[x]) | |
5581 | 1602 #define MLVAL(x) (make_fixnum (meter[x])) |
428 | 1603 static int meter[N_Meterables]; |
1604 | |
1605 DEFUN ("mmap-allocator-status", Fmmap_allocator_status, 0, 0, 0, /* | |
1606 Return some information about the mmap-based allocator. | |
1607 | |
1608 mmap-times-mapped: number of times r_alloc was called. | |
1609 mmap-pages-mapped: number of pages mapped by r_alloc calls only. | |
1610 mmap-times-unmapped: number of times r_free was called. | |
1611 mmap-times-remapped: number of times r_re_alloc was called. | |
1612 mmap-didnt-copy: number of times re-alloc did NOT have to move the block. | |
1613 mmap-pages-copied: total number of pages copied. | |
1614 mmap-average-bumpval: average increase in size demanded to re-alloc. | |
1615 mmap-wastage: total number of bytes allocated, but not currently in use. | |
1616 mmap-live-pages: total number of pages live. | |
1617 mmap-addr-looked-up: total number of times needed to check if addr is in block. | |
1618 mmap-hash-worked: total number of times the simple hash check worked. | |
1619 mmap-addrlist-size: number of entries in address picking list. | |
1620 */ | |
1621 ()) | |
1622 { | |
1623 Lisp_Object result = Qnil; | |
1624 | |
1625 result = cons3 (Qmmap_addrlist_size, MLVAL (M_Addrlist_Size), result); | |
1626 result = cons3 (Qmmap_hash_worked, MLVAL (M_Hash_Worked), result); | |
1627 result = cons3 (Qmmap_addr_looked_up, MLVAL (M_Address_Lookup), result); | |
1628 result = cons3 (Qmmap_live_pages, MLVAL (M_Live_Pages), result); | |
1629 result = cons3 (Qmmap_wastage, MLVAL (M_Wastage), result); | |
1630 result = cons3 (Qmmap_average_bumpval, MLVAL (M_Average_Bumpval), result); | |
1631 result = cons3 (Qmmap_pages_copied, MLVAL (M_Copy_Pages), result); | |
1632 result = cons3 (Qmmap_didnt_copy, MLVAL (M_Didnt_Copy), result); | |
1633 result = cons3 (Qmmap_times_remapped, MLVAL (M_Remap), result); | |
1634 result = cons3 (Qmmap_times_unmapped, MLVAL (M_Unmap), result); | |
1635 result = cons3 (Qmmap_pages_mapped, MLVAL (M_Pages_Map), result); | |
1636 result = cons3 (Qmmap_times_mapped, MLVAL (M_Map), result); | |
1637 | |
1638 return result; | |
1639 } | |
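/* For instance (all values hypothetical), in a build with MMAP_METERING:
   (mmap-allocator-status)
   => (mmap-times-mapped 42 mmap-pages-mapped 128 ... mmap-addrlist-size 3)
   i.e. a property list built up by the cons3 calls above, with
   mmap-times-mapped first because it is consed on last. */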
1640 | |
1641 #else /* !MMAP_METERING */ | |
1642 | |
1643 #define MEMMETER(x) | |
1644 #define MVAL(x) | |
1645 | |
1646 #endif /* MMAP_METERING */ | |
1647 | |
1648 static MMAP_HANDLE | |
1649 find_mmap_handle (POINTER *alias) | |
1650 { | |
1651 int kval = MHASH( *alias ); | |
1652 MEMMETER( MVAL(M_Address_Lookup)++ ) | |
1653 switch( MHASH_HITS[kval].n_hits) | |
1654 { | |
1655 case 0: | |
1656 MEMMETER( MVAL( M_Hash_Worked )++ ) | |
1657 return 0; | |
1658 | |
1659 case 1: | |
1660 if (*alias == MHASH_HITS[kval].addr) | |
1661 { | |
1662 MEMMETER( MVAL( M_Hash_Worked) ++ ); | |
1663 return MHASH_HITS[kval].handle; | |
1664 } | |
1665 /* FALL THROUGH */ | |
1666 default: | |
1667 return find_mmap_handle_lsearch( alias ); | |
1668 } /* switch */ | |
1669 } | |
1670 | |
1671 /* | |
1672 Some kernels don't like being asked to pick addresses for mapping | |
1673 themselves---IRIX is known to become extremely slow if mmap is | |
1674 passed a ZERO as the first argument. In such cases, we use an | |
1675 address map which is managed local to the XEmacs process. The | |
1676 address map which is managed locally within the XEmacs process. The | |
1677 occupancy) triples ordered by the absolute address. Initially, a | |
1678 large address area is marked as being empty. The address picking | |
1679 scheme takes bites off the first block which is still empty and | |
1680 large enough. If mmap with the specified address fails, it is | |
1681 marked unavailable and not attempted thereafter. The scheme will | |
1682 keep fragmenting the large empty block until it finds an address | |
1683 which can be successfully mmapped, or until there are no free | |
1684 blocks of the given size left. | |
1685 | |
1686 Note that this scheme, given its first-fit strategy, is prone to | |
1687 fragmentation of the first part of memory earmarked for this | |
1688 purpose [ACP Vol I]. We can't use the workaround of a randomized | |
1689 first fit because we don't want to presume too much about the | |
1690 memory map. Instead, we try to coalesce empty or unavailable | |
1691 blocks at every opportunity. */ | |
1692 | |
1693 /* Initialization procedure for address picking scheme */ | |
1694 static void Addr_Block_initialize(void); | |
1695 | |
1696 /* Get a suitable VM_ADDR via mmap */ | |
440 | 1697 static VM_ADDR New_Addr_Block (size_t sz); |
428 | 1698 |
1699 /* Free a VM_ADDR allocated via New_Addr_Block */ | |
440 | 1700 static void Free_Addr_Block (VM_ADDR addr, size_t sz); |
428 | 1701 |
1702 #ifdef MMAP_GENERATE_ADDRESSES | |
1703 /* Implementation of the three calls for address picking when XEmacs is in charge */ | |
1704 | |
1705 /* The enum denotes the status of the following block. */ | |
1706 typedef enum { empty = 0, occupied, unavailable } addr_status; | |
1707 | |
1708 typedef struct addr_chain | |
1709 { | |
1710 POINTER addr; | |
440 | 1711 size_t sz; |
428 | 1712 addr_status flag; |
1713 struct addr_chain *next; | |
1714 } ADDRESS_BLOCK, *ADDRESS_CHAIN; | |
1715 /* NB: empty and unavailable blocks are concatenated. */ | |
1716 | |
1717 static ADDRESS_CHAIN addr_chain = 0; | |
1718 /* Start off the address block chain with a humongous address block | |
1719 which is empty to start with. Note that addr_chain itself is | |
1720 invariant with respect to the addition and deletion of address | |
1721 blocks, because of the assert in Coalesce_Addr_Blocks() and the | |
1722 strict ordering of blocks by their address. */ | |
440 | 1723 static void |
1724 Addr_Block_initialize (void) | |
428 | 1725 { |
1726 MEMMETER( MVAL( M_Addrlist_Size )++) | |
1727 addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK )); | |
1728 addr_chain->next = 0; /* Last block in chain */ | |
1729 addr_chain->sz = 0x0c000000; /* Size */ | |
442 | 1730 addr_chain->addr = (POINTER) (0x04000000); |
428 | 1731 addr_chain->flag = empty; |
1732 } | |
1733 | |
1734 /* Coalesce address blocks if they are contiguous. Only empty and | |
1735 unavailable slots are coalesced. */ | |
440 | 1736 static void |
1737 Coalesce_Addr_Blocks (void) | |
428 | 1738 { |
1739 ADDRESS_CHAIN p; | |
1740 for (p = addr_chain; p; p = p->next) | |
1741 { | |
1742 while (p->next && p->flag == p->next->flag) | |
1743 { | |
1744 ADDRESS_CHAIN np; | |
1745 np = p->next; | |
1746 | |
1747 if (p->flag == occupied) break; /* No cigar */ | |
1748 | |
1749 /* Check if the addresses are contiguous. */ | |
1750 if (p->addr + p->sz != np->addr) break; | |
1751 | |
1752 MEMMETER( MVAL( M_Addrlist_Size )--) | |
1753 /* We can coalesce these two. */ | |
1754 p->sz += np->sz; | |
1755 p->next = np->next; | |
1756 assert( np != addr_chain ); /* We're not freeing the head of the list. */ | |
1757 UNDERLYING_FREE( np ); | |
1758 } | |
1759 } /* for all p */ | |
1760 } | |
1761 | |
1762 /* Get an empty address block of specified size. */ | |
440 | 1763 static VM_ADDR |
1764 New_Addr_Block (size_t sz) | |
428 | 1765 { |
1766 ADDRESS_CHAIN p = addr_chain; | |
1767 VM_ADDR new_addr = VM_FAILURE_ADDR; | |
1768 for (; p; p = p->next) | |
1769 { | |
1770 if (p->flag == empty && p->sz > sz) | |
1771 { | |
1772 /* Create a new entry following p which is empty. */ | |
1773 ADDRESS_CHAIN remainder = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ) ); | |
1774 remainder->next = p->next; | |
1775 remainder->flag = empty; | |
1776 remainder->addr = p->addr + sz; | |
1777 remainder->sz = p->sz - sz; | |
1778 | |
1779 MEMMETER( MVAL( M_Addrlist_Size )++) | |
1780 | |
1781 /* Now make p become an occupied block with the appropriate size */ | |
1782 p->next = remainder; | |
1783 p->sz = sz; | |
1784 new_addr = mmap( (VM_ADDR) p->addr, p->sz, PROT_READ|PROT_WRITE, | |
1785 MAP_FLAGS, DEV_ZERO_FD, 0 ); | |
1786 if (new_addr == VM_FAILURE_ADDR) | |
1787 { | |
1788 p->flag = unavailable; | |
1789 continue; | |
1790 } | |
1791 p->flag = occupied; | |
1792 break; | |
1793 } | |
1794 } | |
1795 Coalesce_Addr_Blocks(); | |
1796 return new_addr; | |
1797 } | |
1798 | |
1799 /* Free an address block. We mark the block as being empty, and attempt to | |
1800 do any coalescing that may have resulted from this. */ | |
440 | 1801 static void |
1802 Free_Addr_Block (VM_ADDR addr, size_t sz) | |
428 | 1803 { |
1804 ADDRESS_CHAIN p = addr_chain; | |
1805 for (; p; p = p->next ) | |
1806 { | |
1807 if (p->addr == addr) | |
1808 { | |
5050 | 1809 assert (p->sz == sz); /* ACK! Shouldn't happen at all. */ |
428 | 1810 munmap( (VM_ADDR) p->addr, p->sz ); |
1811 p->flag = empty; | |
1812 break; | |
1813 } | |
1814 } | |
5050 | 1815 assert (p); /* Can't happen... we've got a block to free which is not in |
428 | 1816 the address list. */ |
1817 Coalesce_Addr_Blocks(); | |
1818 } | |
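#if 0
/* A minimal sketch, never compiled, of the first-fit carve and the
   coalesce on free described above.  The function name is hypothetical;
   the start address and sizes are the ones chosen by
   Addr_Block_initialize(), and the mmap calls are assumed to succeed. */
static void
addr_block_example (void)
{
  /* Initial chain: (0x04000000, 0x0c000000, empty).			*/
  VM_ADDR a = New_Addr_Block (0x10000);
  /* Now: (0x04000000, 0x10000, occupied)
	  -> (0x04010000, 0x0bff0000, empty).				*/
  Free_Addr_Block (a, 0x10000);
  /* Both blocks are empty and contiguous again, so
     Coalesce_Addr_Blocks() merges them back into the original
     single 0x0c000000 block.						*/
}
#endif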
1819 #else /* !MMAP_GENERATE_ADDRESSES */ | |
1820 /* This is an alternate (simpler) implementation in cases where the | |
1821 address is picked by the kernel. */ | |
1822 | |
440 | 1823 static void |
1824 Addr_Block_initialize (void) | |
428 | 1825 { |
1826 /* Nothing. */ | |
1827 } | |
1828 | |
440 | 1829 static VM_ADDR |
1830 New_Addr_Block (size_t sz) | |
428 | 1831 { |
1832 return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS, | |
1833 DEV_ZERO_FD, 0 ); | |
1834 } | |
1835 | |
440 | 1836 static void |
1837 Free_Addr_Block (VM_ADDR addr, size_t sz) | |
428 | 1838 { |
1839 munmap ((caddr_t) addr, sz ); | |
1840 } | |
1841 | |
1842 #endif /* MMAP_GENERATE_ADDRESSES */ | |
1843 | |
1844 | |
1845 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */ | |
1846 | |
1847 /* | |
440 | 1848 r_alloc (POINTER, SIZE): Allocate a relocatable area with the start |
428 | 1849 address aliased to the first parameter. |
1850 */ | |
1851 | |
440 | 1852 POINTER r_alloc (POINTER *ptr, size_t size); |
428 | 1853 POINTER |
440 | 1854 r_alloc (POINTER *ptr, size_t size) |
428 | 1855 { |
1856 MMAP_HANDLE mh; | |
1857 | |
1333 | 1858 REGEX_MALLOC_CHECK (); |
1859 | |
428 | 1860 switch(r_alloc_initialized) |
1861 { | |
1862 case 0: | |
2500 | 1863 ABORT(); |
428 | 1864 case 1: |
1865 *ptr = (POINTER) UNDERLYING_MALLOC(size); | |
1866 break; | |
1867 default: | |
1868 mh = new_mmap_handle( size ); | |
1869 if (mh) | |
1870 { | |
440 | 1871 size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); |
1872 size_t mmapped_size = ROUNDUP( size + hysteresis ); | |
428 | 1873 MEMMETER( MVAL(M_Map)++ ) |
1874 MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) ) | |
1875 MEMMETER( MVAL(M_Wastage) += mmapped_size - size ) | |
1876 MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) ) | |
1877 mh->vm_addr = New_Addr_Block( mmapped_size ); | |
1878 if (mh->vm_addr == VM_FAILURE_ADDR) { | |
1879 free_mmap_handle( mh ); /* Free the loser */ | |
1880 *ptr = 0; | |
1881 return 0; /* ralloc failed due to mmap() failure. */ | |
1882 } | |
1883 MHASH_ADD( mh->vm_addr, mh ); | |
1884 mh->space_for = mmapped_size; | |
1885 mh->aliased_address = ptr; | |
1886 *ptr = (POINTER) mh->vm_addr; | |
1887 } | |
1888 else | |
1889 *ptr = 0; /* Malloc of block failed */ | |
1890 break; | |
1891 } | |
1892 return *ptr; | |
1893 } | |
1894 | |
1895 /* Free a bloc of relocatable storage whose data is pointed to by PTR. | |
1896 Store 0 in *PTR to show there's no block allocated. */ | |
1897 | |
1898 void r_alloc_free (POINTER *ptr); | |
1899 void | |
1900 r_alloc_free (POINTER *ptr) | |
1901 { | |
1333 | 1902 REGEX_MALLOC_CHECK (); |
1903 | |
428 | 1904 switch( r_alloc_initialized) { |
1905 case 0: | |
2500 | 1906 ABORT(); |
428 | 1907 |
1908 case 1: | |
1909 UNDERLYING_FREE( *ptr ); /* Certain this is from the heap. */ | |
1910 break; | |
1911 | |
1912 default: | |
1913 { | |
1914 MMAP_HANDLE dead_handle = find_mmap_handle( ptr ); | |
1915 /* Check if we've got it. */ | |
1916 if (dead_handle == 0) /* Didn't find it in the list of mmap handles */ | |
1917 { | |
1918 UNDERLYING_FREE( *ptr ); | |
1919 } | |
1920 else | |
1921 { | |
1922 MEMMETER( MVAL( M_Wastage ) -= (dead_handle->space_for - dead_handle->size) ) | |
1923 MEMMETER( MVAL( M_Live_Pages ) -= (dead_handle->space_for / page_size )) | |
1924 MEMMETER(MVAL(M_Unmap)++) | |
1925 MHASH_DEL( dead_handle->vm_addr ); | |
1926 Free_Addr_Block( dead_handle->vm_addr, dead_handle->space_for ); | |
1927 free_mmap_handle (dead_handle); | |
1928 } | |
1929 } | |
1930 break; | |
1931 } /* r_alloc_initialized */ | |
1932 *ptr = 0; /* Zap the pointer's contents. */ | |
1933 } | |
1934 | |
1935 /* Given a pointer at address PTR to relocatable data, resize it to SIZE. | |
1936 | |
1937 Change *PTR to reflect the new bloc, and return this value. | |
1938 | |
1939 If more memory cannot be allocated, then leave *PTR unchanged, and | |
1940 return zero. */ | |
1941 | |
440 | 1942 POINTER r_re_alloc (POINTER *ptr, size_t sz); |
428 | 1943 POINTER |
440 | 1944 r_re_alloc (POINTER *ptr, size_t sz) |
428 | 1945 { |
1333 | 1946 REGEX_MALLOC_CHECK (); |
1947 | |
428 | 1948 if (r_alloc_initialized == 0) |
1949 { | |
2500 | 1950 ABORT (); |
428 | 1951 return 0; /* suppress compiler warning */ |
1952 } | |
1953 else if (r_alloc_initialized == 1) | |
1954 { | |
1955 POINTER tmp = (POINTER) UNDERLYING_REALLOC(*ptr, sz); | |
1956 if (tmp) | |
1957 *ptr = tmp; | |
1958 return tmp; | |
1959 } | |
1960 else | |
1961 { | |
440 | 1962 size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); |
1963 size_t actual_sz = ROUNDUP( sz + hysteresis ); | |
428 | 1964 MMAP_HANDLE h = find_mmap_handle( ptr ); |
1965 VM_ADDR new_vm_addr; | |
1966 | |
1967 if ( h == 0 ) /* Was allocated using malloc. */ | |
1968 { | |
1969 POINTER tmp = (POINTER) UNDERLYING_REALLOC(*ptr, sz); | |
1970 if (tmp) | |
1971 *ptr = tmp; | |
1972 return tmp; | |
1973 } | |
1974 | |
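      /* The metered average below is the standard incremental mean:
	 with n == M_Remap prior samples of mean m, the mean after a
	 new sample x is (n * m + x) / (n + 1); here x is the size
	 bump sz - h->size. */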
1975 MEMMETER( | |
1976 MVAL(M_Average_Bumpval) = | |
1977 (((double) MVAL(M_Remap) * MVAL(M_Average_Bumpval)) + (sz - h->size)) | |
1978 / (double) (MVAL(M_Remap) + 1)) | |
1979 MEMMETER(MVAL(M_Remap)++) | |
1980 if (h->space_for > sz) /* We've got some more room */ | |
1981 { /* Also, if a shrinkage was asked for. */ | |
1982 MEMMETER( MVAL(M_Didnt_Copy)++ ) | |
1983 MEMMETER( MVAL(M_Wastage) -= (sz - h->size)) | |
1984 /* We're pretty dumb at handling shrinkage. We should check for | |
1985 a larger gap than the standard hysteresis allows, and if so, | |
1986 shrink the number of pages. Right now, we simply reset the size | |
1987 component and return. */ | |
1988 h->size = sz; | |
1989 return *ptr; | |
1990 } | |
1991 | |
1992 new_vm_addr = New_Addr_Block( actual_sz ); | |
1993 if (new_vm_addr == VM_FAILURE_ADDR) | |
1994 {/* Failed to realloc. */ | |
1995 /* *ptr = 0; */ | |
1996 return 0; | |
1997 } | |
1998 | |
1999 MHASH_ADD( new_vm_addr, h ); | |
2000 /* We got a block OK: now we should move the old contents to the | |
2001 new address. We use the old size of this block. */ | |
2002 memmove(new_vm_addr, h->vm_addr, h->size); | |
2003 MHASH_DEL( h->vm_addr ); | |
2004 Free_Addr_Block( h->vm_addr, h->space_for ); /* Unmap old area. */ | |
2005 | |
2006 MEMMETER( MVAL( M_Copy_Pages ) += (h->space_for/page_size) ) | |
2007 MEMMETER( MVAL( M_Live_Pages ) -= (h->space_for / page_size)) | |
2008 MEMMETER( MVAL( M_Live_Pages ) += (actual_sz / page_size)) | |
2009 MEMMETER( MVAL( M_Wastage ) -= (h->space_for - h->size)) | |
2010 MEMMETER( MVAL( M_Wastage ) += (actual_sz - sz) ) | |
2011 | |
2012 /* Update block datastructure. */ | |
2013 h->space_for = actual_sz; /* New total space */ | |
2014 h->size = sz; /* New (requested) size */ | |
2015 h->vm_addr = new_vm_addr; /* New VM start address */ | |
2016 h->aliased_address = ptr; /* Change alias to reflect block relocation. */ | |
2017 *ptr = (POINTER) h->vm_addr; | |
2018 return *ptr; | |
2019 } | |
2020 } | |
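#if 0
/* A minimal sketch, never compiled: the aliased-pointer protocol shared
   by r_alloc, r_re_alloc and r_alloc_free.  The caller passes the address
   of its own pointer; the allocator stores the block's address through
   that alias and may rewrite it when r_re_alloc relocates the block, so
   the caller must always go through the alias.  The names below are
   hypothetical. */
static POINTER relocatable_text;

static void
relocation_example (void)
{
  if (r_alloc (&relocatable_text, 4096) == 0)
    return;				  /* Allocation failed.		    */
  /* ... use the block through relocatable_text ...			    */
  if (r_re_alloc (&relocatable_text, 8192))
    {
      /* The block may have moved, but relocatable_text already points
	 at its new address.  On failure it is left unchanged.		    */
    }
  r_alloc_free (&relocatable_text);	  /* Unmaps; sets the alias to 0.   */
}
#endif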
2021 | |
2022 | |
2023 /* Initialize various things for memory allocation. | |
2024 */ | |
2025 void | |
2026 init_ralloc (void) | |
2027 { | |
2028 int i = 0; | |
2029 if (r_alloc_initialized > 1) | |
2030 return; /* used to return 1 */ | |
2031 | |
909 | 2032 #ifdef PDUMP |
2033 /* Under pdump, we need to activate ralloc on the first go. */ | |
2034 ++r_alloc_initialized; | |
2035 #endif | |
428 | 2036 if (++r_alloc_initialized == 1) |
2037 return; /* used to return 1 */ | |
2038 | |
2039 Addr_Block_initialize(); /* Initialize the address picker, if required. */ | |
2040 page_size = PAGE; | |
2041 assert( page_size > 0 ); /* getpagesize() bogosity check. */ | |
2042 | |
2043 #ifndef MAP_ANONYMOUS | |
2044 DEV_ZERO_FD = open( "/dev/zero", O_RDWR ); | |
2045 if (DEV_ZERO_FD < 0) | |
2046 /* Failed. Perhaps we should abort here? */ | |
2047 return; /* used to return 0 */ | |
2048 #endif | |
2049 | |
2050 #ifdef MMAP_METERING | |
2051 for(i = 0; i < N_Meterables; i++ ) | |
2052 { | |
2053 meter[i] = 0; | |
2054 } | |
2055 #endif /* MMAP_METERING */ | |
2056 } | |
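/* To summarize the r_alloc_initialized protocol implied above: 0 means
   init_ralloc() has never run (the entry points ABORT); 1 means it has
   run once (the entry points fall back to the underlying malloc family);
   2 or more means the mmap-based allocator is fully active.  Under PDUMP
   the extra increment makes it fully active on the first call. */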
2057 | |
2058 void | |
2059 syms_of_ralloc (void) | |
2060 { | |
2061 #ifdef MMAP_METERING | |
563 | 2062 DEFSYMBOL (Qmmap_times_mapped); |
2063 DEFSYMBOL (Qmmap_pages_mapped); | |
2064 DEFSYMBOL (Qmmap_times_unmapped); | |
2065 DEFSYMBOL (Qmmap_times_remapped); | |
2066 DEFSYMBOL (Qmmap_didnt_copy); | |
2067 DEFSYMBOL (Qmmap_pages_copied); | |
2068 DEFSYMBOL (Qmmap_average_bumpval); | |
2069 DEFSYMBOL (Qmmap_wastage); | |
2070 DEFSYMBOL (Qmmap_live_pages); | |
2071 DEFSYMBOL (Qmmap_addr_looked_up); | |
2072 DEFSYMBOL (Qmmap_hash_worked); | |
2073 DEFSYMBOL (Qmmap_addrlist_size); | |
428 | 2074 DEFSUBR (Fmmap_allocator_status); |
2075 #endif /* MMAP_METERING */ | |
2076 } | |
2077 | |
2078 void | |
2079 vars_of_ralloc (void) | |
2080 { | |
2081 DEFVAR_INT ("mmap-hysteresis", &mmap_hysteresis /* | |
2082 Extra room left at the end of an allocated arena, | |
2083 so that a re-alloc requesting extra space smaller than this | |
2084 does not actually cause a new arena to be allocated. | |
2085 | |
2086 A negative value is considered equal to zero. This is the | |
2087 minimum amount of space guaranteed to be left at the end of | |
2088 the arena. Because allocation happens in multiples of the OS | |
2089 page size, it is possible for more space to be left unused. | |
2090 */ ); | |
2091 mmap_hysteresis = 0; | |
2092 } | |
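/* A worked example (numbers hypothetical): with a 4096-byte page and
   mmap-hysteresis set to 1024, r_alloc (ptr, 5000) maps
   ROUNDUP (5000 + 1024) = 8192 bytes, so any later r_re_alloc asking
   for fewer than 8192 bytes is satisfied in place, with no remap and
   no copy. */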
2093 | |
2094 #endif /* HAVE_MMAP */ |