comparison src/ralloc.c @ 255:084402c475ba r20-5b26
Import from CVS: tag r20-5b26
author | cvs |
date | Mon, 13 Aug 2007 10:21:18 +0200 |
parents | 3d6bfa290dbd |
children | c5d627a313b1 |
comparison
254:e92abcaa252b | 255:084402c475ba |
---|---|
14 for more details. | 14 for more details. |
15 | 15 |
16 You should have received a copy of the GNU General Public License | 16 You should have received a copy of the GNU General Public License |
17 along with GNU Emacs; see the file COPYING. If not, write to | 17 along with GNU Emacs; see the file COPYING. If not, write to |
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, | 18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
19 Boston, MA 02111-1307, USA. */ | 19 Boston, MA 02111-1307, USA. |
20 | 
21 Synched Up with: FSF 20.2 (non-mmap portion only) | 
22 */ | 
20 | 23 |
21 /* NOTES: | 24 /* NOTES: |
22 | 25 |
23 Only relocate the blocs necessary for SIZE in r_alloc_sbrk, | 26 Only relocate the blocs necessary for SIZE in r_alloc_sbrk, |
24 rather than all of them. This means allowing for a possible | 27 rather than all of them. This means allowing for a possible |
45 /* Unconditionally use unsigned char * for this. */ | 48 /* Unconditionally use unsigned char * for this. */ |
46 typedef unsigned char *POINTER; | 49 typedef unsigned char *POINTER; |
47 | 50 |
48 typedef unsigned long SIZE; | 51 typedef unsigned long SIZE; |
49 | 52 |
53 #ifdef DOUG_LEA_MALLOC | |
54 #define M_TOP_PAD -2 | |
55 extern int mallopt (); | |
56 #endif | |
57 | |
50 #include "getpagesize.h" | 58 #include "getpagesize.h" |
51 | 59 |
52 #include <string.h> | 60 #include <string.h> |
53 | 61 |
54 #else /* Not emacs. */ | 62 #else /* Not emacs. */ |
67 #define safe_bcopy(x, y, z) memmove (y, x, z) | 75 #define safe_bcopy(x, y, z) memmove (y, x, z) |
68 | 76 |
69 #define NIL ((POINTER) 0) | 77 #define NIL ((POINTER) 0) |
70 | 78 |
71 | 79 |
72 #ifndef HAVE_MMAP | 80 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC) |
73 | 81 |
74 /* A flag to indicate whether we have initialized ralloc yet. For | 82 /* A flag to indicate whether we have initialized ralloc yet. For |
75 Emacs's sake, please do not make this local to malloc_init; on some | 83 Emacs's sake, please do not make this local to malloc_init; on some |
76 machines, the dumping procedure makes all static variables | 84 machines, the dumping procedure makes all static variables |
77 read-only. On these machines, the word static is #defined to be | 85 read-only. On these machines, the word static is #defined to be |
106 #define PAGE (getpagesize ()) | 114 #define PAGE (getpagesize ()) |
107 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0) | 115 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0) |
108 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \ | 116 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \ |
109 & ~(page_size - 1)) | 117 & ~(page_size - 1)) |
110 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1))) | 118 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1))) |
119 | |
120 #define MEM_ALIGN sizeof(double) | |
121 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \ | |
122 & ~(MEM_ALIGN - 1)) | |
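The ROUNDUP and MEM_ROUNDUP macros above are plain mask arithmetic: ROUNDUP rounds a size up to the next page boundary, MEM_ROUNDUP rounds a value up to sizeof (double) alignment. A minimal standalone sketch of the arithmetic (the 4096-byte page size and the demo values are assumptions, not taken from this file):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL                  /* assumed page size for the demo */
    #define MEM_ALIGN sizeof (double)

    #define ROUNDUP(size)     (((unsigned long) (size) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
    #define MEM_ROUNDUP(addr) (((unsigned long) (addr) + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1))

    int
    main (void)
    {
      /* 5000 bytes need two 4096-byte pages: prints 8192.  */
      printf ("ROUNDUP (5000)   = %lu\n", ROUNDUP (5000));
      /* 13 rounds up to the next multiple of 8: prints 16.  */
      printf ("MEM_ROUNDUP (13) = %lu\n", MEM_ROUNDUP (13));
      return 0;
    }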
111 | 123 |
112 /* Functions to get and return memory from the system. */ | 124 /* Data structures of heaps and blocs. */ |
125 | |
126 /* The relocatable objects, or blocs, and the malloc data | |
127 both reside within one or more heaps. | |
128 Each heap contains malloc data, running from `start' to `bloc_start', | |
129 and relocatable objects, running from `bloc_start' to `free'. | |
130 | |
131 Relocatable objects may relocate within the same heap | |
132 or may move into another heap; the heaps themselves may grow | |
133 but they never move. | |
134 | |
135 We try to make just one heap and make it larger as necessary. | |
136 But sometimes we can't do that, because we can't get contiguous | |
137 space to add onto the heap. When that happens, we start a new heap. */ | |
138 | |
139 typedef struct heap | |
140 { | |
141 struct heap *next; | |
142 struct heap *prev; | |
143 /* Start of memory range of this heap. */ | |
144 POINTER start; | |
145 /* End of memory range of this heap. */ | |
146 POINTER end; | |
147 /* Start of relocatable data in this heap. */ | |
148 POINTER bloc_start; | |
149 /* Start of unused space in this heap. */ | |
150 POINTER free; | |
151 /* First bloc in this heap. */ | |
152 struct bp *first_bloc; | |
153 /* Last bloc in this heap. */ | |
154 struct bp *last_bloc; | |
155 } *heap_ptr; | |
156 | |
157 #define NIL_HEAP ((heap_ptr) 0) | |
158 #define HEAP_PTR_SIZE (sizeof (struct heap)) | |
159 | |
160 /* This is the first heap object. | |
161 If we need additional heap objects, each one resides at the beginning of | |
162 the space it covers. */ | |
163 static struct heap heap_base; | |
164 | |
165 /* Head and tail of the list of heaps. */ | |
166 static heap_ptr first_heap, last_heap; | |
167 | |
168 /* These structures are allocated in the malloc arena. | |
169 The linked list is kept in order of increasing '.data' members. | |
170 The data blocks abut each other; if b->next is non-nil, then | |
171 b->data + b->size == b->next->data. | |
172 | |
173 An element with variable==NIL denotes a freed block, which has not yet | 
174 been collected. Such blocs may only appear while r_alloc_freeze > 0, and | 
175 will be freed when the arena is thawed. Currently these blocs are not | 
176 reusable while the arena is frozen. Very inefficient. */ | 
177 | |
178 typedef struct bp | |
179 { | |
180 struct bp *next; | |
181 struct bp *prev; | |
182 POINTER *variable; | |
183 POINTER data; | |
184 SIZE size; | |
185 POINTER new_data; /* temporarily used for relocation */ | |
186 struct heap *heap; /* Heap this bloc is in. */ | |
187 } *bloc_ptr; | |
188 | |
189 #define NIL_BLOC ((bloc_ptr) 0) | |
190 #define BLOC_PTR_SIZE (sizeof (struct bp)) | |
191 | |
192 /* Head and tail of the list of relocatable blocs. */ | |
193 static bloc_ptr first_bloc, last_bloc; | |
194 | |
195 static int use_relocatable_buffers; | |
196 | |
197 /* If >0, no relocation whatsoever takes place. */ | |
198 static int r_alloc_freeze_level; | |
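The comments above pin down the layout invariants this allocator lives by: inside each heap, malloc data runs from start to bloc_start and blocs run from bloc_start to free, and adjacent blocs in the list abut, i.e. b->data + b->size == b->next->data, as long as both sit in the same heap. A minimal consistency walk over these structures, as a sketch only (check_heap_bloc_invariants is a made-up name, and it assumes the typedefs and globals declared above are in scope; the DEBUG-only r_alloc_check near the end of the file checks much more):

    /* Sketch: verify the heap/bloc layout invariants described above.  */
    static void
    check_heap_bloc_invariants (void)
    {
      heap_ptr h;
      bloc_ptr b;

      for (h = first_heap; h != NIL_HEAP; h = h->next)
        {
          /* Malloc data, then blocs, then free space, all inside the heap.  */
          if (! (h->start <= h->bloc_start
                 && h->bloc_start <= h->free
                 && h->free <= h->end))
            abort ();
        }

      for (b = first_bloc; b != NIL_BLOC; b = b->next)
        {
          /* Each bloc lies inside the heap recorded for it.  */
          if (! (b->heap->bloc_start <= b->data
                 && b->data + b->size <= b->heap->end))
            abort ();

          /* Adjacent blocs abut unless the next one starts a new heap.  */
          if (b->next != NIL_BLOC
              && b->next->heap == b->heap
              && b->data + b->size != b->next->data)
            abort ();
        }
    }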
113 | 199 |
114 /* Obtain SIZE bytes of space. If enough space is not presently available | 200 /* Obtain SIZE bytes of space. If enough space is not presently available |
115 in our process reserve, (i.e., (page_break_value - break_value)), | 201 in our process reserve, (i.e., (page_break_value - break_value)), |
116 this means getting more page-aligned space from the system. | 202 this means getting more page-aligned space from the system. |
117 | 203 |
118 Return non-zero if all went well, or zero if we couldn't allocate | 204 Return non-zero if all went well, or zero if we couldn't allocate |
119 the memory. */ | 205 the memory. */ |
120 static int | 206 |
121 obtain (SIZE size) | 207 /* Functions to get and return memory from the system. */ |
122 { | 208 |
123 SIZE already_available = page_break_value - break_value; | 209 /* Find the heap that ADDRESS falls within. */ |
124 | 210 |
125 if (already_available < size) | 211 static heap_ptr |
126 { | 212 find_heap (address) |
127 SIZE get = ROUNDUP (size - already_available); | 213 POINTER address; |
128 /* Get some extra, so we can come here less often. */ | 214 { |
129 get += extra_bytes; | 215 heap_ptr heap; |
130 | 216 |
131 if ((*real_morecore) (get) == 0) | 217 for (heap = last_heap; heap; heap = heap->prev) |
218 { | |
219 if (heap->start <= address && address <= heap->end) | |
220 return heap; | |
221 } | |
222 | |
223 return NIL_HEAP; | |
224 } | |
225 | |
226 /* Find SIZE bytes of space in a heap. | |
227 Try to get them at ADDRESS (which must fall within some heap's range) | |
228 if we can get that many within one heap. | |
229 | |
230 If enough space is not presently available in our reserve, this means | |
231 getting more page-aligned space from the system. If the returned space | |
232 is not contiguous to the last heap, allocate a new heap, and append it | 
233 to the heap list. | 
234 | 
235 obtain does not try to keep track of whether space is in use | 
236 or not in use. It just returns the address of SIZE bytes that | 
237 fall within a single heap. If you call obtain twice in a row | 
238 with the same arguments, you typically get the same value. It's | 
239 the caller's responsibility to keep track of what space is in use. | 
240 | |
241 Return the address of the space if all went well, or zero if we couldn't | |
242 allocate the memory. */ | |
243 | |
244 static POINTER | |
245 obtain (POINTER address, SIZE size) | |
246 { | |
247 heap_ptr heap; | |
248 SIZE already_available; | |
249 | |
250 /* Find the heap that ADDRESS falls within. */ | |
251 for (heap = last_heap; heap; heap = heap->prev) | |
252 { | |
253 if (heap->start <= address && address <= heap->end) | |
254 break; | |
255 } | |
256 | |
257 if (! heap) | |
258 abort (); | |
259 | |
260 /* If we can't fit SIZE bytes in that heap, | |
261 try successive later heaps. */ | |
262 while (heap && address + size > heap->end) | |
263 { | |
264 heap = heap->next; | |
265 if (heap == NIL_HEAP) | |
266 break; | |
267 address = heap->bloc_start; | |
268 } | |
269 | |
270 /* If we can't fit them within any existing heap, | |
271 get more space. */ | |
272 if (heap == NIL_HEAP) | |
273 { | |
274 POINTER new = (*real_morecore)(0); | |
275 SIZE get; | |
276 | |
277 already_available = (char *)last_heap->end - (char *)address; | |
278 | |
279 if (new != last_heap->end) | |
280 { | |
281 /* Someone else called sbrk. Make a new heap. */ | |
282 | |
283 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new); | |
284 POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1)); | |
285 | |
286 if ((*real_morecore) (bloc_start - new) != new) | |
287 return 0; | |
288 | |
289 new_heap->start = new; | |
290 new_heap->end = bloc_start; | |
291 new_heap->bloc_start = bloc_start; | |
292 new_heap->free = bloc_start; | |
293 new_heap->next = NIL_HEAP; | |
294 new_heap->prev = last_heap; | |
295 new_heap->first_bloc = NIL_BLOC; | |
296 new_heap->last_bloc = NIL_BLOC; | |
297 last_heap->next = new_heap; | |
298 last_heap = new_heap; | |
299 | |
300 address = bloc_start; | |
301 already_available = 0; | |
302 } | |
303 | |
304 /* Add space to the last heap (which we may have just created). | |
305 Get some extra, so we can come here less often. */ | |
306 | |
307 get = size + extra_bytes - already_available; | |
308 get = (char *) ROUNDUP ((char *)last_heap->end + get) | |
309 - (char *) last_heap->end; | |
310 | |
311 if ((*real_morecore) (get) != last_heap->end) | |
132 return 0; | 312 return 0; |
133 | 313 |
134 page_break_value += get; | 314 last_heap->end += get; |
135 } | 315 } |
136 | 316 |
137 break_value += size; | 317 return address; |
138 | 318 } |
139 return 1; | 319 |
140 } | 320 #if 0 |
141 | |
142 /* Obtain SIZE bytes of space and return a pointer to the new area. | 321 /* Obtain SIZE bytes of space and return a pointer to the new area. |
143 If we could not allocate the space, return zero. */ | 322 If we could not allocate the space, return zero. */ |
144 | 323 |
145 static POINTER | 324 static POINTER |
146 get_more_space (SIZE size) | 325 get_more_space (SIZE size) |
149 if (obtain (size)) | 328 if (obtain (size)) |
150 return ptr; | 329 return ptr; |
151 else | 330 else |
152 return 0; | 331 return 0; |
153 } | 332 } |
333 #endif | |
154 | 334 |
155 /* Note that SIZE bytes of space have been relinquished by the process. | 335 /* Note that SIZE bytes of space have been relinquished by the process. |
156 If SIZE is more than a page, return the space to the system. */ | 336 If SIZE is more than a page, return the space to the system. */ |
157 | 337 |
158 static void | 338 static void |
159 relinquish (SIZE size) | 339 relinquish () |
160 { | 340 { |
161 POINTER new_page_break; | 341 register heap_ptr h; |
162 int excess; | 342 int excess = 0; |
163 | 343 |
164 break_value -= size; | 344 /* Add the amount of space beyond break_value |
165 new_page_break = (POINTER) ROUNDUP (break_value); | 345 in all heaps which have extend beyond break_value at all. */ |
166 excess = (char *) page_break_value - (char *) new_page_break; | 346 |
167 | 347 for (h = last_heap; h && break_value < h->end; h = h->prev) |
168 if (excess > extra_bytes * 2) | 348 { |
349 excess += (char *) h->end - (char *) ((break_value < h->bloc_start) | |
350 ? h->bloc_start : break_value); | |
351 } | |
352 | |
353 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end) | |
169 { | 354 { |
170 /* Keep extra_bytes worth of empty space. | 355 /* Keep extra_bytes worth of empty space. |
171 And don't free anything unless we can free at least extra_bytes. */ | 356 And don't free anything unless we can free at least extra_bytes. */ |
172 if ((*real_morecore) (extra_bytes - excess) == 0) | 357 excess -= extra_bytes; |
358 | |
359 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess) | |
360 { | |
361 /* This heap should have no blocs in it. */ | |
362 if (last_heap->first_bloc != NIL_BLOC | |
363 || last_heap->last_bloc != NIL_BLOC) | |
364 abort (); | |
365 | |
366 /* Return the last heap, with its header, to the system. */ | |
367 excess = (char *)last_heap->end - (char *)last_heap->start; | |
368 last_heap = last_heap->prev; | |
369 last_heap->next = NIL_HEAP; | |
370 } | |
371 else | |
372 { | |
373 excess = (char *) last_heap->end | |
374 - (char *) ROUNDUP ((char *)last_heap->end - excess); | |
375 last_heap->end -= excess; | |
376 } | |
377 | |
378 if ((*real_morecore) (- excess) == 0) | |
173 abort (); | 379 abort (); |
174 | 380 } |
175 page_break_value += extra_bytes - excess; | 381 } |
176 } | 382 |
177 | 383 /* Return the total size in use by the relocating allocator, |
178 /* Zero the space from the end of the "official" break to the actual | 384 above where malloc gets space. */ |
179 break, so that bugs show up faster. */ | 385 |
180 memset (break_value, 0, ((char *) page_break_value - (char *) break_value)); | 386 long |
387 r_alloc_size_in_use () | |
388 { | |
389 return break_value - virtual_break_value; | |
181 } | 390 } |
182 | 391 |
183 /* The meat - allocating, freeing, and relocating blocs. */ | 392 /* The meat - allocating, freeing, and relocating blocs. */ |
184 | 393 |
185 /* These structures are allocated in the malloc arena. | |
186 The linked list is kept in order of increasing '.data' members. | |
187 The data blocks abut each other; if b->next is non-nil, then | |
188 b->data + b->size == b->next->data. */ | |
189 typedef struct bp | |
190 { | |
191 struct bp *next; | |
192 struct bp *prev; | |
193 POINTER *variable; | |
194 POINTER data; | |
195 SIZE size; | |
196 } *bloc_ptr; | |
197 | |
198 #define NIL_BLOC ((bloc_ptr) 0) | |
199 #define BLOC_PTR_SIZE (sizeof (struct bp)) | |
200 | |
201 /* Head and tail of the list of relocatable blocs. */ | |
202 static bloc_ptr first_bloc, last_bloc; | |
203 | 394 |
204 /* Find the bloc referenced by the address in PTR. Returns a pointer | 395 /* Find the bloc referenced by the address in PTR. Returns a pointer |
205 to that block. */ | 396 to that block. */ |
206 | 397 |
207 static bloc_ptr | 398 static bloc_ptr |
208 find_bloc (POINTER *ptr) | 399 find_bloc (POINTER *ptr) |
209 { | 400 { |
210 bloc_ptr p = first_bloc; | 401 register bloc_ptr p = first_bloc; |
211 | 402 |
212 while (p != NIL_BLOC) | 403 while (p != NIL_BLOC) |
213 { | 404 { |
214 if (p->variable == ptr && p->data == *ptr) | 405 if (p->variable == ptr && p->data == *ptr) |
215 return p; | 406 return p; |
225 memory for the new block. */ | 416 memory for the new block. */ |
226 | 417 |
227 static bloc_ptr | 418 static bloc_ptr |
228 get_bloc (SIZE size) | 419 get_bloc (SIZE size) |
229 { | 420 { |
230 bloc_ptr new_bloc; | 421 register bloc_ptr new_bloc; |
422 register heap_ptr heap; | |
231 | 423 |
232 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) | 424 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) |
233 || ! (new_bloc->data = get_more_space (size))) | 425 || ! (new_bloc->data = obtain (break_value, size))) |
234 { | 426 { |
235 if (new_bloc) | 427 if (new_bloc) |
236 free (new_bloc); | 428 free (new_bloc); |
237 | 429 |
238 return 0; | 430 return 0; |
239 } | 431 } |
432 | |
433 break_value = new_bloc->data + size; | |
240 | 434 |
241 new_bloc->size = size; | 435 new_bloc->size = size; |
242 new_bloc->next = NIL_BLOC; | 436 new_bloc->next = NIL_BLOC; |
243 new_bloc->variable = (POINTER *) NIL; | 437 new_bloc->variable = (POINTER *) NIL; |
244 | 438 new_bloc->new_data = 0; |
439 | |
440 /* Record in the heap that this space is in use. */ | |
441 heap = find_heap (new_bloc->data); | |
442 heap->free = break_value; | |
443 | |
444 /* Maintain the correspondence between heaps and blocs. */ | |
445 new_bloc->heap = heap; | |
446 heap->last_bloc = new_bloc; | |
447 if (heap->first_bloc == NIL_BLOC) | |
448 heap->first_bloc = new_bloc; | |
449 | |
450 /* Put this bloc on the doubly-linked list of blocs. */ | |
245 if (first_bloc) | 451 if (first_bloc) |
246 { | 452 { |
247 new_bloc->prev = last_bloc; | 453 new_bloc->prev = last_bloc; |
248 last_bloc->next = new_bloc; | 454 last_bloc->next = new_bloc; |
249 last_bloc = new_bloc; | 455 last_bloc = new_bloc; |
255 } | 461 } |
256 | 462 |
257 return new_bloc; | 463 return new_bloc; |
258 } | 464 } |
259 | 465 |
260 /* Relocate all blocs from BLOC on upward in the list to the zone | 466 /* Calculate new locations of blocs in the list beginning with BLOC, |
261 indicated by ADDRESS. Direction of relocation is determined by | 467 relocating it to start at ADDRESS, in heap HEAP. If enough space is |
262 the position of ADDRESS relative to BLOC->data. | 468 not presently available in our reserve, call obtain for |
263 | 469 more space. |
264 If BLOC is NIL_BLOC, nothing is done. | 470 |
265 | 471 Store the new location of each bloc in its new_data field. |
266 Note that ordering of blocs is not affected by this function. */ | 472 Do not touch the contents of blocs or break_value. */ |
473 | |
474 static int | |
475 relocate_blocs (bloc, heap, address) | |
476 bloc_ptr bloc; | |
477 heap_ptr heap; | |
478 POINTER address; | |
479 { | |
480 register bloc_ptr b = bloc; | |
481 | |
482 /* No need to ever call this if arena is frozen, bug somewhere! */ | |
483 if (r_alloc_freeze_level) | |
484 abort(); | |
485 | |
486 while (b) | |
487 { | |
488 /* If bloc B won't fit within HEAP, | |
489 move to the next heap and try again. */ | |
490 while (heap && address + b->size > heap->end) | |
491 { | |
492 heap = heap->next; | |
493 if (heap == NIL_HEAP) | |
494 break; | |
495 address = heap->bloc_start; | |
496 } | |
497 | |
498 /* If BLOC won't fit in any heap, | |
499 get enough new space to hold BLOC and all following blocs. */ | |
500 if (heap == NIL_HEAP) | |
501 { | |
502 register bloc_ptr tb = b; | |
503 register SIZE s = 0; | |
504 | |
505 /* Add up the size of all the following blocs. */ | |
506 while (tb != NIL_BLOC) | |
507 { | |
508 if (tb->variable) | |
509 s += tb->size; | |
510 | |
511 tb = tb->next; | |
512 } | |
513 | |
514 /* Get that space. */ | |
515 address = obtain (address, s); | |
516 if (address == 0) | |
517 return 0; | |
518 | |
519 heap = last_heap; | |
520 } | |
521 | |
522 /* Record the new address of this bloc | |
523 and update where the next bloc can start. */ | |
524 b->new_data = address; | |
525 if (b->variable) | |
526 address += b->size; | |
527 b = b->next; | |
528 } | |
529 | |
530 return 1; | |
531 } | |
532 | |
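relocate_blocs above only plans a move: it fills in each bloc's new_data field, obtaining more heap space if necessary, but copies nothing and leaves data, *variable and break_value untouched. The callers (resize_bloc and r_alloc_sbrk below) perform the copy, and the direction of their walk matters: when blocs move to higher addresses the copy runs from last_bloc backwards, and when they move to lower addresses it runs forwards from the first bloc, so no bloc is overwritten before it has been copied. A sketch of that commit phase (commit_relocation is an invented name; the loop bodies are the same pattern the real callers use):

    /* Sketch: apply the relocation plan computed by relocate_blocs.
       Assumes every live bloc's new_data field has already been set.  */
    static void
    commit_relocation (int moving_up)
    {
      bloc_ptr b;

      if (moving_up)
        /* Highest bloc first, so lower blocs are copied before their
           old storage can be overwritten.  */
        for (b = last_bloc; b != NIL_BLOC; b = b->prev)
          {
            safe_bcopy (b->data, b->new_data, b->size);
            *b->variable = b->data = b->new_data;
          }
      else
        /* Lowest bloc first when everything slides downwards.  */
        for (b = first_bloc; b != NIL_BLOC; b = b->next)
          {
            safe_bcopy (b->data, b->new_data, b->size);
            *b->variable = b->data = b->new_data;
          }
    }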
533 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list. | |
534 This is necessary if we put the memory space of BLOC | 
535 before that of BEFORE. */ | |
267 | 536 |
268 static void | 537 static void |
269 relocate_some_blocs (bloc_ptr bloc, POINTER address) | 538 reorder_bloc (bloc, before) |
270 { | 539 bloc_ptr bloc, before; |
271 if (bloc != NIL_BLOC) | 540 { |
272 { | 541 bloc_ptr prev, next; |
273 SIZE offset = address - bloc->data; | 542 |
274 SIZE data_size = 0; | 543 /* Splice BLOC out from where it is. */ |
275 bloc_ptr b; | 544 prev = bloc->prev; |
276 | 545 next = bloc->next; |
546 | |
547 if (prev) | |
548 prev->next = next; | |
549 if (next) | |
550 next->prev = prev; | |
551 | |
552 /* Splice it in before BEFORE. */ | |
553 prev = before->prev; | |
554 | |
555 if (prev) | |
556 prev->next = bloc; | |
557 bloc->prev = prev; | |
558 | |
559 before->prev = bloc; | |
560 bloc->next = before; | |
561 } | |
562 | |
563 /* Update the records of which heaps contain which blocs, starting | |
564 with heap HEAP and bloc BLOC. */ | |
565 | |
566 static void | |
567 update_heap_bloc_correspondence (bloc, heap) | |
568 bloc_ptr bloc; | |
569 heap_ptr heap; | |
570 { | |
571 register bloc_ptr b; | |
572 | |
573 /* Initialize HEAP's status to reflect blocs before BLOC. */ | |
574 if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap) | |
575 { | |
576 /* The previous bloc is in HEAP. */ | |
577 heap->last_bloc = bloc->prev; | |
578 heap->free = bloc->prev->data + bloc->prev->size; | |
579 } | |
580 else | |
581 { | |
582 /* HEAP contains no blocs before BLOC. */ | |
583 heap->first_bloc = NIL_BLOC; | |
584 heap->last_bloc = NIL_BLOC; | |
585 heap->free = heap->bloc_start; | |
586 } | |
587 | |
588 /* Advance through blocs one by one. */ | |
589 for (b = bloc; b != NIL_BLOC; b = b->next) | |
590 { | |
591 /* Advance through heaps, marking them empty, | |
592 till we get to the one that B is in. */ | |
593 while (heap) | |
594 { | |
595 if (heap->bloc_start <= b->data && b->data <= heap->end) | |
596 break; | |
597 heap = heap->next; | |
598 /* We know HEAP is not null now, | |
599 because there has to be space for bloc B. */ | |
600 heap->first_bloc = NIL_BLOC; | |
601 heap->last_bloc = NIL_BLOC; | |
602 heap->free = heap->bloc_start; | |
603 } | |
604 | |
605 /* Update HEAP's status for bloc B. */ | |
606 heap->free = b->data + b->size; | |
607 heap->last_bloc = b; | |
608 if (heap->first_bloc == NIL_BLOC) | |
609 heap->first_bloc = b; | |
610 | |
611 /* Record that B is in HEAP. */ | |
612 b->heap = heap; | |
613 } | |
614 | |
615 /* If there are any remaining heaps and no blocs left, | |
616 mark those heaps as empty. */ | |
617 heap = heap->next; | |
618 while (heap) | |
619 { | |
620 heap->first_bloc = NIL_BLOC; | |
621 heap->last_bloc = NIL_BLOC; | |
622 heap->free = heap->bloc_start; | |
623 heap = heap->next; | |
624 } | |
625 } | |
626 | |
627 /* Resize BLOC to SIZE bytes. This relocates the blocs | |
628 that come after BLOC in memory. */ | |
629 | |
630 static int | |
631 resize_bloc (bloc, size) | |
632 bloc_ptr bloc; | |
633 SIZE size; | |
634 { | |
635 register bloc_ptr b; | |
636 heap_ptr heap; | |
637 POINTER address; | |
638 SIZE old_size; | |
639 | |
640 /* No need to ever call this if arena is frozen, bug somewhere! */ | |
641 if (r_alloc_freeze_level) | |
642 abort(); | |
643 | |
644 if (bloc == NIL_BLOC || size == bloc->size) | |
645 return 1; | |
646 | |
647 for (heap = first_heap; heap != NIL_HEAP; heap = heap->next) | |
648 { | |
649 if (heap->bloc_start <= bloc->data && bloc->data <= heap->end) | |
650 break; | |
651 } | |
652 | |
653 if (heap == NIL_HEAP) | |
654 abort (); | |
655 | |
656 old_size = bloc->size; | |
657 bloc->size = size; | |
658 | |
659 /* Note that bloc could be moved into the previous heap. */ | |
660 address = (bloc->prev ? bloc->prev->data + bloc->prev->size | |
661 : first_heap->bloc_start); | |
662 while (heap) | |
663 { | |
664 if (heap->bloc_start <= address && address <= heap->end) | |
665 break; | |
666 heap = heap->prev; | |
667 } | |
668 | |
669 if (! relocate_blocs (bloc, heap, address)) | |
670 { | |
671 bloc->size = old_size; | |
672 return 0; | |
673 } | |
674 | |
675 if (size > old_size) | |
676 { | |
677 for (b = last_bloc; b != bloc; b = b->prev) | |
678 { | |
679 if (!b->variable) | |
680 { | |
681 b->size = 0; | |
682 b->data = b->new_data; | |
683 } | |
684 else | |
685 { | |
686 safe_bcopy (b->data, b->new_data, b->size); | |
687 *b->variable = b->data = b->new_data; | |
688 } | |
689 } | |
690 if (!bloc->variable) | |
691 { | |
692 bloc->size = 0; | |
693 bloc->data = bloc->new_data; | |
694 } | |
695 else | |
696 { | |
697 safe_bcopy (bloc->data, bloc->new_data, old_size); | |
698 memset (bloc->new_data + old_size, 0, size - old_size); | |
699 *bloc->variable = bloc->data = bloc->new_data; | |
700 } | |
701 } | |
702 else | |
703 { | |
277 for (b = bloc; b != NIL_BLOC; b = b->next) | 704 for (b = bloc; b != NIL_BLOC; b = b->next) |
278 { | 705 { |
279 data_size += b->size; | 706 if (!b->variable) |
280 b->data += offset; | 707 { |
281 *b->variable = b->data; | 708 b->size = 0; |
282 } | 709 b->data = b->new_data; |
283 | 710 } |
284 memmove (address, address - offset, data_size); | 711 else |
285 } | 712 { |
286 } | 713 safe_bcopy (b->data, b->new_data, b->size); |
287 | 714 *b->variable = b->data = b->new_data; |
715 } | |
716 } | |
717 } | |
718 | |
719 update_heap_bloc_correspondence (bloc, heap); | |
720 | |
721 break_value = (last_bloc ? last_bloc->data + last_bloc->size | |
722 : first_heap->bloc_start); | |
723 return 1; | |
724 } | |
725 | |
288 /* Free BLOC from the chain of blocs, relocating any blocs above it | 726 /* Free BLOC from the chain of blocs, relocating any blocs above it |
289 and returning BLOC->size bytes to the free area. */ | 727 and returning BLOC->size bytes to the free area. */ |
290 | 728 |
291 static void | 729 static void |
292 free_bloc (bloc_ptr bloc) | 730 free_bloc (bloc_ptr bloc) |
293 { | 731 { |
732 heap_ptr heap = bloc->heap; | |
733 | |
734 if (r_alloc_freeze_level) | |
735 { | |
736 bloc->variable = (POINTER *) NIL; | |
737 return; | |
738 } | |
739 | |
740 resize_bloc (bloc, 0); | |
741 | |
294 if (bloc == first_bloc && bloc == last_bloc) | 742 if (bloc == first_bloc && bloc == last_bloc) |
295 { | 743 { |
296 first_bloc = last_bloc = NIL_BLOC; | 744 first_bloc = last_bloc = NIL_BLOC; |
297 } | 745 } |
298 else if (bloc == last_bloc) | 746 else if (bloc == last_bloc) |
309 { | 757 { |
310 bloc->next->prev = bloc->prev; | 758 bloc->next->prev = bloc->prev; |
311 bloc->prev->next = bloc->next; | 759 bloc->prev->next = bloc->next; |
312 } | 760 } |
313 | 761 |
314 relocate_some_blocs (bloc->next, bloc->data); | 762 /* Update the records of which blocs are in HEAP. */ |
315 relinquish (bloc->size); | 763 if (heap->first_bloc == bloc) |
764 { | |
765 if (bloc->next != 0 && bloc->next->heap == heap) | |
766 heap->first_bloc = bloc->next; | |
767 else | |
768 heap->first_bloc = heap->last_bloc = NIL_BLOC; | |
769 } | |
770 if (heap->last_bloc == bloc) | |
771 { | |
772 if (bloc->prev != 0 && bloc->prev->heap == heap) | |
773 heap->last_bloc = bloc->prev; | |
774 else | |
775 heap->first_bloc = heap->last_bloc = NIL_BLOC; | |
776 } | |
777 | |
778 relinquish (); | |
316 free (bloc); | 779 free (bloc); |
317 } | 780 } |
318 | 781 |
319 /* Interface routines. */ | 782 /* Interface routines. */ |
320 | |
321 static int use_relocatable_buffers; | |
322 | 783 |
323 /* Obtain SIZE bytes of storage from the free pool, or the system, as | 784 /* Obtain SIZE bytes of storage from the free pool, or the system, as |
324 necessary. If relocatable blocs are in use, this means relocating | 785 necessary. If relocatable blocs are in use, this means relocating |
325 them. This function gets plugged into the GNU malloc's __morecore | 786 them. This function gets plugged into the GNU malloc's __morecore |
326 hook. | 787 hook. |
332 GNU malloc package. */ | 793 GNU malloc package. */ |
333 | 794 |
334 POINTER | 795 POINTER |
335 r_alloc_sbrk (long size) | 796 r_alloc_sbrk (long size) |
336 { | 797 { |
337 /* This is the first address not currently available for the heap. */ | 798 register bloc_ptr b; |
338 POINTER top; | 799 POINTER address; |
339 /* Amount of empty space below that. */ | 800 |
340 /* It is not correct to use SIZE here, because that is usually unsigned. | 801 if (! r_alloc_initialized) |
341 ptrdiff_t would be okay, but is not always available. | 802 init_ralloc (); |
342 `long' will work in all cases, in practice. */ | |
343 long already_available; | |
344 POINTER ptr; | |
345 | 803 |
346 if (! use_relocatable_buffers) | 804 if (! use_relocatable_buffers) |
347 return (*real_morecore) (size); | 805 return (*real_morecore) (size); |
348 | 806 |
349 top = first_bloc ? first_bloc->data : page_break_value; | 807 if (size == 0) |
350 already_available = (char *) top - (char *) virtual_break_value; | 808 return virtual_break_value; |
351 | 809 |
352 /* Do we not have enough gap already? */ | 810 if (size > 0) |
353 if (size > 0 && already_available < size) | 811 { |
354 { | 812 /* Allocate a page-aligned space. GNU malloc would reclaim an |
355 /* Get what we need, plus some extra so we can come here less often. */ | 813 extra space if we passed an unaligned one. But we could |
356 SIZE get = size - already_available + extra_bytes; | 814 not always find a space which is contiguous to the previous. */ |
357 | 815 POINTER new_bloc_start; |
358 if (! obtain (get)) | 816 heap_ptr h = first_heap; |
359 return 0; | 817 SIZE get = ROUNDUP (size); |
360 | 818 |
361 if (first_bloc) | 819 address = (POINTER) ROUNDUP (virtual_break_value); |
362 relocate_some_blocs (first_bloc, first_bloc->data + get); | 820 |
363 | 821 /* Search the list upward for a heap which is large enough. */ |
364 /* Zero out the space we just allocated, to help catch bugs | 822 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get)) |
365 quickly. */ | 823 { |
366 memset (virtual_break_value, 0, get); | 824 h = h->next; |
367 } | 825 if (h == NIL_HEAP) |
368 /* Can we keep extra_bytes of gap while freeing at least extra_bytes? */ | 826 break; |
369 else if (size < 0 && already_available - size > 2 * extra_bytes) | 827 address = (POINTER) ROUNDUP (h->start); |
370 { | 828 } |
371 /* Ok, do so. This is how many to free. */ | 829 |
372 SIZE give_back = already_available - size - extra_bytes; | 830 /* If not found, obtain more space. */ |
373 | 831 if (h == NIL_HEAP) |
374 if (first_bloc) | 832 { |
375 relocate_some_blocs (first_bloc, first_bloc->data - give_back); | 833 get += extra_bytes + page_size; |
376 relinquish (give_back); | 834 |
377 } | 835 if (! obtain (address, get)) |
378 | 836 return 0; |
379 ptr = virtual_break_value; | 837 |
380 virtual_break_value += size; | 838 if (first_heap == last_heap) |
381 | 839 address = (POINTER) ROUNDUP (virtual_break_value); |
382 return ptr; | 840 else |
841 address = (POINTER) ROUNDUP (last_heap->start); | |
842 h = last_heap; | |
843 } | |
844 | |
845 new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get); | |
846 | |
847 if (first_heap->bloc_start < new_bloc_start) | |
848 { | |
849 /* This is no clean solution - no idea how to do it better. */ | |
850 if (r_alloc_freeze_level) | |
851 return NIL; | |
852 | |
853 /* There is a bug here: if the above obtain call succeeded, but the | |
854 relocate_blocs call below does not succeed, we need to free | |
855 the memory that we got with obtain. */ | |
856 | |
857 /* Move all blocs upward. */ | |
858 if (! relocate_blocs (first_bloc, h, new_bloc_start)) | |
859 return 0; | |
860 | |
861 /* Note that (POINTER)(h+1) <= new_bloc_start since | |
862 get >= page_size, so the following does not destroy the heap | |
863 header. */ | |
864 for (b = last_bloc; b != NIL_BLOC; b = b->prev) | |
865 { | |
866 safe_bcopy (b->data, b->new_data, b->size); | |
867 *b->variable = b->data = b->new_data; | |
868 } | |
869 | |
870 h->bloc_start = new_bloc_start; | |
871 | |
872 update_heap_bloc_correspondence (first_bloc, h); | |
873 } | |
874 if (h != first_heap) | |
875 { | |
876 /* Give up managing heaps below the one the new | |
877 virtual_break_value points to. */ | |
878 first_heap->prev = NIL_HEAP; | |
879 first_heap->next = h->next; | |
880 first_heap->start = h->start; | |
881 first_heap->end = h->end; | |
882 first_heap->free = h->free; | |
883 first_heap->first_bloc = h->first_bloc; | |
884 first_heap->last_bloc = h->last_bloc; | |
885 first_heap->bloc_start = h->bloc_start; | |
886 | |
887 if (first_heap->next) | |
888 first_heap->next->prev = first_heap; | |
889 else | |
890 last_heap = first_heap; | |
891 } | |
892 | |
893 memset (address, 0, size); | |
894 } | |
895 else /* size < 0 */ | |
896 { | |
897 SIZE excess = (char *)first_heap->bloc_start | |
898 - ((char *)virtual_break_value + size); | |
899 | |
900 address = virtual_break_value; | |
901 | |
902 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes) | |
903 { | |
904 excess -= extra_bytes; | |
905 first_heap->bloc_start | |
906 = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess); | |
907 | |
908 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start); | |
909 | |
910 for (b = first_bloc; b != NIL_BLOC; b = b->next) | |
911 { | |
912 safe_bcopy (b->data, b->new_data, b->size); | |
913 *b->variable = b->data = b->new_data; | |
914 } | |
915 } | |
916 | |
917 if ((char *)virtual_break_value + size < (char *)first_heap->start) | |
918 { | |
919 /* We found an additional space below the first heap */ | |
920 first_heap->start = (POINTER) ((char *)virtual_break_value + size); | |
921 } | |
922 } | |
923 | |
924 virtual_break_value = (POINTER) ((char *)address + size); | |
925 break_value = (last_bloc | |
926 ? last_bloc->data + last_bloc->size | |
927 : first_heap->bloc_start); | |
928 if (size < 0) | |
929 relinquish (); | |
930 | |
931 return address; | |
383 } | 932 } |
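As the comment before r_alloc_sbrk says, this function is installed as GNU malloc's __morecore hook, so it must honour the same contract as sbrk: a positive SIZE yields the address of that many fresh bytes (relocating blocs upward out of malloc's way if needed), zero reports the current virtual break, and a negative SIZE gives space back. A hedged sketch of that contract from a caller's point of view (morecore_contract_demo and the 16384-byte size are invented for illustration; normally only malloc itself calls this through __morecore):

    /* Sketch: the sbrk-like contract r_alloc_sbrk provides to malloc.  */
    static void
    morecore_contract_demo (void)
    {
      POINTER brk   = r_alloc_sbrk (0);        /* current virtual break */
      POINTER chunk = r_alloc_sbrk (16384);    /* grow malloc's arena */

      if (chunk == 0)
        abort ();                              /* could not get the space */

      /* chunk now names 16384 zero-filled bytes that lie below every
         relocatable bloc; blocs may have been moved up to make room.  */

      r_alloc_sbrk (-16384);                   /* shrink the arena again */
      (void) brk;
    }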
384 | 933 |
385 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to | 934 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to |
386 the data is returned in *PTR. PTR is thus the address of some variable | 935 the data is returned in *PTR. PTR is thus the address of some variable |
387 which will use the data area. | 936 which will use the data area. |
388 | 937 |
938 The allocation of 0 bytes is valid. | |
939 In case r_alloc_freeze is set, a best fit of unused blocs could be done | |
940 before allocating a new area. Not yet done. | |
941 | |
389 If we can't allocate the necessary memory, set *PTR to zero, and | 942 If we can't allocate the necessary memory, set *PTR to zero, and |
390 return zero. */ | 943 return zero. */ |
391 | 944 |
392 POINTER | 945 POINTER |
393 r_alloc (POINTER *ptr, SIZE size) | 946 r_alloc (POINTER *ptr, SIZE size) |
413 Store 0 in *PTR to show there's no block allocated. */ | 966 Store 0 in *PTR to show there's no block allocated. */ |
414 | 967 |
415 void | 968 void |
416 r_alloc_free (POINTER *ptr) | 969 r_alloc_free (POINTER *ptr) |
417 { | 970 { |
418 bloc_ptr dead_bloc; | 971 register bloc_ptr dead_bloc; |
972 | |
973 if (! r_alloc_initialized) | |
974 init_ralloc (); | |
419 | 975 |
420 dead_bloc = find_bloc (ptr); | 976 dead_bloc = find_bloc (ptr); |
421 if (dead_bloc == NIL_BLOC) | 977 if (dead_bloc == NIL_BLOC) |
422 abort (); | 978 abort (); |
423 | 979 |
424 free_bloc (dead_bloc); | 980 free_bloc (dead_bloc); |
425 *ptr = 0; | 981 *ptr = 0; |
982 | |
983 #ifdef emacs | |
984 refill_memory_reserve (); | |
985 #endif | |
426 } | 986 } |
427 | 987 |
428 /* Given a pointer at address PTR to relocatable data, resize it to SIZE. | 988 /* Given a pointer at address PTR to relocatable data, resize it to SIZE. |
429 Do this by shifting all blocks above this one up in memory, unless | 989 Do this by shifting all blocks above this one up in memory, unless |
430 SIZE is less than or equal to the current bloc size, in which case | 990 SIZE is less than or equal to the current bloc size, in which case |
431 do nothing. | 991 do nothing. |
432 | 992 |
993 In case r_alloc_freeze is set, a new bloc is allocated, and the | |
994 memory copied to it. Not very efficient. We could traverse the | |
995 bloc_list for a best fit of free blocs first. | |
996 | |
433 Change *PTR to reflect the new bloc, and return this value. | 997 Change *PTR to reflect the new bloc, and return this value. |
434 | 998 |
435 If more memory cannot be allocated, then leave *PTR unchanged, and | 999 If more memory cannot be allocated, then leave *PTR unchanged, and |
436 return zero. */ | 1000 return zero. */ |
437 | 1001 |
438 POINTER | 1002 POINTER |
439 r_re_alloc (POINTER *ptr, SIZE size) | 1003 r_re_alloc (POINTER *ptr, SIZE size) |
440 { | 1004 { |
441 bloc_ptr bloc; | 1005 register bloc_ptr bloc; |
1006 | |
1007 if (! r_alloc_initialized) | |
1008 init_ralloc (); | |
1009 | |
1010 if (!*ptr) | |
1011 return r_alloc (ptr, size); | |
1012 if (!size) | |
1013 { | |
1014 r_alloc_free (ptr); | |
1015 return r_alloc (ptr, 0); | |
1016 } | |
442 | 1017 |
443 bloc = find_bloc (ptr); | 1018 bloc = find_bloc (ptr); |
444 if (bloc == NIL_BLOC) | 1019 if (bloc == NIL_BLOC) |
445 abort (); | 1020 abort (); |
446 | 1021 |
447 if (size <= bloc->size) | 1022 if (size < bloc->size) |
448 /* Wouldn't it be useful to actually resize the bloc here? */ | 1023 { |
449 return *ptr; | 1024 /* Wouldn't it be useful to actually resize the bloc here? */ |
450 | 1025 /* I think so too, but not if it's too expensive... */ |
451 if (! obtain (size - bloc->size)) | 1026 if ((bloc->size - MEM_ROUNDUP (size) >= page_size) |
452 return 0; | 1027 && r_alloc_freeze_level == 0) |
453 | 1028 { |
454 relocate_some_blocs (bloc->next, bloc->data + size); | 1029 resize_bloc (bloc, MEM_ROUNDUP (size)); |
455 | 1030 /* Never mind if this fails, just do nothing... */ |
456 /* Zero out the new space in the bloc, to help catch bugs faster. */ | 1031 /* It *should* be infallible! */ |
457 memset (bloc->data + bloc->size, 0, size - bloc->size); | 1032 } |
458 | 1033 } |
459 /* Indicate that this block has a new size. */ | 1034 else if (size > bloc->size) |
460 bloc->size = size; | 1035 { |
461 | 1036 if (r_alloc_freeze_level) |
1037 { | |
1038 bloc_ptr new_bloc; | |
1039 new_bloc = get_bloc (MEM_ROUNDUP (size)); | |
1040 if (new_bloc) | |
1041 { | |
1042 new_bloc->variable = ptr; | |
1043 *ptr = new_bloc->data; | |
1044 bloc->variable = (POINTER *) NIL; | |
1045 } | |
1046 else | |
1047 return NIL; | |
1048 } | |
1049 else | |
1050 { | |
1051 if (! resize_bloc (bloc, MEM_ROUNDUP (size))) | |
1052 return NIL; | |
1053 } | |
1054 } | |
462 return *ptr; | 1055 return *ptr; |
463 } | 1056 } |
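r_alloc, r_re_alloc and r_alloc_free above form the public interface. The caller always passes the address of its own pointer variable; the allocator records that address in the bloc's variable field and rewrites the pointer whenever the bloc is relocated. A minimal usage sketch (example_use and the buffer sizes are invented; only the three entry points come from this file):

    /* Sketch: a caller owning one relocatable buffer.  The allocator may
       change `buf' behind the caller's back whenever blocs are allocated,
       resized or freed, so `buf' must be re-read after any such call.  */
    static POINTER buf;

    static void
    example_use (void)
    {
      if (r_alloc (&buf, 1024) == 0)
        abort ();                       /* allocation failed, buf is 0 */

      /* ... use buf[0] .. buf[1023] ... */

      if (r_re_alloc (&buf, 4096) == 0)
        abort ();                       /* could not grow; buf is unchanged */

      /* ... use buf[0] .. buf[4095]; buf may now point elsewhere ... */

      r_alloc_free (&buf);              /* releases the bloc and sets buf to 0 */
    }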
1057 | |
1058 /* Disable relocations, after making room for at least SIZE bytes | |
1059 of non-relocatable heap if possible. The relocatable blocs are | |
1060 guaranteed to hold still until thawed, even if this means that | |
1061 malloc must return a null pointer. */ | |
1062 | |
1063 void | |
1064 r_alloc_freeze (size) | |
1065 long size; | |
1066 { | |
1067 if (! r_alloc_initialized) | |
1068 init_ralloc (); | |
1069 | |
1070 /* If already frozen, we can't make any more room, so don't try. */ | |
1071 if (r_alloc_freeze_level > 0) | |
1072 size = 0; | |
1073 /* If we can't get the amount requested, half is better than nothing. */ | |
1074 while (size > 0 && r_alloc_sbrk (size) == 0) | |
1075 size /= 2; | |
1076 ++r_alloc_freeze_level; | |
1077 if (size > 0) | |
1078 r_alloc_sbrk (-size); | |
1079 } | |
1080 | |
1081 void | |
1082 r_alloc_thaw () | |
1083 { | |
1084 | |
1085 if (! r_alloc_initialized) | |
1086 init_ralloc (); | |
1087 | |
1088 if (--r_alloc_freeze_level < 0) | |
1089 abort (); | |
1090 | |
1091 /* This frees all unused blocs. It is not too inefficient, as the resize | |
1092 and bcopy are done only once. Afterwards, all unreferenced blocs are | 
1093 already shrunk to zero size. */ | |
1094 if (!r_alloc_freeze_level) | |
1095 { | |
1096 bloc_ptr *b = &first_bloc; | |
1097 while (*b) | |
1098 if (!(*b)->variable) | |
1099 free_bloc (*b); | |
1100 else | |
1101 b = &(*b)->next; | |
1102 } | |
1103 } | |
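r_alloc_freeze and r_alloc_thaw bracket stretches of code that hold raw pointers into relocatable data and therefore must not see any bloc move; while the freeze level is positive, blocs hold still even if that forces an allocation to fail, and thawing frees the blocs that were only logically freed in the meantime. A short pairing sketch (with_pinned_blocs is a made-up name; the work in the middle is hypothetical):

    /* Sketch: keep bloc data pinned while raw pointers into it are live.  */
    static void
    with_pinned_blocs (void)
    {
      r_alloc_freeze (0);       /* request no extra non-relocatable room */

      /* ... work with raw pointers into bloc data; nothing relocates ... */

      r_alloc_thaw ();          /* relocation allowed again; unused blocs freed */
    }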
1104 | |
464 | 1105 |
465 /* The hook `malloc' uses for the function which gets more space | 1106 /* The hook `malloc' uses for the function which gets more space |
466 from the system. */ | 1107 from the system. */ |
467 extern POINTER (*__morecore) (); | 1108 extern POINTER (*__morecore) (); |
468 | 1109 |
476 | 1117 |
477 r_alloc_initialized = 1; | 1118 r_alloc_initialized = 1; |
478 real_morecore = __morecore; | 1119 real_morecore = __morecore; |
479 __morecore = r_alloc_sbrk; | 1120 __morecore = r_alloc_sbrk; |
480 | 1121 |
481 virtual_break_value = break_value = (*real_morecore) (0); | 1122 first_heap = last_heap = &heap_base; |
1123 first_heap->next = first_heap->prev = NIL_HEAP; | |
1124 first_heap->start = first_heap->bloc_start | |
1125 = virtual_break_value = break_value = (*real_morecore) (0); | |
482 if (break_value == NIL) | 1126 if (break_value == NIL) |
483 abort (); | 1127 abort (); |
484 | 1128 |
485 page_size = PAGE; | 1129 page_size = PAGE; |
486 extra_bytes = ROUNDUP (50000); | 1130 extra_bytes = ROUNDUP (50000); |
487 | 1131 |
488 page_break_value = (POINTER) ROUNDUP (break_value); | 1132 #ifdef DOUG_LEA_MALLOC |
489 | 1133 mallopt (M_TOP_PAD, 64 * 4096); |
490 /* From eirik@elf.IThaca.ny.US (Eirik Fuller): | 1134 #else |
491 The extra call to real_morecore guarantees that the end of the | 1135 #if 0 /* Hasn't been synched yet */ |
1136 /* Give GNU malloc's morecore some hysteresis | |
1137 so that we move all the relocatable blocks much less often. */ | |
1138 __malloc_extra_blocks = 64; | |
1139 #endif | |
1140 #endif | |
1141 | |
1142 first_heap->end = (POINTER) ROUNDUP (first_heap->start); | |
1143 | |
1144 /* The extra call to real_morecore guarantees that the end of the | |
492 address space is a multiple of page_size, even if page_size is | 1145 address space is a multiple of page_size, even if page_size is |
493 not really the page size of the system running the binary in | 1146 not really the page size of the system running the binary in |
494 which page_size is stored. This allows a binary to be built on a | 1147 which page_size is stored. This allows a binary to be built on a |
495 system with one page size and run on a system with a smaller page | 1148 system with one page size and run on a system with a smaller page |
496 size. (Such as compiling on a Sun 4/260 4.1.3 and running on a | 1149 size. */ |
497 Sun 4/65 4.1.3: 8k pages at compile time, 4k pages at run time.) | 1150 (*real_morecore) (first_heap->end - first_heap->start); |
498 */ | |
499 (*real_morecore) (page_break_value - break_value); | |
500 | 1151 |
501 /* Clear the rest of the last page; this memory is in our address space | 1152 /* Clear the rest of the last page; this memory is in our address space |
502 even though it is after the sbrk value. */ | 1153 even though it is after the sbrk value. */ |
503 | |
504 /* Doubly true, with the additional call that explicitly adds the | 1154 /* Doubly true, with the additional call that explicitly adds the |
505 rest of that page to the address space. */ | 1155 rest of that page to the address space. */ |
506 memset (break_value, 0, (page_break_value - break_value)); | 1156 memset (first_heap->start, 0, first_heap->end - first_heap->start); |
507 /* Also from eirik@elf.IThaca.ny.US */ | 1157 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end; |
508 virtual_break_value = break_value = page_break_value; | |
509 use_relocatable_buffers = 1; | 1158 use_relocatable_buffers = 1; |
510 } | 1159 } |
1160 | |
1161 #if defined (emacs) && defined (DOUG_LEA_MALLOC) | |
1162 | |
1163 /* Reinitialize the morecore hook variables after restarting a dumped | |
1164 Emacs. This is needed when using Doug Lea's malloc from GNU libc. */ | |
1165 void | |
1166 r_alloc_reinit () | |
1167 { | |
1168 /* Only do this if the hook has been reset, so that we don't get an | |
1169 infinite loop, in case Emacs was linked statically. */ | |
1170 if (__morecore != r_alloc_sbrk) | |
1171 { | |
1172 real_morecore = __morecore; | |
1173 __morecore = r_alloc_sbrk; | |
1174 } | |
1175 } | |
1176 #if 0 | |
1177 #ifdef DEBUG | |
1178 | |
1179 void | |
1180 r_alloc_check () | |
1181 { | |
1182 int found = 0; | |
1183 heap_ptr h, ph = 0; | |
1184 bloc_ptr b, pb = 0; | |
1185 | |
1186 if (!r_alloc_initialized) | |
1187 return; | |
1188 | |
1189 assert (first_heap); | |
1190 assert (last_heap->end <= (POINTER) sbrk (0)); | |
1191 assert ((POINTER) first_heap < first_heap->start); | |
1192 assert (first_heap->start <= virtual_break_value); | |
1193 assert (virtual_break_value <= first_heap->end); | |
1194 | |
1195 for (h = first_heap; h; h = h->next) | |
1196 { | |
1197 assert (h->prev == ph); | |
1198 assert ((POINTER) ROUNDUP (h->end) == h->end); | |
1199 #if 0 /* ??? The code in ralloc.c does not really try to ensure | |
1200 the heap start has any sort of alignment. | |
1201 Perhaps it should. */ | |
1202 assert ((POINTER) MEM_ROUNDUP (h->start) == h->start); | |
1203 #endif | |
1204 assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start); | |
1205 assert (h->start <= h->bloc_start && h->bloc_start <= h->end); | |
1206 | |
1207 if (ph) | |
1208 { | |
1209 assert (ph->end < h->start); | |
1210 assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start); | |
1211 } | |
1212 | |
1213 if (h->bloc_start <= break_value && break_value <= h->end) | |
1214 found = 1; | |
1215 | |
1216 ph = h; | |
1217 } | |
1218 | |
1219 assert (found); | |
1220 assert (last_heap == ph); | |
1221 | |
1222 for (b = first_bloc; b; b = b->next) | |
1223 { | |
1224 assert (b->prev == pb); | |
1225 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data); | |
1226 assert ((SIZE) MEM_ROUNDUP (b->size) == b->size); | |
1227 | |
1228 ph = 0; | |
1229 for (h = first_heap; h; h = h->next) | |
1230 { | |
1231 if (h->bloc_start <= b->data && b->data + b->size <= h->end) | |
1232 break; | |
1233 ph = h; | |
1234 } | |
1235 | |
1236 assert (h); | |
1237 | |
1238 if (pb && pb->data + pb->size != b->data) | |
1239 { | |
1240 assert (ph && b->data == h->bloc_start); | |
1241 while (ph) | |
1242 { | |
1243 if (ph->bloc_start <= pb->data | |
1244 && pb->data + pb->size <= ph->end) | |
1245 { | |
1246 assert (pb->data + pb->size + b->size > ph->end); | |
1247 break; | |
1248 } | |
1249 else | |
1250 { | |
1251 assert (ph->bloc_start + b->size > ph->end); | |
1252 } | |
1253 ph = ph->prev; | |
1254 } | |
1255 } | |
1256 pb = b; | |
1257 } | |
1258 | |
1259 assert (last_bloc == pb); | |
1260 | |
1261 if (last_bloc) | |
1262 assert (last_bloc->data + last_bloc->size == break_value); | |
1263 else | |
1264 assert (first_heap->bloc_start == break_value); | |
1265 } | |
1266 #endif /* DEBUG */ | |
1267 #endif /* 0 */ | |
1268 | |
1269 #endif | |
1270 | |
511 #else /* HAVE_MMAP */ | 1271 #else /* HAVE_MMAP */ |
512 | 1272 |
513 /* | 1273 /* |
514 A relocating allocator built using the mmap(2) facility available | 1274 A relocating allocator built using the mmap(2) facility available |
515 in some OSes. Based on another version written by Paul Flinders, | 1275 in some OSes. Based on another version written by Paul Flinders, |