/* Block-relocating memory allocator.
   Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.

Synched Up with: FSF 20.2 (non-mmap portion only)
*/

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them. This means allowing for a possible
   hole between the first bloc and the end of malloc storage. */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h> /* for getpagesize() */
#endif

#ifdef emacs

#include "lisp.h"

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the object pointed to has a
   size of 1. */
#if 0 /* Arithmetic on void* is a GCC extension. */
#ifdef __STDC__
typedef void *POINTER;
#else
typedef unsigned char *POINTER;
#endif
#endif /* 0 */

/* Unconditionally use unsigned char * for this. */
typedef unsigned char *POINTER;

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
#include <malloc.h>
#endif

#include "getpagesize.h"

#include <string.h>
void refill_memory_reserve (void);

#else /* Not emacs. */

#include <stddef.h>

typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#endif /* emacs. */

void init_ralloc (void);

#define NIL ((POINTER) 0)


#if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC)

/* A flag to indicate whether we have initialized ralloc yet. For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only. On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up. */
static int r_alloc_initialized = 0;


/* Declarations for working with the malloc, ralloc, and system breaks. */

/* Function to set the real break value. */
static POINTER (*real_morecore) (ptrdiff_t size);

/* The break value, as seen by malloc (). */
static POINTER virtual_break_value;

/* The break value, viewed by the relocatable blocs. */
static POINTER break_value;

/* This is the size of a page. We round memory requests to this boundary. */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes. This
   must be a multiple of page_size. */
static int extra_bytes;

/* Macros for rounding. Note that rounding to any value is possible
   by changing the definition of PAGE. */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
                       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))
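
/* For illustration (assuming a 4096-byte page and an 8-byte double,
   which are typical but not guaranteed):
     ROUNDUP (5000)    == 8192  -- rounded up to the next page boundary
     ALIGNED (0x2000)  is true  -- 8192 is page-aligned
     MEM_ROUNDUP (13)  == 16    -- next multiple of sizeof (double) */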

/* Data structures of heaps and blocs. */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap. When that happens, we start a new heap. */
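
/* Schematically, a single heap looks roughly like this (a sketch of
   the layout described above, not to scale):

     start           bloc_start                free            end
       |  malloc data    |  relocatable blocs    |    unused     |

   first_heap and last_heap delimit the heap list; heaps never move,
   blocs may. */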

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap. */
  POINTER start;
  /* End of memory range of this heap. */
  POINTER end;
  /* Start of relocatable data in this heap. */
  POINTER bloc_start;
  /* Start of unused space in this heap. */
  POINTER free;
  /* First bloc in this heap. */
  struct bp *first_bloc;
  /* Last bloc in this heap. */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers. */
static struct heap heap_base;

/* Head and tail of the list of heaps. */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected. They may only appear while r_alloc_freeze > 0, and will be
   freed when the arena is thawed. Currently, these blocs are not reusable,
   while the arena is frozen. Very inefficient. */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  size_t size;
  POINTER new_data;   /* temporarily used for relocation */
  struct heap *heap;  /* Heap this bloc is in. */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs. */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place. */
static int r_alloc_freeze_level;

/* Obtain SIZE bytes of space. If enough space is not presently available
   in our process reserve, (i.e., (page_break_value - break_value)),
   this means getting more page-aligned space from the system.

   Return non-zero if all went well, or zero if we couldn't allocate
   the memory. */

/* Functions to get and return memory from the system. */

/* Find the heap that ADDRESS falls within. */

static heap_ptr
find_heap (POINTER address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system. If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use. It just returns the address of SIZE bytes that
   fall within a single heap. If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory. */

static POINTER
obtain (POINTER address, size_t size)
{
  heap_ptr heap;
  size_t already_available;

  /* Find the heap that ADDRESS falls within. */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps. */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space. */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      size_t get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk. Make a new heap. */

          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

          if ((*real_morecore) (bloc_start - new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often. */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
        - (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
        return 0;

      last_heap->end += get;
    }

  return address;
}
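
/* A typical call, as made from get_bloc () below (an illustrative
   sketch only):

     POINTER data = obtain (break_value, size);

   i.e. the caller asks for SIZE bytes starting at the current
   break_value and then records the returned space as in use itself. */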

#if 0
/* Obtain SIZE bytes of space and return a pointer to the new area.
   If we could not allocate the space, return zero. */

static POINTER
get_more_space (size_t size)
{
  POINTER ptr = break_value;
  if (obtain (size))
    return ptr;
  else
    return 0;
}
#endif

/* Return unused heap space to the system when the amount of space
   beyond break_value is large enough. This can shrink the last heap,
   or remove it entirely. */

static void
relinquish (void)
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all. */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes. */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
        {
          /* This heap should have no blocs in it. */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            abort ();

          /* Return the last heap, with its header, to the system. */
          excess = (char *)last_heap->end - (char *)last_heap->start;
          last_heap = last_heap->prev;
          last_heap->next = NIL_HEAP;
        }
      else
        {
          excess = (char *) last_heap->end
            - (char *) ROUNDUP ((char *)last_heap->end - excess);
          last_heap->end -= excess;
        }

      if ((*real_morecore) (- excess) == 0)
        abort ();
    }
}

/* Return the total size in use by relocating allocator,
   above where malloc gets space. */

long r_alloc_size_in_use (void);
long
r_alloc_size_in_use (void)
{
  return break_value - virtual_break_value;
}

/* The meat - allocating, freeing, and relocating blocs. */


/* Find the bloc referenced by the address in PTR. Returns a pointer
   to that block. */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block. */

static bloc_ptr
get_bloc (size_t size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
        free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use. */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs. */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs. */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}

/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP. If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value. */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere! */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again. */
      while (heap && address + b->size > heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs. */
      if (heap == NIL_HEAP)
        {
          register bloc_ptr tb = b;
          register size_t s = 0;

          /* Add up the size of all the following blocs. */
          while (tb != NIL_BLOC)
            {
              if (tb->variable)
                s += tb->size;

              tb = tb->next;
            }

          /* Get that space. */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start. */
      b->new_data = address;
      if (b->variable)
        address += b->size;
      b = b->next;
    }

  return 1;
}
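
/* Callers use relocate_blocs () as the first half of a two-phase move:
   this function only computes each bloc's new_data; afterwards (as in
   resize_bloc () and r_alloc_sbrk () below) the caller memmoves each
   bloc to new_data and updates *b->variable and b->data. Nothing is
   copied here. */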

#if 0 /* unused */
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE. */

static void
reorder_bloc (bloc_ptr bloc, bloc_ptr before)
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is. */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE. */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
#endif /* unused */

/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC. */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC. */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP. */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC. */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one. */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in. */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B. */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B. */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP. */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty. */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}

/* Resize BLOC to SIZE bytes. This relocates the blocs
   that come after BLOC in memory. */

static int
resize_bloc (bloc_ptr bloc, size_t size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  size_t old_size;

  /* No need to ever call this if arena is frozen, bug somewhere! */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap. */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
             : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
      if (!bloc->variable)
        {
          bloc->size = 0;
          bloc->data = bloc->new_data;
        }
      else
        {
          memmove (bloc->new_data, bloc->data, old_size);
          memset (bloc->new_data + old_size, 0, size - old_size);
          *bloc->variable = bloc->data = bloc->new_data;
        }
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  return 1;
}

/* Free BLOC from the chain of blocs, relocating any blocs above it
   and returning BLOC->size bytes to the free area. */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP. */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
        heap->first_bloc = bloc->next;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
        heap->last_bloc = bloc->prev;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}

/* Interface routines. */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary. If relocatable blocs are in use, this means relocating
   them. This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package. */

POINTER r_alloc_sbrk (ptrdiff_t size);
POINTER
r_alloc_sbrk (ptrdiff_t size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    init_ralloc ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space. GNU malloc would reclaim an
         extra space if we passed an unaligned one. But we could
         not always find a space which is contiguous to the previous. */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      size_t get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough. */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (POINTER) ROUNDUP (h->start);
        }

      /* If not found, obtain more space. */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (POINTER) ROUNDUP (virtual_break_value);
          else
            address = (POINTER) ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* This is no clean solution - no idea how to do it better. */
          if (r_alloc_freeze_level)
            return NIL;

          /* There is a bug here: if the above obtain call succeeded, but the
             relocate_blocs call below does not succeed, we need to free
             the memory that we got with obtain. */

          /* Move all blocs upward. */
          if (! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (POINTER)(h+1) <= new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header. */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }
      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to. */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;
          else
            last_heap = first_heap;
        }

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      size_t excess = (char *)first_heap->bloc_start
        - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
        {
          /* We found an additional space below the first heap */
          first_heap->start = (POINTER) ((char *)virtual_break_value + size);
        }
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
                 ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE. A pointer to
   the data is returned in *PTR. PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area. Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero. */

POINTER r_alloc (POINTER *ptr, size_t size);
POINTER
r_alloc (POINTER *ptr, size_t size)
{
  bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    init_ralloc ();

  new_bloc = get_bloc (size);
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
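
/* Typical use of the interface routines (an illustrative sketch;
   `text' is a hypothetical caller variable, not part of this file):

     POINTER text;
     if (r_alloc (&text, 8192) == 0)
       ...handle the allocation failure...
     ...
     if (r_re_alloc (&text, 16384) == 0)
       ...handle the failure; text is left unchanged...
     ...
     r_alloc_free (&text);   -- also stores 0 in text

   Because blocs may be relocated by any r_alloc_sbrk () call, callers
   must always reload the data address from the registered variable. */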

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated. */

void r_alloc_free (POINTER *ptr);
void
r_alloc_free (POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    init_ralloc ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it. Not very efficient. We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero. */

POINTER r_re_alloc (POINTER *ptr, size_t size);
POINTER
r_re_alloc (POINTER *ptr, size_t size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    init_ralloc ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here? */
      /* I think so too, but not if it's too expensive... */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
        {
          resize_bloc (bloc, MEM_ROUNDUP (size));
          /* Never mind if this fails, just do nothing... */
          /* It *should* be infallible! */
        }
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
        {
          bloc_ptr new_bloc;
          new_bloc = get_bloc (MEM_ROUNDUP (size));
          if (new_bloc)
            {
              new_bloc->variable = ptr;
              *ptr = new_bloc->data;
              bloc->variable = (POINTER *) NIL;
            }
          else
            return NIL;
        }
      else
        {
          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
            return NIL;
        }
    }
  return *ptr;
}

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible. The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer. */

void r_alloc_freeze (long size);
void
r_alloc_freeze (long size)
{
  if (! r_alloc_initialized)
    init_ralloc ();

  /* If already frozen, we can't make any more room, so don't try. */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing. */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void r_alloc_thaw (void);
void
r_alloc_thaw (void)
{

  if (! r_alloc_initialized)
    init_ralloc ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs. It is not too inefficient, as the resize
     and memmove is done only once. Afterwards, all unreferenced blocs are
     already shrunk to zero size. */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
        if (!(*b)->variable)
          free_bloc (*b);
        else
          b = &(*b)->next;
    }
}
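
/* A typical pairing (illustrative sketch): a caller that must hand out
   pointers into bloc data for a while does

     r_alloc_freeze (0);
     ...use the (now pinned) bloc data...
     r_alloc_thaw ();

   Freezes nest; blocs stay put until the matching thaw. */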


/* The hook `malloc' uses for the function which gets more space
   from the system. */
#ifndef DOUG_LEA_MALLOC
extern POINTER (*__morecore) (ptrdiff_t size);
#endif

/* Initialize various things for memory allocation. */

void
init_ralloc (void)
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
  __morecore =
#ifdef __GNUC__
    (__typeof__ (__morecore))
#endif
    r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

#ifdef DOUG_LEA_MALLOC
  mallopt (M_TOP_PAD, 64 * 4096);
#else
#if 0 /* Hasn't been synched yet */
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often. */
  __malloc_extra_blocks = 64;
#endif
#endif

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored. This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size. */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value. */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space. */
  memset (first_heap->start, 0, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs. This is needed when using Doug Lea's malloc from GNU libc. */
void r_alloc_reinit (void);
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically. */
  if ( (POINTER (*) (ptrdiff_t)) __morecore != r_alloc_sbrk)
    {
      real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
      __morecore =
#ifdef __GNUC__
        (__typeof__ (__morecore))
#endif
        r_alloc_sbrk;
    }
}
#if 0
#ifdef DEBUG

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
         the heap start has any sort of alignment.
         Perhaps it should. */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((size_t) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */
#endif /* 0 */

#endif

#else /* HAVE_MMAP */

/*
   A relocating allocator built using the mmap(2) facility available
   in some OSes. Based on another version written by Paul Flinders,
   from which code (and comments) are snarfed.

   The OS should support mmap() with MAP_ANONYMOUS attribute, or have
   /dev/zero. It should support private memory mapping.

   Paul Flinders wrote a version which works well for systems that
   allow callers to specify (virtual) addresses to mmap().
   Unfortunately, such a scheme doesn't work for certain systems like
   HP-UX that have a system-wide virtual->real address map, and
   consequently impose restrictions on the virtual address values
   permitted.

   NB: The mapping scheme in HP-UX is motivated by the inverted page
   table design in some HP processors.

   This alternate implementation allows for the addresses to be
   optionally chosen by the system. Fortunately, buffer allocation
   doesn't insist upon the contiguous memory which Flinders' scheme
   provides (and this one doesn't).

   We don't really provide for hysteresis here, but add some metering
   to monitor how poorly the allocator actually works. See the
   documentation for `mmap-hysteresis'.

   This implementation actually cycles through the blocks allocated
   via mmap() and only sends a block to free() if it wasn't one of them.
   Unfortunately, this is O(n) in the number of mmapped blocks. (Not
   really, as we have a hash table which tries to reduce the cost.)
   Also, this dereferences the pointer passed, so it would cause a
   segfault if garbage was passed to it. */
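
/* For reference, the two mapping styles mentioned above look roughly
   like this (an illustrative sketch, not the exact calls used below):

     With MAP_ANONYMOUS:
       mmap (0, len, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

     Without it, mapping /dev/zero:
       fd = open ("/dev/zero", O_RDWR);
       mmap (0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
*/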

#include <fcntl.h>
#include <sys/mman.h>
#include <stdio.h>

typedef void *VM_ADDR;  /* VM addresses */
static const VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */

/* Configuration for relocating allocator. */

/* #define MMAP_GENERATE_ADDRESSES */
/* Define this if you want Emacs to manage the address table.
   It is not recommended unless you have major problems with the
   default scheme, which allows the OS to pick addresses. */

/* USELESS_LOWER_ADDRESS_BITS defines the number of bits which can be
   discarded while computing the hash, as they're always zero. The
   default is appropriate for a page size of 4096 bytes. */

#define USELESS_LOWER_ADDRESS_BITS 12


/* Size of hash table for inverted VM_ADDR->MMAP_HANDLE lookup */

#define MHASH_PRIME 89


/* Whether we want to enable metering of some ralloc performance.
   This incurs a constant penalty for each mmap operation. */

#define MMAP_METERING


/* Rename the following to protect against some smartness elsewhere.
   We need access to the allocator used for non-mmap allocation
   elsewhere, in case we get passed a handle that we didn't allocate
   ourselves. Currently, this default allocator is also used to
   maintain local structures for relocatable blocks. */

#define UNDERLYING_MALLOC malloc
#define UNDERLYING_FREE free
#define UNDERLYING_REALLOC realloc

/* MAP_ADDRCHOICE_FLAG is set to MAP_FIXED if MMAP_GENERATE_ADDRESSES
   is defined, and MAP_VARIABLE otherwise. Some losing systems don't
   define the _FIXED/_VARIABLE flags, in which case it is set to 0 */

#ifdef MMAP_GENERATE_ADDRESSES
# ifdef MAP_FIXED
#  define MAP_ADDRCHOICE_FLAG MAP_FIXED
# endif
#else /* !MMAP_GENERATE_ADDRESSES */
# ifdef MAP_VARIABLE
#  define MAP_ADDRCHOICE_FLAG MAP_VARIABLE
# endif
#endif /* MMAP_GENERATE_ADDRESSES */

/* Default case. */
#ifndef MAP_ADDRCHOICE_FLAG
# define MAP_ADDRCHOICE_FLAG 0
#endif /* MAP_ADDRCHOICE_FLAG */

#ifdef MAP_ANONYMOUS
# define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG | MAP_ANONYMOUS)
#else
# define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG)
#endif /* MAP_ANONYMOUS */


/* (ptf): A flag to indicate whether we have initialized ralloc yet. For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only. On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.

   If we're using mmap this flag has three possible values
   0 - initial value
   1 - Normal value when running temacs. In this case buffers
       are allocated using malloc so that any data that they
       contain becomes part of the undumped executable.
   2 - Normal value when running emacs */
static int r_alloc_initialized = 0;

/* (ptf): Macros for rounding. Note that rounding to any value is possible
   by changing the definition of PAGE. */
#define PAGE (getpagesize ())
#define PAGES_FOR(size) (((unsigned long int) (size) + page_size - 1)/page_size)
#define ROUNDUP(size) ((unsigned long int)PAGES_FOR(size)*page_size)
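
/* For example, with a 4096-byte page: PAGES_FOR (5000) == 2 and
   ROUNDUP (5000) == 8192. (Illustrative figures only; the real page
   size comes from getpagesize ().) */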


/* DEV_ZERO_FD is -1 normally, but for systems without MAP_ANONYMOUS
   points to a file descriptor opened on /dev/zero */

static int DEV_ZERO_FD = -1;


/* We actually need a data structure that can be usefully structured
   based on the VM address, and allows an ~O(1) lookup on an arbitrary
   address, i.e. a hash table. Maybe the XEmacs hash table can be
   coaxed enough. At the moment, we use lookup on a hash table to
   decide whether to do an O(n) search on the malloced block list.
   Addresses are hashed to a bucket modulo MHASH_PRIME. */


/* We settle for a standard doubly-linked-list. The dynarr type isn't
   very amenable to deletion of items in the middle, so we conjure up
   yet another stupid datastructure. The structure is maintained as a
   ring, and the singleton ring has the sole element as its left and
   right neighbours. */

static void init_MHASH_table (void); /* Forward reference */

typedef struct alloc_dll
{
  size_t size;               /* #bytes currently in use */
  size_t space_for;          /* #bytes we really have */
  POINTER* aliased_address;  /* Address of aliased variable, to tweak if relocating */
  VM_ADDR vm_addr;           /* VM address returned by mmap */
  struct alloc_dll *left;    /* Left link in circular doubly linked list */
  struct alloc_dll *right;
} *MMAP_HANDLE;

static MMAP_HANDLE mmap_start = 0; /* Head of linked list */
static size_t page_size = 0;       /* Size of VM pages */
static Fixnum mmap_hysteresis;     /* Logically a "size_t" */

/* Get a new handle for a fresh block. */
static MMAP_HANDLE
new_mmap_handle (size_t nsiz)
{
  MMAP_HANDLE h = (MMAP_HANDLE) UNDERLYING_MALLOC( sizeof (struct alloc_dll));
  if ( h == 0) return 0;
  h->size = nsiz;
  if (mmap_start == 0)
    {
      init_MHASH_table ();
      mmap_start = h; mmap_start->left = h; mmap_start->right = h;
    }
  {
    MMAP_HANDLE prev = mmap_start->left;
    MMAP_HANDLE nex = mmap_start;

    /* Four pointers need fixing. */
    h->right = nex;
    h->left = prev;
    prev->right = h;
    nex->left = h;
  }
  return h;
}

/* Find a handle given the aliased address using linear search. */
static MMAP_HANDLE
find_mmap_handle_lsearch (POINTER *alias)
{
  MMAP_HANDLE h = mmap_start;
  if (h == 0) return 0;
  do {
    if (h->aliased_address == alias && *alias == h->vm_addr)
      return h;
    h = h->right;
  } while( h != mmap_start );
  return 0; /* Bogus alias passed. */
}

/* Free a handle. */
static void
free_mmap_handle (MMAP_HANDLE h)
{
  MMAP_HANDLE prev = h->left;
  MMAP_HANDLE nex = h->right;
  if (prev == h || nex == h) /* In fact, this should be && */
    { /* We're the singleton dll */
      UNDERLYING_FREE( h );  /* Free the sole item */
      mmap_start = 0; return;
    }
  else if (h == mmap_start)
    {
      mmap_start = nex; /* Make sure mmap_start isn't bogus. */
    }
  prev->right = nex;
  nex->left = prev;
  UNDERLYING_FREE( h );
}

/* A simple hash table to speed up the inverted lookup of
   VM_ADDR->MMAP_HANDLE. We maintain the number of hits for a
   particular bucket. We invalidate a hash table entry during block
   deletion if the hash has cached the deleted block's address. */

/* Simple hash check. */
struct {
  int n_hits;          /* How many addresses map to this? */
  MMAP_HANDLE handle;  /* What is the current handle? */
  VM_ADDR addr;        /* What is its VM address? */
} MHASH_HITS[ MHASH_PRIME ];

static void
init_MHASH_table (void)
{
  int i = 0;
  for (; i < MHASH_PRIME; i++)
    {
      MHASH_HITS[i].n_hits = 0;
      MHASH_HITS[i].addr = 0;
      MHASH_HITS[i].handle = 0;
    }
}

/* Compute the hash value for an address. */
static int
MHASH (VM_ADDR addr)
{
#if (LONGBITS == 64)
  unsigned long int addr_shift = (unsigned long int)(addr) >> USELESS_LOWER_ADDRESS_BITS;
#else
  unsigned int addr_shift = (unsigned int)(addr) >> USELESS_LOWER_ADDRESS_BITS;
#endif
  int hval = addr_shift % MHASH_PRIME; /* We could have addresses which are -ve
                                          when converted to signed ints */
  return ((hval >= 0) ? hval : MHASH_PRIME + hval);
}
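
/* For example, with 4096-byte pages (USELESS_LOWER_ADDRESS_BITS == 12),
   an address of 0x40003000 hashes as follows (illustrative only):
     0x40003000 >> 12 == 0x40003 == 262147;  262147 % 89 == 42. */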

/* Add a VM address with its corresponding handle to the table. */
static void
MHASH_ADD (VM_ADDR addr, MMAP_HANDLE h)
{
  int kVal = MHASH( addr );
  if (MHASH_HITS[kVal].n_hits++ == 0)
    { /* Only overwrite the table if there were no hits so far. */
      MHASH_HITS[kVal].addr = addr;
      MHASH_HITS[kVal].handle = h;
    }
}

/* Delete a VM address entry from the hash table. */
static void
MHASH_DEL (VM_ADDR addr)
{
  int kVal = MHASH( addr );
  MHASH_HITS[kVal].n_hits--;
  if (addr == MHASH_HITS[kVal].addr)
    {
      MHASH_HITS[kVal].addr = 0; /* Invalidate cache. */
      MHASH_HITS[kVal].handle = 0;
    }
}

/* End of hash buckets */

/* Metering malloc performance. */
#ifdef MMAP_METERING
/* If we're metering, we introduce some extra symbols to aid the noble
   cause of bloating XEmacs core size. */

static Lisp_Object Qmmap_times_mapped;
static Lisp_Object Qmmap_pages_mapped;
static Lisp_Object Qmmap_times_unmapped;
static Lisp_Object Qmmap_times_remapped;
static Lisp_Object Qmmap_didnt_copy;
static Lisp_Object Qmmap_pages_copied;
static Lisp_Object Qmmap_average_bumpval;
static Lisp_Object Qmmap_wastage;
static Lisp_Object Qmmap_live_pages;
static Lisp_Object Qmmap_addr_looked_up;
static Lisp_Object Qmmap_hash_worked;
static Lisp_Object Qmmap_addrlist_size;

#define M_Map 0             /* How many times allocated? */
#define M_Pages_Map 1       /* How many pages allocated? */
#define M_Unmap 2           /* How many times freed? */
#define M_Remap 3           /* How many times increased in size? */
#define M_Didnt_Copy 4      /* How many times didn't need to copy? */
#define M_Copy_Pages 5      /* Total # pages copied */
#define M_Average_Bumpval 6 /* Average bump value */
#define M_Wastage 7         /* Remaining (unused space) */
#define M_Live_Pages 8      /* #live pages */
#define M_Address_Lookup 9  /* How many times did we need to check if an addr is in the block? */
#define M_Hash_Worked 10    /* How many times did the simple hash check work? */
#define M_Addrlist_Size 11  /* What is the size of the XEmacs memory map? */

#define N_Meterables 12     /* Total number of meterables */
#define MEMMETER(x) {x;}
#define MVAL(x) (meter[x])
#define MLVAL(x) (make_int (meter[x]))
static int meter[N_Meterables];

DEFUN ("mmap-allocator-status", Fmmap_allocator_status, 0, 0, 0, /*
Return some information about mmap-based allocator.

mmap-times-mapped: number of times r_alloc was called.
mmap-pages-mapped: number of pages mapped by r_alloc calls only.
mmap-times-unmapped: number of times r_free was called.
mmap-times-remapped: number of times r_re_alloc was called.
mmap-didnt-copy: number of times re-alloc did NOT have to move the block.
mmap-pages-copied: total number of pages copied.
mmap-average-bumpval: average increase in size demanded to re-alloc.
mmap-wastage: total number of bytes allocated, but not currently in use.
mmap-live-pages: total number of pages live.
mmap-addr-looked-up: total number of times needed to check if addr is in block.
mmap-hash-worked: total number of times the simple hash check worked.
mmap-addrlist-size: number of entries in address picking list.
*/
       ())
{
  Lisp_Object result = Qnil;

  result = cons3 (Qmmap_addrlist_size, MLVAL (M_Addrlist_Size), result);
  result = cons3 (Qmmap_hash_worked, MLVAL (M_Hash_Worked), result);
  result = cons3 (Qmmap_addr_looked_up, MLVAL (M_Address_Lookup), result);
  result = cons3 (Qmmap_live_pages, MLVAL (M_Live_Pages), result);
  result = cons3 (Qmmap_wastage, MLVAL (M_Wastage), result);
  result = cons3 (Qmmap_average_bumpval,MLVAL (M_Average_Bumpval), result);
  result = cons3 (Qmmap_pages_copied, MLVAL (M_Copy_Pages), result);
  result = cons3 (Qmmap_didnt_copy, MLVAL (M_Didnt_Copy), result);
  result = cons3 (Qmmap_times_remapped, MLVAL (M_Remap), result);
  result = cons3 (Qmmap_times_unmapped, MLVAL (M_Unmap), result);
  result = cons3 (Qmmap_pages_mapped, MLVAL (M_Pages_Map), result);
  result = cons3 (Qmmap_times_mapped, MLVAL (M_Map), result);

  return result;
}

#else /* !MMAP_METERING */

#define MEMMETER(x)
#define MVAL(x)

#endif /* MMAP_METERING */

static MMAP_HANDLE
find_mmap_handle (POINTER *alias)
{
  int kval = MHASH( *alias );
  MEMMETER( MVAL(M_Address_Lookup)++ )
  switch( MHASH_HITS[kval].n_hits)
    {
    case 0:
      MEMMETER( MVAL( M_Hash_Worked )++ )
      return 0;

    case 1:
      if (*alias == MHASH_HITS[kval].addr)
        {
          MEMMETER( MVAL( M_Hash_Worked) ++ );
          return MHASH_HITS[kval].handle;
        }
      /* FALL THROUGH */
    default:
      return find_mmap_handle_lsearch( alias );
    } /* switch */
}

/*
   Some kernels don't like being asked to pick addresses for mapping
   themselves---IRIX is known to become extremely slow if mmap is
   passed a ZERO as the first argument. In such cases, we use an
   address map which is managed local to the XEmacs process. The
   address map maintains an ordered linked list of (address, size,
   occupancy) triples ordered by the absolute address. Initially, a
   large address area is marked as being empty. The address picking
   scheme takes bites off the first block which is still empty and
   large enough. If mmap with the specified address fails, it is
   marked unavailable and not attempted thereafter. The scheme will
   keep fragmenting the large empty block until it finds an address
   which can be successfully mmapped, or until there are no free
   blocks of the given size left.

   Note that this scheme, given its first-fit strategy, is prone to
   fragmentation of the first part of memory earmarked for this
   purpose. [ACP Vol I]. We can't use the workaround of using a
   randomized first fit because we don't want to presume too much
   about the memory map. Instead, we try to coalesce empty or
   unavailable blocks at any available opportunity. */
+ − 1691
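/* A concrete (illustrative) walk-through of the scheme implemented
   below, starting from the single empty block that Addr_Block_initialize()
   creates at address 0x04000000 with size 0x0c000000 (192 MB):

       [0x04000000  empty  0x0c000000]

   A first New_Addr_Block (0x10000) splits off the front and mmaps it:

       [0x04000000  occupied  0x10000] -> [0x04010000  empty  0x0bff0000]

   If that mmap had failed, the front piece would instead be flagged
   `unavailable' and the search would move on to the remainder.  A later
   Free_Addr_Block on the occupied piece munmaps it, marks it empty, and
   Coalesce_Addr_Blocks() merges it back into its empty neighbour,
   restoring the original single block.  The request size here is made up
   for the example; only the 0x04000000/0x0c000000 starting block comes
   from the code below.  */
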
/* Initialization procedure for address picking scheme */
static void Addr_Block_initialize (void);

/* Get a suitable VM_ADDR via mmap */
static VM_ADDR New_Addr_Block (size_t sz);

/* Free a VM_ADDR allocated via New_Addr_Block */
static void Free_Addr_Block (VM_ADDR addr, size_t sz);

#ifdef MMAP_GENERATE_ADDRESSES
/* Implementation of the above three calls for the case when XEmacs is in
   charge of picking the addresses. */

/* The enum denotes the status of the following block. */
typedef enum { empty = 0, occupied, unavailable } addr_status;

typedef struct addr_chain
{
  POINTER addr;
  size_t sz;
  addr_status flag;
  struct addr_chain *next;
} ADDRESS_BLOCK, *ADDRESS_CHAIN;
/* NB: empty and unavailable blocks are concatenated. */

static ADDRESS_CHAIN addr_chain = 0;
/* Start off the address block chain with a humongous address block
   which is empty to start with.  Note that addr_chain is invariant
   WRT the addition/deletion of address blocks because of the assert
   in Coalesce_Addr_Blocks() and the strict ordering of blocks by
   their address. */
static void
Addr_Block_initialize (void)
{
  MEMMETER (MVAL (M_Addrlist_Size)++)
  addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC (sizeof (ADDRESS_BLOCK));
  addr_chain->next = 0;			/* Last block in chain */
  addr_chain->sz = 0x0c000000;		/* Size: 192 MB of address space */
  addr_chain->addr = (POINTER) (0x04000000);
  addr_chain->flag = empty;
}

/* Coalesce address blocks if they are contiguous.  Only empty and
   unavailable slots are coalesced. */
static void
Coalesce_Addr_Blocks (void)
{
  ADDRESS_CHAIN p;
  for (p = addr_chain; p; p = p->next)
    {
      while (p->next && p->flag == p->next->flag)
        {
          ADDRESS_CHAIN np;
          np = p->next;

          if (p->flag == occupied) break; /* No cigar */

          /* Check if the addresses are contiguous. */
          if (p->addr + p->sz != np->addr) break;

          MEMMETER (MVAL (M_Addrlist_Size)--)
          /* We can coalesce these two. */
          p->sz += np->sz;
          p->next = np->next;
          assert (np != addr_chain); /* We're not freeing the head of the list. */
          UNDERLYING_FREE (np);
        }
    } /* for all p */
}
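
/* For example, if freeing a mapping leaves two adjacent empty entries

       [0x04000000  empty  0x10000] -> [0x04010000  empty  0x20000] -> ...

   the loop above notices that 0x04000000 + 0x10000 == 0x04010000 and the
   flags match, so the two collapse into

       [0x04000000  empty  0x30000] -> ...

   Occupied entries are never merged, and the node freed is always the
   second of the pair, so the head of the chain (addr_chain) stays valid.
   (Sizes invented for the example.)  */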

/* Get an empty address block of specified size. */
static VM_ADDR
New_Addr_Block (size_t sz)
{
  ADDRESS_CHAIN p = addr_chain;
  VM_ADDR new_addr = VM_FAILURE_ADDR;
  for (; p; p = p->next)
    {
      if (p->flag == empty && p->sz > sz)
        {
          /* Create a new entry following p which is empty. */
          ADDRESS_CHAIN remainder =
            (ADDRESS_CHAIN) UNDERLYING_MALLOC (sizeof (ADDRESS_BLOCK));
          remainder->next = p->next;
          remainder->flag = empty;
          remainder->addr = p->addr + sz;
          remainder->sz = p->sz - sz;

          MEMMETER (MVAL (M_Addrlist_Size)++)

          /* Now make p become an occupied block with the appropriate size */
          p->next = remainder;
          p->sz = sz;
          new_addr = mmap ((VM_ADDR) p->addr, p->sz, PROT_READ|PROT_WRITE,
                           MAP_FLAGS, DEV_ZERO_FD, 0);
          if (new_addr == VM_FAILURE_ADDR)
            {
              p->flag = unavailable;
              continue;
            }
          p->flag = occupied;
          break;
        }
    }
  Coalesce_Addr_Blocks ();
  return new_addr;
}

/* Free an address block.  We mark the block as being empty, and attempt to
   do any coalescing that may have resulted from this. */
static void
Free_Addr_Block (VM_ADDR addr, size_t sz)
{
  ADDRESS_CHAIN p = addr_chain;
  for (; p; p = p->next)
    {
      if (p->addr == addr)
        {
          if (p->sz != sz) abort (); /* ACK! Shouldn't happen at all. */
          munmap ((VM_ADDR) p->addr, p->sz);
          p->flag = empty;
          break;
        }
    }
  if (!p) abort (); /* Can't happen... we've got a block to free which is not
                       in the address list. */
  Coalesce_Addr_Blocks ();
}
#else /* !MMAP_GENERATE_ADDRESSES */
/* This is an alternate (simpler) implementation in cases where the
   address is picked by the kernel. */

static void
Addr_Block_initialize (void)
{
  /* Nothing. */
}

static VM_ADDR
New_Addr_Block (size_t sz)
{
  return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS,
               DEV_ZERO_FD, 0);
}

static void
Free_Addr_Block (VM_ADDR addr, size_t sz)
{
  munmap ((caddr_t) addr, sz);
}

#endif /* MMAP_GENERATE_ADDRESSES */


/* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */

/*
   r_alloc (POINTER, SIZE): Allocate a relocatable area with the start
   address aliased to the first parameter.
*/

POINTER r_alloc (POINTER *ptr, size_t size);
POINTER
r_alloc (POINTER *ptr, size_t size)
{
  MMAP_HANDLE mh;

  switch (r_alloc_initialized)
    {
    case 0:
      abort ();
    case 1:
      *ptr = (POINTER) UNDERLYING_MALLOC (size);
      break;
    default:
      mh = new_mmap_handle (size);
      if (mh)
        {
          size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
          size_t mmapped_size = ROUNDUP (size + hysteresis);
          MEMMETER (MVAL (M_Map)++)
          MEMMETER (MVAL (M_Pages_Map) += (mmapped_size / page_size))
          MEMMETER (MVAL (M_Wastage) += mmapped_size - size)
          MEMMETER (MVAL (M_Live_Pages) += (mmapped_size / page_size))
          mh->vm_addr = New_Addr_Block (mmapped_size);
          if (mh->vm_addr == VM_FAILURE_ADDR)
            {
              free_mmap_handle (mh);	/* Free the loser */
              *ptr = 0;
              return 0;			/* ralloc failed due to mmap() failure. */
            }
          MHASH_ADD (mh->vm_addr, mh);
          mh->space_for = mmapped_size;
          mh->aliased_address = ptr;
          *ptr = (POINTER) mh->vm_addr;
        }
      else
        *ptr = 0;			/* Malloc of block failed */
      break;
    }
  return *ptr;
}

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated. */

void r_alloc_free (POINTER *ptr);
void
r_alloc_free (POINTER *ptr)
{
  switch (r_alloc_initialized)
    {
    case 0:
      abort ();

    case 1:
      UNDERLYING_FREE (*ptr);		/* Certain this is from the heap. */
      break;

    default:
      {
        MMAP_HANDLE dead_handle = find_mmap_handle (ptr);
        /* Check if we've got it. */
        if (dead_handle == 0)	/* Didn't find it in the list of mmap handles */
          {
            UNDERLYING_FREE (*ptr);
          }
        else
          {
            MEMMETER (MVAL (M_Wastage) -= (dead_handle->space_for - dead_handle->size))
            MEMMETER (MVAL (M_Live_Pages) -= (dead_handle->space_for / page_size))
            MEMMETER (MVAL (M_Unmap)++)
            MHASH_DEL (dead_handle->vm_addr);
            Free_Addr_Block (dead_handle->vm_addr, dead_handle->space_for);
            free_mmap_handle (dead_handle);
          }
      }
      break;
    } /* r_alloc_initialized */
  *ptr = 0;				/* Zap the pointer's contents. */
}
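
#if 0
/* Illustrative sketch only (not compiled): how a client of the interface
   above typically drives it.  The caller owns a single POINTER cell and
   always reaches the storage through it, because the allocator is free to
   move the block and rewrite the cell on a later r_re_alloc (defined
   below).  The names `example_text', `example_resize' and `example_done'
   are invented for this example.  */
static POINTER example_text;

static void
example_resize (size_t wanted)
{
  if (example_text == 0)
    {
      if (r_alloc (&example_text, wanted) == 0)
        abort ();		/* out of address space or memory */
    }
  else if (r_re_alloc (&example_text, wanted) == 0)
    abort ();			/* block left untouched on failure */
}

static void
example_done (void)
{
  r_alloc_free (&example_text);	/* unmaps (or frees) and zeroes the cell */
}
#endif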

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero. */

POINTER r_re_alloc (POINTER *ptr, size_t sz);
POINTER
r_re_alloc (POINTER *ptr, size_t sz)
{
  if (r_alloc_initialized == 0)
    {
      abort ();
      return 0;			/* suppress compiler warning */
    }
  else if (r_alloc_initialized == 1)
    {
      POINTER tmp = (POINTER) realloc (*ptr, sz);
      if (tmp)
        *ptr = tmp;
      return tmp;
    }
  else
    {
      size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
      size_t actual_sz = ROUNDUP (sz + hysteresis);
      MMAP_HANDLE h = find_mmap_handle (ptr);
      VM_ADDR new_vm_addr;

      if (h == 0)		/* Was allocated using malloc. */
        {
          POINTER tmp = (POINTER) UNDERLYING_REALLOC (*ptr, sz);
          if (tmp)
            *ptr = tmp;
          return tmp;
        }

      MEMMETER (
        MVAL (M_Average_Bumpval) =
          (((double) MVAL (M_Remap) * MVAL (M_Average_Bumpval)) + (sz - h->size))
          / (double) (MVAL (M_Remap) + 1))
      MEMMETER (MVAL (M_Remap)++)
      if (h->space_for > sz)	/* We've got some more room */
        {			/* Also, if a shrinkage was asked for. */
          MEMMETER (MVAL (M_Didnt_Copy)++)
          MEMMETER (MVAL (M_Wastage) -= (sz - h->size))
          /* We're pretty dumb at handling shrinkage.  We should check for
             a larger gap than the standard hysteresis allowable, and if so,
             shrink the number of pages.  Right now, we simply reset the size
             component and return. */
          h->size = sz;
          return *ptr;
        }

      new_vm_addr = New_Addr_Block (actual_sz);
      if (new_vm_addr == VM_FAILURE_ADDR)
        {			/* Failed to realloc. */
          /* *ptr = 0; */
          return 0;
        }

      MHASH_ADD (new_vm_addr, h);
      /* We got a block OK: now we should move the old contents to the
         new address.  We use the old size of this block. */
      memmove (new_vm_addr, h->vm_addr, h->size);
      MHASH_DEL (h->vm_addr);
      Free_Addr_Block (h->vm_addr, h->space_for); /* Unmap old area. */

      MEMMETER (MVAL (M_Copy_Pages) += (h->space_for / page_size))
      MEMMETER (MVAL (M_Live_Pages) -= (h->space_for / page_size))
      MEMMETER (MVAL (M_Live_Pages) += (actual_sz / page_size))
      MEMMETER (MVAL (M_Wastage) -= (h->space_for - h->size))
      MEMMETER (MVAL (M_Wastage) += (actual_sz - sz))

      /* Update the block data structure. */
      h->space_for = actual_sz;	/* New total space */
      h->size = sz;		/* New (requested) size */
      h->vm_addr = new_vm_addr;	/* New VM start address */
      h->aliased_address = ptr;	/* Change alias to reflect block relocation. */
      *ptr = (POINTER) h->vm_addr;
      return *ptr;
    }
}
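
/* Worked example of the sizing above (numbers invented, and assuming
   ROUNDUP rounds its argument up to a whole number of pages, which is how
   it is used together with page_size throughout this file): with
   page_size = 4096 and mmap-hysteresis = 8192, growing a block to
   sz = 10000 maps ROUNDUP (10000 + 8192) = 20480 bytes, i.e. 5 pages.
   h->space_for is then 20480, so any later r_re_alloc requesting fewer
   than 20480 bytes falls into the `h->space_for > sz' branch and is
   satisfied without remapping or copying; only a larger request allocates
   a new block and memmoves the old contents across.  */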

/* Initialize various things for memory allocation. */
void
init_ralloc (void)
{
  int i = 0;
  if (r_alloc_initialized > 1)
    return;			/* used to return 1 */

#ifdef PDUMP
  /* Under pdump, we need to activate ralloc on the first go. */
  ++r_alloc_initialized;
#endif
  if (++r_alloc_initialized == 1)
    return;			/* used to return 1 */

  Addr_Block_initialize ();	/* Initialize the address picker, if required. */
  page_size = PAGE;
  assert (page_size > 0);	/* getpagesize() bogosity check. */

#ifndef MAP_ANONYMOUS
  DEV_ZERO_FD = open ("/dev/zero", O_RDWR);
  if (DEV_ZERO_FD < 0)
    /* Failed.  Perhaps we should abort here? */
    return;			/* used to return 0 */
#endif

#ifdef MMAP_METERING
  for (i = 0; i < N_Meterables; i++)
    {
      meter[i] = 0;
    }
#endif /* MMAP_METERING */
}
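
/* A note on the r_alloc_initialized counter driving the switches above,
   as can be read off r_alloc, r_alloc_free and r_re_alloc: it is 0 until
   init_ralloc runs (any r_alloc call then aborts), 1 after the first
   initialization pass (the calls quietly fall back to the underlying
   malloc/realloc/free), and 2 or more once initialization has completed
   and the mmap machinery is in charge.  The extra increment under PDUMP
   above exists precisely to push the count past 1 on the first call.  */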

void
syms_of_ralloc (void)
{
#ifdef MMAP_METERING
  DEFSYMBOL (Qmmap_times_mapped);
  DEFSYMBOL (Qmmap_pages_mapped);
  DEFSYMBOL (Qmmap_times_unmapped);
  DEFSYMBOL (Qmmap_times_remapped);
  DEFSYMBOL (Qmmap_didnt_copy);
  DEFSYMBOL (Qmmap_pages_copied);
  DEFSYMBOL (Qmmap_average_bumpval);
  DEFSYMBOL (Qmmap_wastage);
  DEFSYMBOL (Qmmap_live_pages);
  DEFSYMBOL (Qmmap_addr_looked_up);
  DEFSYMBOL (Qmmap_hash_worked);
  DEFSYMBOL (Qmmap_addrlist_size);
  DEFSUBR (Fmmap_allocator_status);
#endif /* MMAP_METERING */
}

void
vars_of_ralloc (void)
{
  DEFVAR_INT ("mmap-hysteresis", &mmap_hysteresis /*
Extra room left at the end of an allocated arena,
so that a re-alloc requesting extra space smaller than this
does not actually cause a new arena to be allocated.

A negative value is considered equal to zero.  This is the
minimum amount of space guaranteed to be left at the end of
the arena.  Because allocation happens in multiples of the OS
page size, it is possible for more space to be left unused.
*/ );
  mmap_hysteresis = 0;
}

#endif /* HAVE_MMAP */