comparison: src/ralloc.c @ 412:697ef44129c6 (tag r21-2-14)

Import from CVS: tag r21-2-14

author    cvs
date      Mon, 13 Aug 2007 11:20:41 +0200
parents   a86b2b5e0111
children  11054d720c21
comparing 411:12e008d41344 (before) with 412:697ef44129c6 (after)

The import replays the r21-2-14 release branch over the trunk file: size_t/ptrdiff_t revert to the branch's SIZE/long typedefs, memmove calls revert to the safe_bcopy macro, several definitions return to K&R style, and the morecore hooks are now set through a new SET_FUN_PTR macro.
@@ -51,10 +51,12 @@
 #endif /* 0 */
 
 /* Unconditionally use unsigned char * for this. */
 typedef unsigned char *POINTER;
 
+typedef unsigned long SIZE;
+
 #ifdef DOUG_LEA_MALLOC
 #define M_TOP_PAD -2
 #include <malloc.h>
 #endif
 
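The hunk above, together with the one that follows, replaces the C library's size_t with a file-local SIZE typedef. A minimal sketch of how the two branches combine once both hunks apply (the `emacs` build macro is an assumption about the surrounding, unshown conditional):

/* Sketch: inside an Emacs build the allocator uses a bare
   unsigned long; standalone builds defer to size_t. */
#ifdef emacs
typedef unsigned char *POINTER;
typedef unsigned long SIZE;
#else
#include <stddef.h>
typedef void *POINTER;
typedef size_t SIZE;
#endif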
@@ -65,19 +67,21 @@
 
 #else /* Not emacs. */
 
 #include <stddef.h>
 
+typedef size_t SIZE;
 typedef void *POINTER;
 
 #include <unistd.h>
 #include <malloc.h>
 #include <string.h>
 
 #endif /* emacs. */
 
 void init_ralloc (void);
+#define safe_bcopy(x, y, z) memmove (y, x, z)
 
 #define NIL ((POINTER) 0)
 
 
 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC)
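The macro added above keeps the old BSD bcopy argument order (source first) while delegating to memmove, so existing bcopy-style call sites get overlap-safe copies without being rewritten. A standalone demonstration of the argument swap:

#include <stdio.h>
#include <string.h>

/* bcopy-style (src, dst, len) order on top of memmove, so
   overlapping moves are safe (plain memcpy would not allow this). */
#define safe_bcopy(x, y, z) memmove (y, x, z)

int
main (void)
{
  char buf[] = "abcdef";

  /* Shift the first four bytes one position right; source and
     destination overlap. */
  safe_bcopy (buf, buf + 1, 4);
  printf ("%s\n", buf);		/* prints "aabcdf" */
  return 0;
}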
@@ -92,11 +96,11 @@
 
 
 /* Declarations for working with the malloc, ralloc, and system breaks. */
 
 /* Function to set the real break value. */
-static POINTER (*real_morecore) (ptrdiff_t size);
+static POINTER (*real_morecore) (long size);
 
 /* The break value, as seen by malloc (). */
 static POINTER virtual_break_value;
 
 /* The break value, viewed by the relocatable blocs. */
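real_morecore saves whatever hook previously extended the malloc arena, and the rest of the file relies on its sbrk-like contract. A self-contained illustration of that contract, using a static arena as a hypothetical stand-in for the real break:

/* Sketch: morecore (n) moves the break by N bytes (possibly
   negative) and returns the break's PREVIOUS value; morecore (0)
   just reports the current break; failure returns zero, which is
   the __morecore convention this file imitates. */
static char arena[4096];
static char *arena_break = arena;

static void *
fake_morecore (long size)
{
  char *old = arena_break;

  if (arena_break + size < arena
      || arena_break + size > arena + sizeof arena)
    return 0;			/* out of space */
  arena_break += size;
  return old;
}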
@@ -179,11 +183,11 @@
 {
   struct bp *next;
   struct bp *prev;
   POINTER *variable;
   POINTER data;
-  size_t size;
+  SIZE size;
   POINTER new_data;	/* temporarily used for relocation */
   struct heap *heap;	/* Heap this bloc is in. */
 } *bloc_ptr;
 
 #define NIL_BLOC ((bloc_ptr) 0)
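The key to relocation is the `variable` field above: each bloc records the address of its owner's pointer, so moving the data can transparently repoint the owner. A reduced sketch (names are illustrative, not the file's own):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mini_bloc
{
  void **variable;	/* address of the owner's pointer */
  void *data;
  size_t size;
};

static void
relocate (struct mini_bloc *b, void *new_data)
{
  memmove (new_data, b->data, b->size);
  *b->variable = b->data = new_data;	/* owner's pointer follows */
}

int
main (void)
{
  char *user = calloc (1, 8);
  struct mini_bloc b = { (void **) &user, user, 8 };
  void *old = user;

  strcpy (user, "hello");
  relocate (&b, calloc (1, 8));	/* move the bloc elsewhere */
  free (old);

  printf ("%s\n", user);	/* still "hello", at the new address */
  return 0;
}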
@@ -239,14 +243,14 @@
 
    Return the address of the space if all went well, or zero if we couldn't
    allocate the memory. */
 
 static POINTER
-obtain (POINTER address, size_t size)
+obtain (POINTER address, SIZE size)
 {
   heap_ptr heap;
-  size_t already_available;
+  SIZE already_available;
 
   /* Find the heap that ADDRESS falls within. */
   for (heap = last_heap; heap; heap = heap->prev)
     {
       if (heap->start <= address && address <= heap->end)
@@ -269,11 +273,11 @@
   /* If we can't fit them within any existing heap,
      get more space. */
   if (heap == NIL_HEAP)
     {
       POINTER new = (*real_morecore)(0);
-      size_t get;
+      SIZE get;
 
       already_available = (char *)last_heap->end - (char *)address;
 
       if (new != last_heap->end)
         {
@@ -319,11 +323,11 @@
 #if 0
 /* Obtain SIZE bytes of space and return a pointer to the new area.
    If we could not allocate the space, return zero. */
 
 static POINTER
-get_more_space (size_t size)
+get_more_space (SIZE size)
 {
   POINTER ptr = break_value;
   if (obtain (size))
     return ptr;
   else
@@ -333,11 +337,11 @@
 
 /* Note that SIZE bytes of space have been relinquished by the process.
    If SIZE is more than a page, return the space to the system. */
 
 static void
-relinquish (void)
+relinquish ()
 {
   register heap_ptr h;
   int excess = 0;
 
   /* Add the amount of space beyond break_value
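relinquish () gives whole trailing pages back to the system once the unused tail grows past the slack the allocator keeps to damp thrashing. A hedged sketch of the underlying arithmetic (the real function walks the heap list and calls real_morecore with a negative argument; page_size and extra_bytes mirror globals defined earlier in the file, values here illustrative):

static long page_size = 4096;
static long extra_bytes = 8192;

static long
reclaimable_bytes (char *heap_end, char *break_ptr)
{
  long excess = heap_end - break_ptr;	/* unused tail of the last heap */

  if (excess <= extra_bytes * 2)
    return 0;				/* too little to bother the OS */
  excess -= extra_bytes;		/* keep the slack */
  return excess / page_size * page_size;	/* whole pages only */
}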
@@ -382,11 +386,11 @@
 /* Return the total size in use by relocating allocator,
    above where malloc gets space. */
 
 long r_alloc_size_in_use (void);
 long
-r_alloc_size_in_use (void)
+r_alloc_size_in_use ()
 {
   return break_value - virtual_break_value;
 }
 
 /* The meat - allocating, freeing, and relocating blocs. */
@@ -414,11 +418,11 @@
 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
    Returns a pointer to the new bloc, or zero if we couldn't allocate
    memory for the new block. */
 
 static bloc_ptr
-get_bloc (size_t size)
+get_bloc (SIZE size)
 {
   register bloc_ptr new_bloc;
   register heap_ptr heap;
 
   if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
@@ -495,11 +499,11 @@
   /* If BLOC won't fit in any heap,
      get enough new space to hold BLOC and all following blocs. */
   if (heap == NIL_HEAP)
     {
       register bloc_ptr tb = b;
-      register size_t s = 0;
+      register SIZE s = 0;
 
       /* Add up the size of all the following blocs. */
       while (tb != NIL_BLOC)
         {
           if (tb->variable)
@@ -622,16 +626,16 @@
 
 /* Resize BLOC to SIZE bytes. This relocates the blocs
    that come after BLOC in memory. */
 
 static int
-resize_bloc (bloc_ptr bloc, size_t size)
+resize_bloc (bloc_ptr bloc, SIZE size)
 {
   register bloc_ptr b;
   heap_ptr heap;
   POINTER address;
-  size_t old_size;
+  SIZE old_size;
 
   /* No need to ever call this if arena is frozen, bug somewhere! */
   if (r_alloc_freeze_level)
     abort();
 
@@ -675,22 +679,22 @@
               b->size = 0;
               b->data = b->new_data;
             }
           else
             {
-              memmove (b->new_data, b->data, b->size);
+              safe_bcopy (b->data, b->new_data, b->size);
               *b->variable = b->data = b->new_data;
             }
         }
       if (!bloc->variable)
         {
           bloc->size = 0;
           bloc->data = bloc->new_data;
         }
       else
         {
-          memmove (bloc->new_data, bloc->data, old_size);
+          safe_bcopy (bloc->data, bloc->new_data, old_size);
           memset (bloc->new_data + old_size, 0, size - old_size);
           *bloc->variable = bloc->data = bloc->new_data;
         }
     }
   else
@@ -702,11 +706,11 @@
               b->size = 0;
               b->data = b->new_data;
             }
           else
             {
-              memmove (b->new_data, b->data, b->size);
+              safe_bcopy (b->data, b->new_data, b->size);
               *b->variable = b->data = b->new_data;
             }
         }
     }
 
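In the grow branch above, the bloc's old bytes are copied and the newly added tail is cleared, so callers see calloc-like zeros in the extension. The same two steps in isolation, as a minimal sketch:

#include <stdlib.h>
#include <string.h>

/* Grow a buffer into a new place, preserving old contents and
   zero-filling the added tail, as resize_bloc does above. */
static void *
grow_zeroed (void *old_data, size_t old_size, size_t new_size)
{
  void *new_data = malloc (new_size);	/* stand-in for the new bloc space */

  if (!new_data)
    return NULL;
  memmove (new_data, old_data, old_size);
  memset ((char *) new_data + old_size, 0, new_size - old_size);
  return new_data;
}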
@@ -784,13 +788,13 @@
 
    If we're out of memory, we should return zero, to imitate the other
    __morecore hook values - in particular, __default_morecore in the
    GNU malloc package. */
 
-POINTER r_alloc_sbrk (ptrdiff_t size);
+POINTER r_alloc_sbrk (long size);
 POINTER
-r_alloc_sbrk (ptrdiff_t size)
+r_alloc_sbrk (long size)
 {
   register bloc_ptr b;
   POINTER address;
 
   if (! r_alloc_initialized)
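r_alloc_sbrk is what gets installed as malloc's __morecore hook (see init_ralloc further down): every request malloc makes for more core first passes through the relocating allocator, which shifts blocs out of the way before the break really moves. Schematically, as a sketch rather than the file's code:

/* The interposition pattern: save the original hook, substitute
   our own, and fall through to the saved one after making room. */
static void *(*saved_morecore) (long);	/* the system's allocator */

static void *
interposing_morecore (long size)
{
  /* ... relocate blocs out of the requested region (elided) ... */
  return saved_morecore (size);		/* then really move the break */
}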
@@ -807,11 +811,11 @@
       /* Allocate a page-aligned space. GNU malloc would reclaim an
          extra space if we passed an unaligned one. But we could
          not always find a space which is contiguous to the previous. */
       POINTER new_bloc_start;
       heap_ptr h = first_heap;
-      size_t get = ROUNDUP (size);
+      SIZE get = ROUNDUP (size);
 
       address = (POINTER) ROUNDUP (virtual_break_value);
 
       /* Search the list upward for a heap which is large enough. */
       while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
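ROUNDUP and MEM_ROUNDUP (defined earlier in the file, not shown in this comparison) align a size or address upward. The page-size variant conventionally uses the power-of-two mask trick; an assumed but typical definition, exercised below:

#include <stdio.h>

#define PAGE 4096
#define ROUND_UP_TO_PAGE(x) (((x) + PAGE - 1) & ~(unsigned long) (PAGE - 1))

int
main (void)
{
  printf ("%lu\n", ROUND_UP_TO_PAGE (1UL));	/* 4096 */
  printf ("%lu\n", ROUND_UP_TO_PAGE (4096UL));	/* 4096: already aligned */
  printf ("%lu\n", ROUND_UP_TO_PAGE (4097UL));	/* 8192 */
  return 0;
}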
@@ -856,11 +860,11 @@
       /* Note that (POINTER)(h+1) <= new_bloc_start since
          get >= page_size, so the following does not destroy the heap
          header. */
       for (b = last_bloc; b != NIL_BLOC; b = b->prev)
         {
-          memmove (b->new_data, b->data, b->size);
+          safe_bcopy (b->data, b->new_data, b->size);
           *b->variable = b->data = b->new_data;
         }
 
       h->bloc_start = new_bloc_start;
 
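The loop above deliberately walks from last_bloc downward: when every bloc moves toward higher addresses, copying the highest bloc first guarantees no bloc's source bytes are clobbered by another bloc's destination (memmove only protects overlap within a single copy). The same discipline in miniature:

#include <stdio.h>
#include <string.h>

int
main (void)
{
  char region[12] = "abcdefgh";	/* two 4-byte blocs: "abcd", "efgh" */

  /* Shift both blocs up by 2: copy the higher bloc first... */
  memmove (region + 6, region + 4, 4);	/* "efgh" -> offset 6 */
  memmove (region + 2, region + 0, 4);	/* "abcd" -> offset 2 */

  printf ("%.4s %.4s\n", region + 2, region + 6);	/* abcd efgh */
  return 0;
}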
@@ -887,11 +891,11 @@
 
       memset (address, 0, size);
     }
   else /* size < 0 */
     {
-      size_t excess = (char *)first_heap->bloc_start
+      SIZE excess = (char *)first_heap->bloc_start
                       - ((char *)virtual_break_value + size);
 
       address = virtual_break_value;
 
       if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
@@ -902,11 +906,11 @@
 
           relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
 
           for (b = first_bloc; b != NIL_BLOC; b = b->next)
             {
-              memmove (b->new_data, b->data, b->size);
+              safe_bcopy (b->data, b->new_data, b->size);
               *b->variable = b->data = b->new_data;
             }
         }
 
   if ((char *)virtual_break_value + size < (char *)first_heap->start)
@@ -935,13 +939,13 @@
    before allocating a new area. Not yet done.
 
    If we can't allocate the necessary memory, set *PTR to zero, and
    return zero. */
 
-POINTER r_alloc (POINTER *ptr, size_t size);
+POINTER r_alloc (POINTER *ptr, SIZE size);
 POINTER
-r_alloc (POINTER *ptr, size_t size)
+r_alloc (POINTER *ptr, SIZE size)
 {
   bloc_ptr new_bloc;
 
   if (! r_alloc_initialized)
     init_ralloc ();
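Callers hand r_alloc the address of their pointer and must reload through it after any allocation, since the data may have moved in the meantime. A schematic usage sketch (prototypes simplified to void * here; in this file POINTER is unsigned char * and SIZE is the typedef introduced above, and r_alloc_free is the matching release call defined elsewhere in ralloc.c):

extern void *r_alloc (void **ptr, unsigned long size);
extern void r_alloc_free (void **ptr);

static void
example_use (void)
{
  void *buffer;

  if (r_alloc (&buffer, 16384) == 0)
    return;			/* out of memory */
  /* ... use buffer, re-reading the pointer after any allocation,
     because a later r_alloc/r_re_alloc may have moved the data ... */
  r_alloc_free (&buffer);
}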
@@ -994,13 +998,13 @@
    Change *PTR to reflect the new bloc, and return this value.
 
    If more memory cannot be allocated, then leave *PTR unchanged, and
    return zero. */
 
-POINTER r_re_alloc (POINTER *ptr, size_t size);
+POINTER r_re_alloc (POINTER *ptr, SIZE size);
 POINTER
-r_re_alloc (POINTER *ptr, size_t size)
+r_re_alloc (POINTER *ptr, SIZE size)
 {
   register bloc_ptr bloc;
 
   if (! r_alloc_initialized)
     init_ralloc ();
@@ -1076,21 +1080,21 @@
     r_alloc_sbrk (-size);
 }
 
 void r_alloc_thaw (void);
 void
-r_alloc_thaw (void)
+r_alloc_thaw ()
 {
 
   if (! r_alloc_initialized)
     init_ralloc ();
 
   if (--r_alloc_freeze_level < 0)
     abort ();
 
   /* This frees all unused blocs. It is not too inefficient, as the resize
-     and memmove is done only once. Afterwards, all unreferenced blocs are
+     and bcopy is done only once. Afterwards, all unreferenced blocs are
      already shrunk to zero size. */
   if (!r_alloc_freeze_level)
     {
       bloc_ptr *b = &first_bloc;
       while (*b)
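freeze/thaw nest like a lock on bloc addresses: while r_alloc_freeze_level is positive, blocs stay put and raw pointers into relocatable data remain valid, and an unbalanced thaw aborts, as the hunk shows. A sketch of the intended pairing (r_alloc_freeze is defined just above this hunk; its tail, "r_alloc_sbrk (-size);", opens the context):

extern void r_alloc_freeze (long size);
extern void r_alloc_thaw (void);

static void
with_stable_addresses (void (*fn) (void))
{
  r_alloc_freeze (0);	/* pin all blocs; 0 = reserve no extra space */
  fn ();		/* raw pointers into relocatable data are safe here */
  r_alloc_thaw ();	/* must balance the freeze, or abort () fires */
}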
@@ -1103,28 +1107,27 @@
 
 
 /* The hook `malloc' uses for the function which gets more space
    from the system. */
 #ifndef DOUG_LEA_MALLOC
-extern POINTER (*__morecore) (ptrdiff_t size);
+extern POINTER (*__morecore) (long size);
 #endif
 
 /* Initialize various things for memory allocation. */
+
+#define SET_FUN_PTR(fun_ptr, fun_val) \
+  (*((void **) (&fun_ptr)) = ((void *) (fun_val)))
 
 void
 init_ralloc (void)
 {
   if (r_alloc_initialized)
     return;
 
   r_alloc_initialized = 1;
-  real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
-  __morecore =
-#ifdef __GNUC__
-    (__typeof__ (__morecore))
-#endif
-    r_alloc_sbrk;
+  SET_FUN_PTR (real_morecore, __morecore);
+  SET_FUN_PTR (__morecore, r_alloc_sbrk);
 
   first_heap = last_heap = &heap_base;
   first_heap->next = first_heap->prev = NIL_HEAP;
   first_heap->start = first_heap->bloc_start
     = virtual_break_value = break_value = (*real_morecore) (0);
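SET_FUN_PTR replaces the old #ifdef __GNUC__ cast dance with a write through a void **; it silences the incompatible-pointer complaint at the cost of assuming function pointers round-trip through void * (true on the platforms this file targets, though ISO C does not promise it). A compilable illustration:

#include <stdio.h>

#define SET_FUN_PTR(fun_ptr, fun_val) \
  (*((void **) (&fun_ptr)) = ((void *) (fun_val)))

static long
twice (int x)
{
  return 2L * x;
}

int
main (void)
{
  long (*hook) (long) = 0;	/* deliberately different signature */

  SET_FUN_PTR (hook, twice);	/* compiles without a pointer cast */
  printf ("%p\n", (void *) hook);
  return 0;
}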
@@ -1167,29 +1170,25 @@
 
 /* Reinitialize the morecore hook variables after restarting a dumped
    Emacs. This is needed when using Doug Lea's malloc from GNU libc. */
 void r_alloc_reinit (void);
 void
-r_alloc_reinit (void)
+r_alloc_reinit ()
 {
   /* Only do this if the hook has been reset, so that we don't get an
      infinite loop, in case Emacs was linked statically. */
-  if ( (POINTER (*) (ptrdiff_t)) __morecore != r_alloc_sbrk)
+  if ( ((void*) __morecore) != (void *) (r_alloc_sbrk))
     {
-      real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
-      __morecore =
-#ifdef __GNUC__
-        (__typeof__ (__morecore))
-#endif
-        r_alloc_sbrk;
+      SET_FUN_PTR (real_morecore, __morecore);
+      SET_FUN_PTR (__morecore, r_alloc_sbrk);
     }
 }
 #if 0
 #ifdef DEBUG
 
 void
-r_alloc_check (void)
+r_alloc_check ()
 {
   int found = 0;
   heap_ptr h, ph = 0;
   bloc_ptr b, pb = 0;
 
@@ -1231,11 +1230,11 @@
 
   for (b = first_bloc; b; b = b->next)
     {
       assert (b->prev == pb);
       assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
-      assert ((size_t) MEM_ROUNDUP (b->size) == b->size);
+      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);
 
       ph = 0;
       for (h = first_heap; h; h = h->next)
         {
           if (h->bloc_start <= b->data && b->data + b->size <= h->end)
@@ -1317,11 +1316,11 @@
 #include <fcntl.h>
 #include <sys/mman.h>
 #include <stdio.h>
 
 typedef void *VM_ADDR;	/* VM addresses */
-static const VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
+static CONST VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
 
 /* Configuration for relocating allocator. */
 
 /* #define MMAP_GENERATE_ADDRESSES */
 /* Define this if you want Emacs to manage the address table.
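VM_FAILURE_ADDR records the convention the code checks against: mmap signals failure with (void *) -1 (MAP_FAILED in modern headers), never a null pointer. A minimal check, assuming a system that provides MAP_ANONYMOUS:

#include <sys/mman.h>
#include <stdio.h>

int
main (void)
{
  /* An intentionally bad request: zero length fails per POSIX. */
  void *p = mmap (0, 0, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (p == (void *) -1)		/* i.e. MAP_FAILED */
    perror ("mmap");
  return 0;
}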
@@ -1691,25 +1690,25 @@
 
 /* Initialization procedure for address picking scheme */
 static void Addr_Block_initialize(void);
 
 /* Get a suitable VM_ADDR via mmap */
-static VM_ADDR New_Addr_Block (size_t sz);
+static VM_ADDR New_Addr_Block( SIZE sz );
 
 /* Free a VM_ADDR allocated via New_Addr_Block */
-static void Free_Addr_Block (VM_ADDR addr, size_t sz);
+static void Free_Addr_Block( VM_ADDR addr, SIZE sz );
 
 #ifdef MMAP_GENERATE_ADDRESSES
 /* Implementation of the three calls for address picking when XEmacs is incharge */
 
 /* The enum denotes the status of the following block. */
 typedef enum { empty = 0, occupied, unavailable } addr_status;
 
 typedef struct addr_chain
 {
   POINTER addr;
-  size_t sz;
+  SIZE sz;
   addr_status flag;
   struct addr_chain *next;
 } ADDRESS_BLOCK, *ADDRESS_CHAIN;
 /* NB: empty and unavailable blocks are concatenated. */
 
@@ -1717,25 +1716,23 @@
 /* Start off the address block chain with a humongous address block
    which is empty to start with. Note that addr_chain is invariant
    WRT the addition/deletion of address blocks because of the assert
    in Coalesce() and the strict ordering of blocks by their address
 */
-static void
-Addr_Block_initialize (void)
+static void Addr_Block_initialize()
 {
   MEMMETER( MVAL( M_Addrlist_Size )++)
   addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ));
   addr_chain->next = 0;		/* Last block in chain */
   addr_chain->sz = 0x0c000000;	/* Size */
-  addr_chain->addr = (POINTER) (0x04000000);
+  addr_chain->addr = (POINTER) (0x04000000 | DATA_SEG_BITS);
   addr_chain->flag = empty;
 }
 
 /* Coalesce address blocks if they are contiguous. Only empty and
    unavailable slots are coalesced. */
-static void
-Coalesce_Addr_Blocks (void)
+static void Coalesce_Addr_Blocks()
 {
   ADDRESS_CHAIN p;
   for (p = addr_chain; p; p = p->next)
     {
       while (p->next && p->flag == p->next->flag)
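Coalesce_Addr_Blocks folds runs of neighbouring blocks that share a flag into one; the original asserts contiguity, while this reduced stand-in checks it explicitly. Types here are local substitutes for ADDRESS_BLOCK/ADDRESS_CHAIN, and the nodes are assumed heap-allocated:

#include <stdlib.h>

struct blk
{
  char *addr;
  unsigned long sz;
  int flag;
  struct blk *next;
};

static void
coalesce (struct blk *chain)
{
  struct blk *p;

  for (p = chain; p; p = p->next)
    while (p->next
           && p->flag == p->next->flag
           && p->addr + p->sz == p->next->addr)	/* contiguous in memory */
      {
        struct blk *dead = p->next;

        p->sz += dead->sz;	/* fold the neighbour into p... */
        p->next = dead->next;	/* ...and unlink it */
        free (dead);
      }
}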
@@ -1757,12 +1754,11 @@
         }
     } /* for all p */
 }
 
 /* Get an empty address block of specified size. */
-static VM_ADDR
-New_Addr_Block (size_t sz)
+static VM_ADDR New_Addr_Block( SIZE sz )
 {
   ADDRESS_CHAIN p = addr_chain;
   VM_ADDR new_addr = VM_FAILURE_ADDR;
   for (; p; p = p->next)
     {
@@ -1795,12 +1791,11 @@
   return new_addr;
 }
 
 /* Free an address block. We mark the block as being empty, and attempt to
    do any coalescing that may have resulted from this. */
-static void
-Free_Addr_Block (VM_ADDR addr, size_t sz)
+static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
 {
   ADDRESS_CHAIN p = addr_chain;
   for (; p; p = p->next )
     {
       if (p->addr == addr)
@@ -1817,42 +1812,39 @@
 }
 #else /* !MMAP_GENERATE_ADDRESSES */
 /* This is an alternate (simpler) implementation in cases where the
    address is picked by the kernel. */
 
-static void
-Addr_Block_initialize (void)
+static void Addr_Block_initialize(void)
 {
   /* Nothing. */
 }
 
-static VM_ADDR
-New_Addr_Block (size_t sz)
+static VM_ADDR New_Addr_Block( SIZE sz )
 {
   return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS,
                DEV_ZERO_FD, 0 );
 }
 
-static void
-Free_Addr_Block (VM_ADDR addr, size_t sz)
+static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
 {
   munmap ((caddr_t) addr, sz );
 }
 
 #endif /* MMAP_GENERATE_ADDRESSES */
 
 
 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */
 
 /*
- r_alloc (POINTER, SIZE): Allocate a relocatable area with the start
+ r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start
  address aliased to the first parameter.
  */
 
-POINTER r_alloc (POINTER *ptr, size_t size);
+POINTER r_alloc (POINTER *ptr, SIZE size);
 POINTER
-r_alloc (POINTER *ptr, size_t size)
+r_alloc (POINTER *ptr, SIZE size)
 {
   MMAP_HANDLE mh;
 
   switch(r_alloc_initialized)
     {
@@ -1863,12 +1855,12 @@
       break;
     default:
       mh = new_mmap_handle( size );
       if (mh)
         {
-          size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
-          size_t mmapped_size = ROUNDUP( size + hysteresis );
+          SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
+          SIZE mmapped_size = ROUNDUP( size + hysteresis );
           MEMMETER( MVAL(M_Map)++ )
           MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) )
           MEMMETER( MVAL(M_Wastage) += mmapped_size - size )
           MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) )
           mh->vm_addr = New_Addr_Block( mmapped_size );
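The two SIZE declarations above encode the mmap sizing rule: over-map by a hysteresis slack so a later r_re_alloc within the slack needs no new mapping, then round to whole pages. The rule in isolation (constants illustrative; the file's ROUNDUP does the page rounding):

static unsigned long
mapped_size_for (unsigned long size, long mmap_hysteresis,
                 unsigned long page_size)
{
  unsigned long slack = mmap_hysteresis > 0 ? mmap_hysteresis : 0;
  unsigned long want = size + slack;	/* request plus re-use headroom */

  return (want + page_size - 1) / page_size * page_size;
}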
@@ -1932,13 +1924,13 @@
    Change *PTR to reflect the new bloc, and return this value.
 
    If more memory cannot be allocated, then leave *PTR unchanged, and
    return zero. */
 
-POINTER r_re_alloc (POINTER *ptr, size_t sz);
+POINTER r_re_alloc (POINTER *ptr, SIZE sz);
 POINTER
-r_re_alloc (POINTER *ptr, size_t sz)
+r_re_alloc (POINTER *ptr, SIZE sz)
 {
   if (r_alloc_initialized == 0)
     {
       abort ();
       return 0; /* suppress compiler warning */
@@ -1950,12 +1942,12 @@
       *ptr = tmp;
       return tmp;
     }
   else
     {
-      size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
-      size_t actual_sz = ROUNDUP( sz + hysteresis );
+      SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
+      SIZE actual_sz = ROUNDUP( sz + hysteresis );
       MMAP_HANDLE h = find_mmap_handle( ptr );
       VM_ADDR new_vm_addr;
 
       if ( h == 0 )	/* Was allocated using malloc. */
         {