comparison src/ralloc.c @ 398:74fd4e045ea6 r21-2-29
Import from CVS: tag r21-2-29
author | cvs |
---|---|
date | Mon, 13 Aug 2007 11:13:30 +0200 |
parents | 8626e4521993 |
children | a86b2b5e0111 |

397:f4aeb21a5bad | 398:74fd4e045ea6 |
---|---|
51 #endif /* 0 */ | 51 #endif /* 0 */ |
52 | 52 |
53 /* Unconditionally use unsigned char * for this. */ | 53 /* Unconditionally use unsigned char * for this. */ |
54 typedef unsigned char *POINTER; | 54 typedef unsigned char *POINTER; |
55 | 55 |
56 typedef unsigned long SIZE; | |
57 | |
58 #ifdef DOUG_LEA_MALLOC | 56 #ifdef DOUG_LEA_MALLOC |
59 #define M_TOP_PAD -2 | 57 #define M_TOP_PAD -2 |
60 #include <malloc.h> | 58 #include <malloc.h> |
61 #endif | 59 #endif |
62 | 60 |
67 | 65 |
68 #else /* Not emacs. */ | 66 #else /* Not emacs. */ |
69 | 67 |
70 #include <stddef.h> | 68 #include <stddef.h> |
71 | 69 |
72 typedef size_t SIZE; | |
73 typedef void *POINTER; | 70 typedef void *POINTER; |
74 | 71 |
75 #include <unistd.h> | 72 #include <unistd.h> |
76 #include <malloc.h> | 73 #include <malloc.h> |
77 #include <string.h> | 74 #include <string.h> |
78 | 75 |
79 #endif /* emacs. */ | 76 #endif /* emacs. */ |
80 | 77 |
81 void init_ralloc (void); | 78 void init_ralloc (void); |
82 #define safe_bcopy(x, y, z) memmove (y, x, z) | |
83 | 79 |
84 #define NIL ((POINTER) 0) | 80 #define NIL ((POINTER) 0) |
85 | 81 |
86 | 82 |
87 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC) | 83 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC) |
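
The removed `safe_bcopy` macro was simply `memmove` with bcopy-style argument order, which is why every former `safe_bcopy (src, dst, len)` call site below becomes `memmove (dst, src, len)`. A minimal sketch of the equivalence:

```c
#include <string.h>

int
main (void)
{
  char buf[16] = "abcdefgh";

  /* Old spelling:  safe_bcopy (buf, buf + 2, 6);
     which the macro expanded to the call below.  Both copy 6 bytes
     and, like bcopy, memmove is safe for overlapping regions.  */
  memmove (buf + 2, buf, 6);
  return 0;
}
```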
96 | 92 |
97 | 93 |
98 /* Declarations for working with the malloc, ralloc, and system breaks. */ | 94 /* Declarations for working with the malloc, ralloc, and system breaks. */ |
99 | 95 |
100 /* Function to set the real break value. */ | 96 /* Function to set the real break value. */ |
101 static POINTER (*real_morecore) (long size); | 97 static POINTER (*real_morecore) (ptrdiff_t size); |
102 | 98 |
103 /* The break value, as seen by malloc (). */ | 99 /* The break value, as seen by malloc (). */ |
104 static POINTER virtual_break_value; | 100 static POINTER virtual_break_value; |
105 | 101 |
106 /* The break value, viewed by the relocatable blocs. */ | 102 /* The break value, viewed by the relocatable blocs. */ |
183 { | 179 { |
184 struct bp *next; | 180 struct bp *next; |
185 struct bp *prev; | 181 struct bp *prev; |
186 POINTER *variable; | 182 POINTER *variable; |
187 POINTER data; | 183 POINTER data; |
188 SIZE size; | 184 size_t size; |
189 POINTER new_data; /* temporarily used for relocation */ | 185 POINTER new_data; /* temporarily used for relocation */ |
190 struct heap *heap; /* Heap this bloc is in. */ | 186 struct heap *heap; /* Heap this bloc is in. */ |
191 } *bloc_ptr; | 187 } *bloc_ptr; |
192 | 188 |
193 #define NIL_BLOC ((bloc_ptr) 0) | 189 #define NIL_BLOC ((bloc_ptr) 0) |
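
Each bloc keeps a back-pointer (`variable`) to the caller's own pointer variable, which is what lets the allocator move a bloc's data and then fix the caller up in place. A simplified sketch of that relocation idiom, using the `memmove` pattern that recurs throughout this file (the helper name is illustrative, not the file's own):

```c
#include <stddef.h>
#include <string.h>

/* Trimmed-down bloc: just the fields the relocation step touches. */
struct bloc
{
  void **variable;   /* address of the user's pointer to the data */
  void *data;        /* where the data currently lives            */
  size_t size;
  void *new_data;    /* where the data is being moved to          */
};

/* Hypothetical helper mirroring the recurring idiom
   "memmove (...); *b->variable = b->data = b->new_data;".  */
static void
relocate_one (struct bloc *b)
{
  memmove (b->new_data, b->data, b->size);
  *b->variable = b->data = b->new_data;
}
```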
243 | 239 |
244 Return the address of the space if all went well, or zero if we couldn't | 240 Return the address of the space if all went well, or zero if we couldn't |
245 allocate the memory. */ | 241 allocate the memory. */ |
246 | 242 |
247 static POINTER | 243 static POINTER |
248 obtain (POINTER address, SIZE size) | 244 obtain (POINTER address, size_t size) |
249 { | 245 { |
250 heap_ptr heap; | 246 heap_ptr heap; |
251 SIZE already_available; | 247 size_t already_available; |
252 | 248 |
253 /* Find the heap that ADDRESS falls within. */ | 249 /* Find the heap that ADDRESS falls within. */ |
254 for (heap = last_heap; heap; heap = heap->prev) | 250 for (heap = last_heap; heap; heap = heap->prev) |
255 { | 251 { |
256 if (heap->start <= address && address <= heap->end) | 252 if (heap->start <= address && address <= heap->end) |
273 /* If we can't fit them within any existing heap, | 269 /* If we can't fit them within any existing heap, |
274 get more space. */ | 270 get more space. */ |
275 if (heap == NIL_HEAP) | 271 if (heap == NIL_HEAP) |
276 { | 272 { |
277 POINTER new = (*real_morecore)(0); | 273 POINTER new = (*real_morecore)(0); |
278 SIZE get; | 274 size_t get; |
279 | 275 |
280 already_available = (char *)last_heap->end - (char *)address; | 276 already_available = (char *)last_heap->end - (char *)address; |
281 | 277 |
282 if (new != last_heap->end) | 278 if (new != last_heap->end) |
283 { | 279 { |
323 #if 0 | 319 #if 0 |
324 /* Obtain SIZE bytes of space and return a pointer to the new area. | 320 /* Obtain SIZE bytes of space and return a pointer to the new area. |
325 If we could not allocate the space, return zero. */ | 321 If we could not allocate the space, return zero. */ |
326 | 322 |
327 static POINTER | 323 static POINTER |
328 get_more_space (SIZE size) | 324 get_more_space (size_t size) |
329 { | 325 { |
330 POINTER ptr = break_value; | 326 POINTER ptr = break_value; |
331 if (obtain (size)) | 327 if (obtain (size)) |
332 return ptr; | 328 return ptr; |
333 else | 329 else |
337 | 333 |
338 /* Note that SIZE bytes of space have been relinquished by the process. | 334 /* Note that SIZE bytes of space have been relinquished by the process. |
339 If SIZE is more than a page, return the space to the system. */ | 335 If SIZE is more than a page, return the space to the system. */ |
340 | 336 |
341 static void | 337 static void |
342 relinquish () | 338 relinquish (void) |
343 { | 339 { |
344 register heap_ptr h; | 340 register heap_ptr h; |
345 int excess = 0; | 341 int excess = 0; |
346 | 342 |
347 /* Add the amount of space beyond break_value | 343 /* Add the amount of space beyond break_value |
386 /* Return the total size in use by relocating allocator, | 382 /* Return the total size in use by relocating allocator, |
387 above where malloc gets space. */ | 383 above where malloc gets space. */ |
388 | 384 |
389 long r_alloc_size_in_use (void); | 385 long r_alloc_size_in_use (void); |
390 long | 386 long |
391 r_alloc_size_in_use () | 387 r_alloc_size_in_use (void) |
392 { | 388 { |
393 return break_value - virtual_break_value; | 389 return break_value - virtual_break_value; |
394 } | 390 } |
395 | 391 |
396 /* The meat - allocating, freeing, and relocating blocs. */ | 392 /* The meat - allocating, freeing, and relocating blocs. */ |
418 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs. | 414 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs. |
419 Returns a pointer to the new bloc, or zero if we couldn't allocate | 415 Returns a pointer to the new bloc, or zero if we couldn't allocate |
420 memory for the new block. */ | 416 memory for the new block. */ |
421 | 417 |
422 static bloc_ptr | 418 static bloc_ptr |
423 get_bloc (SIZE size) | 419 get_bloc (size_t size) |
424 { | 420 { |
425 register bloc_ptr new_bloc; | 421 register bloc_ptr new_bloc; |
426 register heap_ptr heap; | 422 register heap_ptr heap; |
427 | 423 |
428 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) | 424 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) |
499 /* If BLOC won't fit in any heap, | 495 /* If BLOC won't fit in any heap, |
500 get enough new space to hold BLOC and all following blocs. */ | 496 get enough new space to hold BLOC and all following blocs. */ |
501 if (heap == NIL_HEAP) | 497 if (heap == NIL_HEAP) |
502 { | 498 { |
503 register bloc_ptr tb = b; | 499 register bloc_ptr tb = b; |
504 register SIZE s = 0; | 500 register size_t s = 0; |
505 | 501 |
506 /* Add up the size of all the following blocs. */ | 502 /* Add up the size of all the following blocs. */ |
507 while (tb != NIL_BLOC) | 503 while (tb != NIL_BLOC) |
508 { | 504 { |
509 if (tb->variable) | 505 if (tb->variable) |
626 | 622 |
627 /* Resize BLOC to SIZE bytes. This relocates the blocs | 623 /* Resize BLOC to SIZE bytes. This relocates the blocs |
628 that come after BLOC in memory. */ | 624 that come after BLOC in memory. */ |
629 | 625 |
630 static int | 626 static int |
631 resize_bloc (bloc_ptr bloc, SIZE size) | 627 resize_bloc (bloc_ptr bloc, size_t size) |
632 { | 628 { |
633 register bloc_ptr b; | 629 register bloc_ptr b; |
634 heap_ptr heap; | 630 heap_ptr heap; |
635 POINTER address; | 631 POINTER address; |
636 SIZE old_size; | 632 size_t old_size; |
637 | 633 |
638 /* No need to ever call this if arena is frozen, bug somewhere! */ | 634 /* No need to ever call this if arena is frozen, bug somewhere! */ |
639 if (r_alloc_freeze_level) | 635 if (r_alloc_freeze_level) |
640 abort(); | 636 abort(); |
641 | 637 |
679 b->size = 0; | 675 b->size = 0; |
680 b->data = b->new_data; | 676 b->data = b->new_data; |
681 } | 677 } |
682 else | 678 else |
683 { | 679 { |
684 safe_bcopy (b->data, b->new_data, b->size); | 680 memmove (b->new_data, b->data, b->size); |
685 *b->variable = b->data = b->new_data; | 681 *b->variable = b->data = b->new_data; |
686 } | 682 } |
687 } | 683 } |
688 if (!bloc->variable) | 684 if (!bloc->variable) |
689 { | 685 { |
690 bloc->size = 0; | 686 bloc->size = 0; |
691 bloc->data = bloc->new_data; | 687 bloc->data = bloc->new_data; |
692 } | 688 } |
693 else | 689 else |
694 { | 690 { |
695 safe_bcopy (bloc->data, bloc->new_data, old_size); | 691 memmove (bloc->new_data, bloc->data, old_size); |
696 memset (bloc->new_data + old_size, 0, size - old_size); | 692 memset (bloc->new_data + old_size, 0, size - old_size); |
697 *bloc->variable = bloc->data = bloc->new_data; | 693 *bloc->variable = bloc->data = bloc->new_data; |
698 } | 694 } |
699 } | 695 } |
700 else | 696 else |
706 b->size = 0; | 702 b->size = 0; |
707 b->data = b->new_data; | 703 b->data = b->new_data; |
708 } | 704 } |
709 else | 705 else |
710 { | 706 { |
711 safe_bcopy (b->data, b->new_data, b->size); | 707 memmove (b->new_data, b->data, b->size); |
712 *b->variable = b->data = b->new_data; | 708 *b->variable = b->data = b->new_data; |
713 } | 709 } |
714 } | 710 } |
715 } | 711 } |
716 | 712 |
788 | 784 |
789 If we're out of memory, we should return zero, to imitate the other | 785 If we're out of memory, we should return zero, to imitate the other |
790 __morecore hook values - in particular, __default_morecore in the | 786 __morecore hook values - in particular, __default_morecore in the |
791 GNU malloc package. */ | 787 GNU malloc package. */ |
792 | 788 |
793 POINTER r_alloc_sbrk (long size); | 789 POINTER r_alloc_sbrk (ptrdiff_t size); |
794 POINTER | 790 POINTER |
795 r_alloc_sbrk (long size) | 791 r_alloc_sbrk (ptrdiff_t size) |
796 { | 792 { |
797 register bloc_ptr b; | 793 register bloc_ptr b; |
798 POINTER address; | 794 POINTER address; |
799 | 795 |
800 if (! r_alloc_initialized) | 796 if (! r_alloc_initialized) |
811 /* Allocate a page-aligned space. GNU malloc would reclaim an | 807 /* Allocate a page-aligned space. GNU malloc would reclaim an |
812 extra space if we passed an unaligned one. But we could | 808 extra space if we passed an unaligned one. But we could |
813 not always find a space which is contiguous to the previous. */ | 809 not always find a space which is contiguous to the previous. */ |
814 POINTER new_bloc_start; | 810 POINTER new_bloc_start; |
815 heap_ptr h = first_heap; | 811 heap_ptr h = first_heap; |
816 SIZE get = ROUNDUP (size); | 812 size_t get = ROUNDUP (size); |
817 | 813 |
818 address = (POINTER) ROUNDUP (virtual_break_value); | 814 address = (POINTER) ROUNDUP (virtual_break_value); |
819 | 815 |
820 /* Search the list upward for a heap which is large enough. */ | 816 /* Search the list upward for a heap which is large enough. */ |
821 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get)) | 817 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get)) |
860 /* Note that (POINTER)(h+1) <= new_bloc_start since | 856 /* Note that (POINTER)(h+1) <= new_bloc_start since |
861 get >= page_size, so the following does not destroy the heap | 857 get >= page_size, so the following does not destroy the heap |
862 header. */ | 858 header. */ |
863 for (b = last_bloc; b != NIL_BLOC; b = b->prev) | 859 for (b = last_bloc; b != NIL_BLOC; b = b->prev) |
864 { | 860 { |
865 safe_bcopy (b->data, b->new_data, b->size); | 861 memmove (b->new_data, b->data, b->size); |
866 *b->variable = b->data = b->new_data; | 862 *b->variable = b->data = b->new_data; |
867 } | 863 } |
868 | 864 |
869 h->bloc_start = new_bloc_start; | 865 h->bloc_start = new_bloc_start; |
870 | 866 |
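
`r_alloc_sbrk` rounds both the requested size and the virtual break up to page boundaries before walking the heap list. The file's `ROUNDUP`/`MEM_ROUNDUP` macros fall outside this excerpt, so the definition below is only the conventional power-of-two sketch, not the file's own:

```c
#include <stddef.h>

/* Illustrative page size; the real code discovers it at startup. */
static size_t page_size = 4096;

/* Conventional round-up-to-a-page sketch (assumes page_size is a
   power of two).  E.g. 1 -> 4096, 4096 -> 4096, 4097 -> 8192.  */
#define PAGE_ROUNDUP(n) \
  (((size_t) (n) + page_size - 1) & ~(page_size - 1))
```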
891 | 887 |
892 memset (address, 0, size); | 888 memset (address, 0, size); |
893 } | 889 } |
894 else /* size < 0 */ | 890 else /* size < 0 */ |
895 { | 891 { |
896 SIZE excess = (char *)first_heap->bloc_start | 892 size_t excess = (char *)first_heap->bloc_start |
897 - ((char *)virtual_break_value + size); | 893 - ((char *)virtual_break_value + size); |
898 | 894 |
899 address = virtual_break_value; | 895 address = virtual_break_value; |
900 | 896 |
901 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes) | 897 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes) |
906 | 902 |
907 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start); | 903 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start); |
908 | 904 |
909 for (b = first_bloc; b != NIL_BLOC; b = b->next) | 905 for (b = first_bloc; b != NIL_BLOC; b = b->next) |
910 { | 906 { |
911 safe_bcopy (b->data, b->new_data, b->size); | 907 memmove (b->new_data, b->data, b->size); |
912 *b->variable = b->data = b->new_data; | 908 *b->variable = b->data = b->new_data; |
913 } | 909 } |
914 } | 910 } |
915 | 911 |
916 if ((char *)virtual_break_value + size < (char *)first_heap->start) | 912 if ((char *)virtual_break_value + size < (char *)first_heap->start) |
939 before allocating a new area. Not yet done. | 935 before allocating a new area. Not yet done. |
940 | 936 |
941 If we can't allocate the necessary memory, set *PTR to zero, and | 937 If we can't allocate the necessary memory, set *PTR to zero, and |
942 return zero. */ | 938 return zero. */ |
943 | 939 |
944 POINTER r_alloc (POINTER *ptr, SIZE size); | 940 POINTER r_alloc (POINTER *ptr, size_t size); |
945 POINTER | 941 POINTER |
946 r_alloc (POINTER *ptr, SIZE size) | 942 r_alloc (POINTER *ptr, size_t size) |
947 { | 943 { |
948 bloc_ptr new_bloc; | 944 bloc_ptr new_bloc; |
949 | 945 |
950 if (! r_alloc_initialized) | 946 if (! r_alloc_initialized) |
951 init_ralloc (); | 947 init_ralloc (); |
998 Change *PTR to reflect the new bloc, and return this value. | 994 Change *PTR to reflect the new bloc, and return this value. |
999 | 995 |
1000 If more memory cannot be allocated, then leave *PTR unchanged, and | 996 If more memory cannot be allocated, then leave *PTR unchanged, and |
1001 return zero. */ | 997 return zero. */ |
1002 | 998 |
1003 POINTER r_re_alloc (POINTER *ptr, SIZE size); | 999 POINTER r_re_alloc (POINTER *ptr, size_t size); |
1004 POINTER | 1000 POINTER |
1005 r_re_alloc (POINTER *ptr, SIZE size) | 1001 r_re_alloc (POINTER *ptr, size_t size) |
1006 { | 1002 { |
1007 register bloc_ptr bloc; | 1003 register bloc_ptr bloc; |
1008 | 1004 |
1009 if (! r_alloc_initialized) | 1005 if (! r_alloc_initialized) |
1010 init_ralloc (); | 1006 init_ralloc (); |
1080 r_alloc_sbrk (-size); | 1076 r_alloc_sbrk (-size); |
1081 } | 1077 } |
1082 | 1078 |
1083 void r_alloc_thaw (void); | 1079 void r_alloc_thaw (void); |
1084 void | 1080 void |
1085 r_alloc_thaw () | 1081 r_alloc_thaw (void) |
1086 { | 1082 { |
1087 | 1083 |
1088 if (! r_alloc_initialized) | 1084 if (! r_alloc_initialized) |
1089 init_ralloc (); | 1085 init_ralloc (); |
1090 | 1086 |
1091 if (--r_alloc_freeze_level < 0) | 1087 if (--r_alloc_freeze_level < 0) |
1092 abort (); | 1088 abort (); |
1093 | 1089 |
1094 /* This frees all unused blocs. It is not too inefficient, as the resize | 1090 /* This frees all unused blocs. It is not too inefficient, as the resize |
1095 and bcopy is done only once. Afterwards, all unreferenced blocs are | 1091 and memmove is done only once. Afterwards, all unreferenced blocs are |
1096 already shrunk to zero size. */ | 1092 already shrunk to zero size. */ |
1097 if (!r_alloc_freeze_level) | 1093 if (!r_alloc_freeze_level) |
1098 { | 1094 { |
1099 bloc_ptr *b = &first_bloc; | 1095 bloc_ptr *b = &first_bloc; |
1100 while (*b) | 1096 while (*b) |
1107 | 1103 |
1108 | 1104 |
1109 /* The hook `malloc' uses for the function which gets more space | 1105 /* The hook `malloc' uses for the function which gets more space |
1110 from the system. */ | 1106 from the system. */ |
1111 #ifndef DOUG_LEA_MALLOC | 1107 #ifndef DOUG_LEA_MALLOC |
1112 extern POINTER (*__morecore) (long size); | 1108 extern POINTER (*__morecore) (ptrdiff_t size); |
1113 #endif | 1109 #endif |
1114 | 1110 |
1115 /* Initialize various things for memory allocation. */ | 1111 /* Initialize various things for memory allocation. */ |
1116 | |
1117 #define SET_FUN_PTR(fun_ptr, fun_val) \ | |
1118 (*((void **) (&fun_ptr)) = ((void *) (fun_val))) | |
1119 | 1112 |
1120 void | 1113 void |
1121 init_ralloc (void) | 1114 init_ralloc (void) |
1122 { | 1115 { |
1123 if (r_alloc_initialized) | 1116 if (r_alloc_initialized) |
1124 return; | 1117 return; |
1125 | 1118 |
1126 r_alloc_initialized = 1; | 1119 r_alloc_initialized = 1; |
1127 SET_FUN_PTR (real_morecore, __morecore); | 1120 real_morecore = (POINTER (*) (ptrdiff_t)) __morecore; |
1128 SET_FUN_PTR (__morecore, r_alloc_sbrk); | 1121 __morecore = |
1122 #ifdef __GNUC__ | |
1123 (__typeof__ (__morecore)) | |
1124 #endif | |
1125 r_alloc_sbrk; | |
1129 | 1126 |
1130 first_heap = last_heap = &heap_base; | 1127 first_heap = last_heap = &heap_base; |
1131 first_heap->next = first_heap->prev = NIL_HEAP; | 1128 first_heap->next = first_heap->prev = NIL_HEAP; |
1132 first_heap->start = first_heap->bloc_start | 1129 first_heap->start = first_heap->bloc_start |
1133 = virtual_break_value = break_value = (*real_morecore) (0); | 1130 = virtual_break_value = break_value = (*real_morecore) (0); |
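
`init_ralloc` interposes on malloc's core-request hook: it saves the current `__morecore` in `real_morecore` and installs `r_alloc_sbrk` in its place, so malloc's requests for more core flow through the relocating allocator, which can move the relocatable blocs out of the way first. A self-contained sketch of the interposition (`morecore_hook` stands in for the real `__morecore` variable so the sketch links on its own):

```c
#include <stddef.h>

typedef unsigned char *POINTER;

/* Stand-in for malloc's __morecore hook. */
static POINTER (*morecore_hook) (ptrdiff_t);
static POINTER (*real_morecore) (ptrdiff_t);

/* Stub for the sketch; the real r_alloc_sbrk serves the request
   from the heap list, relocating blocs as needed.  */
static POINTER
r_alloc_sbrk (ptrdiff_t size)
{
  return real_morecore ? (*real_morecore) (size) : (POINTER) 0;
}

static void
install_hook (void)
{
  /* Save the system allocator's morecore, then interpose ours,
     exactly as init_ralloc does with __morecore.  */
  real_morecore = morecore_hook;
  morecore_hook = r_alloc_sbrk;
}
```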
1170 | 1167 |
1171 /* Reinitialize the morecore hook variables after restarting a dumped | 1168 /* Reinitialize the morecore hook variables after restarting a dumped |
1172 Emacs. This is needed when using Doug Lea's malloc from GNU libc. */ | 1169 Emacs. This is needed when using Doug Lea's malloc from GNU libc. */ |
1173 void r_alloc_reinit (void); | 1170 void r_alloc_reinit (void); |
1174 void | 1171 void |
1175 r_alloc_reinit () | 1172 r_alloc_reinit (void) |
1176 { | 1173 { |
1177 /* Only do this if the hook has been reset, so that we don't get an | 1174 /* Only do this if the hook has been reset, so that we don't get an |
1178 infinite loop, in case Emacs was linked statically. */ | 1175 infinite loop, in case Emacs was linked statically. */ |
1179 if ( ((void*) __morecore) != (void *) (r_alloc_sbrk)) | 1176 if ( (POINTER (*) (ptrdiff_t)) __morecore != r_alloc_sbrk) |
1180 { | 1177 { |
1181 SET_FUN_PTR (real_morecore, __morecore); | 1178 real_morecore = (POINTER (*) (ptrdiff_t)) __morecore; |
1182 SET_FUN_PTR (__morecore, r_alloc_sbrk); | 1179 __morecore = |
1180 #ifdef __GNUC__ | |
1181 (__typeof__ (__morecore)) | |
1182 #endif | |
1183 r_alloc_sbrk; | |
1183 } | 1184 } |
1184 } | 1185 } |
1185 #if 0 | 1186 #if 0 |
1186 #ifdef DEBUG | 1187 #ifdef DEBUG |
1187 | 1188 |
1188 void | 1189 void |
1189 r_alloc_check () | 1190 r_alloc_check (void) |
1190 { | 1191 { |
1191 int found = 0; | 1192 int found = 0; |
1192 heap_ptr h, ph = 0; | 1193 heap_ptr h, ph = 0; |
1193 bloc_ptr b, pb = 0; | 1194 bloc_ptr b, pb = 0; |
1194 | 1195 |
1230 | 1231 |
1231 for (b = first_bloc; b; b = b->next) | 1232 for (b = first_bloc; b; b = b->next) |
1232 { | 1233 { |
1233 assert (b->prev == pb); | 1234 assert (b->prev == pb); |
1234 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data); | 1235 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data); |
1235 assert ((SIZE) MEM_ROUNDUP (b->size) == b->size); | 1236 assert ((size_t) MEM_ROUNDUP (b->size) == b->size); |
1236 | 1237 |
1237 ph = 0; | 1238 ph = 0; |
1238 for (h = first_heap; h; h = h->next) | 1239 for (h = first_heap; h; h = h->next) |
1239 { | 1240 { |
1240 if (h->bloc_start <= b->data && b->data + b->size <= h->end) | 1241 if (h->bloc_start <= b->data && b->data + b->size <= h->end) |
1316 #include <fcntl.h> | 1317 #include <fcntl.h> |
1317 #include <sys/mman.h> | 1318 #include <sys/mman.h> |
1318 #include <stdio.h> | 1319 #include <stdio.h> |
1319 | 1320 |
1320 typedef void *VM_ADDR; /* VM addresses */ | 1321 typedef void *VM_ADDR; /* VM addresses */ |
1321 static CONST VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */ | 1322 static const VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */ |
1322 | 1323 |
1323 /* Configuration for relocating allocator. */ | 1324 /* Configuration for relocating allocator. */ |
1324 | 1325 |
1325 /* #define MMAP_GENERATE_ADDRESSES */ | 1326 /* #define MMAP_GENERATE_ADDRESSES */ |
1326 /* Define this if you want Emacs to manage the address table. | 1327 /* Define this if you want Emacs to manage the address table. |
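
`VM_FAILURE_ADDR` is `(VM_ADDR) -1` because that is the value POSIX `mmap` returns on failure, spelled `MAP_FAILED` in `<sys/mman.h>`. A short sketch of the check; `MAP_ANONYMOUS` is used here for brevity, whereas the file itself maps through `DEV_ZERO_FD` with its own `MAP_FLAGS`:

```c
#include <stddef.h>
#include <sys/mman.h>

/* Returns a page-backed region, or 0 on failure.  MAP_FAILED is
   ((void *) -1), the same value the file names VM_FAILURE_ADDR.  */
static void *
grab_pages (size_t sz)
{
  void *p = mmap (0, sz, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return (p == MAP_FAILED) ? 0 : p;
}
```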
1690 | 1691 |
1691 /* Initialization procedure for address picking scheme */ | 1692 /* Initialization procedure for address picking scheme */ |
1692 static void Addr_Block_initialize(void); | 1693 static void Addr_Block_initialize(void); |
1693 | 1694 |
1694 /* Get a suitable VM_ADDR via mmap */ | 1695 /* Get a suitable VM_ADDR via mmap */ |
1695 static VM_ADDR New_Addr_Block( SIZE sz ); | 1696 static VM_ADDR New_Addr_Block (size_t sz); |
1696 | 1697 |
1697 /* Free a VM_ADDR allocated via New_Addr_Block */ | 1698 /* Free a VM_ADDR allocated via New_Addr_Block */ |
1698 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ); | 1699 static void Free_Addr_Block (VM_ADDR addr, size_t sz); |
1699 | 1700 |
1700 #ifdef MMAP_GENERATE_ADDRESSES | 1701 #ifdef MMAP_GENERATE_ADDRESSES |
1701 /* Implementation of the three calls for address picking when XEmacs is incharge */ | 1702 /* Implementation of the three calls for address picking when XEmacs is incharge */ |
1702 | 1703 |
1703 /* The enum denotes the status of the following block. */ | 1704 /* The enum denotes the status of the following block. */ |
1704 typedef enum { empty = 0, occupied, unavailable } addr_status; | 1705 typedef enum { empty = 0, occupied, unavailable } addr_status; |
1705 | 1706 |
1706 typedef struct addr_chain | 1707 typedef struct addr_chain |
1707 { | 1708 { |
1708 POINTER addr; | 1709 POINTER addr; |
1709 SIZE sz; | 1710 size_t sz; |
1710 addr_status flag; | 1711 addr_status flag; |
1711 struct addr_chain *next; | 1712 struct addr_chain *next; |
1712 } ADDRESS_BLOCK, *ADDRESS_CHAIN; | 1713 } ADDRESS_BLOCK, *ADDRESS_CHAIN; |
1713 /* NB: empty and unavailable blocks are concatenated. */ | 1714 /* NB: empty and unavailable blocks are concatenated. */ |
1714 | 1715 |
1716 /* Start off the address block chain with a humongous address block | 1717 /* Start off the address block chain with a humongous address block |
1717 which is empty to start with. Note that addr_chain is invariant | 1718 which is empty to start with. Note that addr_chain is invariant |
1718 WRT the addition/deletion of address blocks because of the assert | 1719 WRT the addition/deletion of address blocks because of the assert |
1719 in Coalesce() and the strict ordering of blocks by their address | 1720 in Coalesce() and the strict ordering of blocks by their address |
1720 */ | 1721 */ |
1721 static void Addr_Block_initialize() | 1722 static void |
1723 Addr_Block_initialize (void) | |
1722 { | 1724 { |
1723 MEMMETER( MVAL( M_Addrlist_Size )++) | 1725 MEMMETER( MVAL( M_Addrlist_Size )++) |
1724 addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK )); | 1726 addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK )); |
1725 addr_chain->next = 0; /* Last block in chain */ | 1727 addr_chain->next = 0; /* Last block in chain */ |
1726 addr_chain->sz = 0x0c000000; /* Size */ | 1728 addr_chain->sz = 0x0c000000; /* Size */ |
1728 addr_chain->flag = empty; | 1730 addr_chain->flag = empty; |
1729 } | 1731 } |
1730 | 1732 |
1731 /* Coalesce address blocks if they are contiguous. Only empty and | 1733 /* Coalesce address blocks if they are contiguous. Only empty and |
1732 unavailable slots are coalesced. */ | 1734 unavailable slots are coalesced. */ |
1733 static void Coalesce_Addr_Blocks() | 1735 static void |
1736 Coalesce_Addr_Blocks (void) | |
1734 { | 1737 { |
1735 ADDRESS_CHAIN p; | 1738 ADDRESS_CHAIN p; |
1736 for (p = addr_chain; p; p = p->next) | 1739 for (p = addr_chain; p; p = p->next) |
1737 { | 1740 { |
1738 while (p->next && p->flag == p->next->flag) | 1741 while (p->next && p->flag == p->next->flag) |
1754 } | 1757 } |
1755 } /* for all p */ | 1758 } /* for all p */ |
1756 } | 1759 } |
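
The merge step inside the `while` loop is elided from this hunk, so the following is only an assumed sketch of what folding a contiguous same-flag neighbour into `p` looks like: extend `p`, unlink the absorbed entry, and release it (the file would release through its own wrapper around `free`):

```c
#include <stddef.h>
#include <stdlib.h>

typedef enum { empty = 0, occupied, unavailable } addr_status;

typedef struct addr_chain
{
  void *addr;
  size_t sz;
  addr_status flag;
  struct addr_chain *next;
} ADDRESS_BLOCK, *ADDRESS_CHAIN;

/* Assumed merge step for two contiguous entries with equal flags. */
static void
merge_with_next (ADDRESS_CHAIN p)
{
  ADDRESS_CHAIN q = p->next;
  p->sz += q->sz;        /* p absorbs q's address range */
  p->next = q->next;     /* unlink q from the chain     */
  free (q);              /* illustration; not the file's wrapper */
}
```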
1757 | 1760 |
1758 /* Get an empty address block of specified size. */ | 1761 /* Get an empty address block of specified size. */ |
1759 static VM_ADDR New_Addr_Block( SIZE sz ) | 1762 static VM_ADDR |
1763 New_Addr_Block (size_t sz) | |
1760 { | 1764 { |
1761 ADDRESS_CHAIN p = addr_chain; | 1765 ADDRESS_CHAIN p = addr_chain; |
1762 VM_ADDR new_addr = VM_FAILURE_ADDR; | 1766 VM_ADDR new_addr = VM_FAILURE_ADDR; |
1763 for (; p; p = p->next) | 1767 for (; p; p = p->next) |
1764 { | 1768 { |
1791 return new_addr; | 1795 return new_addr; |
1792 } | 1796 } |
1793 | 1797 |
1794 /* Free an address block. We mark the block as being empty, and attempt to | 1798 /* Free an address block. We mark the block as being empty, and attempt to |
1795 do any coalescing that may have resulted from this. */ | 1799 do any coalescing that may have resulted from this. */ |
1796 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ) | 1800 static void |
1801 Free_Addr_Block (VM_ADDR addr, size_t sz) | |
1797 { | 1802 { |
1798 ADDRESS_CHAIN p = addr_chain; | 1803 ADDRESS_CHAIN p = addr_chain; |
1799 for (; p; p = p->next ) | 1804 for (; p; p = p->next ) |
1800 { | 1805 { |
1801 if (p->addr == addr) | 1806 if (p->addr == addr) |
1812 } | 1817 } |
1813 #else /* !MMAP_GENERATE_ADDRESSES */ | 1818 #else /* !MMAP_GENERATE_ADDRESSES */ |
1814 /* This is an alternate (simpler) implementation in cases where the | 1819 /* This is an alternate (simpler) implementation in cases where the |
1815 address is picked by the kernel. */ | 1820 address is picked by the kernel. */ |
1816 | 1821 |
1817 static void Addr_Block_initialize(void) | 1822 static void |
1823 Addr_Block_initialize (void) | |
1818 { | 1824 { |
1819 /* Nothing. */ | 1825 /* Nothing. */ |
1820 } | 1826 } |
1821 | 1827 |
1822 static VM_ADDR New_Addr_Block( SIZE sz ) | 1828 static VM_ADDR |
1829 New_Addr_Block (size_t sz) | |
1823 { | 1830 { |
1824 return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS, | 1831 return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS, |
1825 DEV_ZERO_FD, 0 ); | 1832 DEV_ZERO_FD, 0 ); |
1826 } | 1833 } |
1827 | 1834 |
1828 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ) | 1835 static void |
1836 Free_Addr_Block (VM_ADDR addr, size_t sz) | |
1829 { | 1837 { |
1830 munmap ((caddr_t) addr, sz ); | 1838 munmap ((caddr_t) addr, sz ); |
1831 } | 1839 } |
1832 | 1840 |
1833 #endif /* MMAP_GENERATE_ADDRESSES */ | 1841 #endif /* MMAP_GENERATE_ADDRESSES */ |
1834 | 1842 |
1835 | 1843 |
1836 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */ | 1844 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */ |
1837 | 1845 |
1838 /* | 1846 /* |
1839 r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start | 1847 r_alloc (POINTER, SIZE): Allocate a relocatable area with the start |
1840 address aliased to the first parameter. | 1848 address aliased to the first parameter. |
1841 */ | 1849 */ |
1842 | 1850 |
1843 POINTER r_alloc (POINTER *ptr, SIZE size); | 1851 POINTER r_alloc (POINTER *ptr, size_t size); |
1844 POINTER | 1852 POINTER |
1845 r_alloc (POINTER *ptr, SIZE size) | 1853 r_alloc (POINTER *ptr, size_t size) |
1846 { | 1854 { |
1847 MMAP_HANDLE mh; | 1855 MMAP_HANDLE mh; |
1848 | 1856 |
1849 switch(r_alloc_initialized) | 1857 switch(r_alloc_initialized) |
1850 { | 1858 { |
1855 break; | 1863 break; |
1856 default: | 1864 default: |
1857 mh = new_mmap_handle( size ); | 1865 mh = new_mmap_handle( size ); |
1858 if (mh) | 1866 if (mh) |
1859 { | 1867 { |
1860 SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); | 1868 size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); |
1861 SIZE mmapped_size = ROUNDUP( size + hysteresis ); | 1869 size_t mmapped_size = ROUNDUP( size + hysteresis ); |
1862 MEMMETER( MVAL(M_Map)++ ) | 1870 MEMMETER( MVAL(M_Map)++ ) |
1863 MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) ) | 1871 MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) ) |
1864 MEMMETER( MVAL(M_Wastage) += mmapped_size - size ) | 1872 MEMMETER( MVAL(M_Wastage) += mmapped_size - size ) |
1865 MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) ) | 1873 MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) ) |
1866 mh->vm_addr = New_Addr_Block( mmapped_size ); | 1874 mh->vm_addr = New_Addr_Block( mmapped_size ); |
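
The mmap-backed `r_alloc` deliberately over-allocates: it adds a hysteresis margin to the request and rounds the sum up to a page multiple, so a bloc can grow a little without being remapped. A worked sketch of that sizing, assuming a 4096-byte page and the usual page-rounding `ROUNDUP`:

```c
#include <stddef.h>

/* Mirrors "mmapped_size = ROUNDUP (size + hysteresis)" above, with
   a fixed 4096-byte page for illustration.  E.g. size 10000 with
   hysteresis 600 maps 12288 bytes.  */
static size_t
mmapped_size_for (size_t size, long mmap_hysteresis)
{
  size_t hysteresis = mmap_hysteresis > 0 ? (size_t) mmap_hysteresis : 0;
  size_t page = 4096;
  return (size + hysteresis + page - 1) & ~(page - 1);
}
```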
1924 Change *PTR to reflect the new bloc, and return this value. | 1932 Change *PTR to reflect the new bloc, and return this value. |
1925 | 1933 |
1926 If more memory cannot be allocated, then leave *PTR unchanged, and | 1934 If more memory cannot be allocated, then leave *PTR unchanged, and |
1927 return zero. */ | 1935 return zero. */ |
1928 | 1936 |
1929 POINTER r_re_alloc (POINTER *ptr, SIZE sz); | 1937 POINTER r_re_alloc (POINTER *ptr, size_t sz); |
1930 POINTER | 1938 POINTER |
1931 r_re_alloc (POINTER *ptr, SIZE sz) | 1939 r_re_alloc (POINTER *ptr, size_t sz) |
1932 { | 1940 { |
1933 if (r_alloc_initialized == 0) | 1941 if (r_alloc_initialized == 0) |
1934 { | 1942 { |
1935 abort (); | 1943 abort (); |
1936 return 0; /* suppress compiler warning */ | 1944 return 0; /* suppress compiler warning */ |
1942 *ptr = tmp; | 1950 *ptr = tmp; |
1943 return tmp; | 1951 return tmp; |
1944 } | 1952 } |
1945 else | 1953 else |
1946 { | 1954 { |
1947 SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); | 1955 size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); |
1948 SIZE actual_sz = ROUNDUP( sz + hysteresis ); | 1956 size_t actual_sz = ROUNDUP( sz + hysteresis ); |
1949 MMAP_HANDLE h = find_mmap_handle( ptr ); | 1957 MMAP_HANDLE h = find_mmap_handle( ptr ); |
1950 VM_ADDR new_vm_addr; | 1958 VM_ADDR new_vm_addr; |
1951 | 1959 |
1952 if ( h == 0 ) /* Was allocated using malloc. */ | 1960 if ( h == 0 ) /* Was allocated using malloc. */ |
1953 { | 1961 { |