Mercurial > hg > xemacs-beta
comparison src/ralloc.c @ 185:3d6bfa290dbd r20-3b19
Import from CVS: tag r20-3b19
author | cvs |
---|---|
date | Mon, 13 Aug 2007 09:55:28 +0200 |
parents | 0132846995bd |
children | 084402c475ba |
comparison
equal
deleted
inserted
replaced
184:bcd2674570bf | 185:3d6bfa290dbd |
---|---|
1 /* Block-relocating memory allocator. | 1 /* Block-relocating memory allocator. |
2 Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc. | 2 Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc. |
3 | 3 |
4 This file is part of XEmacs. | 4 This file is part of XEmacs. |
5 | 5 |
6 XEmacs is free software; you can redistribute it and/or modify it | 6 XEmacs is free software; you can redistribute it and/or modify it |
95 static POINTER page_break_value; | 95 static POINTER page_break_value; |
96 | 96 |
97 /* This is the size of a page. We round memory requests to this boundary. */ | 97 /* This is the size of a page. We round memory requests to this boundary. */ |
98 static int page_size; | 98 static int page_size; |
99 | 99 |
100 /* Whenever we get memory from the system, get this many extra bytes. This | 100 /* Whenever we get memory from the system, get this many extra bytes. This |
101 must be a multiple of page_size. */ | 101 must be a multiple of page_size. */ |
102 static int extra_bytes; | 102 static int extra_bytes; |
103 | 103 |
104 /* Macros for rounding. Note that rounding to any value is possible | 104 /* Macros for rounding. Note that rounding to any value is possible |
105 by changing the definition of PAGE. */ | 105 by changing the definition of PAGE. */ |
162 int excess; | 162 int excess; |
163 | 163 |
164 break_value -= size; | 164 break_value -= size; |
165 new_page_break = (POINTER) ROUNDUP (break_value); | 165 new_page_break = (POINTER) ROUNDUP (break_value); |
166 excess = (char *) page_break_value - (char *) new_page_break; | 166 excess = (char *) page_break_value - (char *) new_page_break; |
167 | 167 |
168 if (excess > extra_bytes * 2) | 168 if (excess > extra_bytes * 2) |
169 { | 169 { |
170 /* Keep extra_bytes worth of empty space. | 170 /* Keep extra_bytes worth of empty space. |
171 And don't free anything unless we can free at least extra_bytes. */ | 171 And don't free anything unless we can free at least extra_bytes. */ |
172 if ((*real_morecore) (extra_bytes - excess) == 0) | 172 if ((*real_morecore) (extra_bytes - excess) == 0) |
271 if (bloc != NIL_BLOC) | 271 if (bloc != NIL_BLOC) |
272 { | 272 { |
273 SIZE offset = address - bloc->data; | 273 SIZE offset = address - bloc->data; |
274 SIZE data_size = 0; | 274 SIZE data_size = 0; |
275 bloc_ptr b; | 275 bloc_ptr b; |
276 | 276 |
277 for (b = bloc; b != NIL_BLOC; b = b->next) | 277 for (b = bloc; b != NIL_BLOC; b = b->next) |
278 { | 278 { |
279 data_size += b->size; | 279 data_size += b->size; |
280 b->data += offset; | 280 b->data += offset; |
281 *b->variable = b->data; | 281 *b->variable = b->data; |
329 | 329 |
330 If we're out of memory, we should return zero, to imitate the other | 330 If we're out of memory, we should return zero, to imitate the other |
331 __morecore hook values - in particular, __default_morecore in the | 331 __morecore hook values - in particular, __default_morecore in the |
332 GNU malloc package. */ | 332 GNU malloc package. */ |
333 | 333 |
334 POINTER | 334 POINTER |
335 r_alloc_sbrk (long size) | 335 r_alloc_sbrk (long size) |
336 { | 336 { |
337 /* This is the first address not currently available for the heap. */ | 337 /* This is the first address not currently available for the heap. */ |
338 POINTER top; | 338 POINTER top; |
339 /* Amount of empty space below that. */ | 339 /* Amount of empty space below that. */ |
508 virtual_break_value = break_value = page_break_value; | 508 virtual_break_value = break_value = page_break_value; |
509 use_relocatable_buffers = 1; | 509 use_relocatable_buffers = 1; |
510 } | 510 } |
511 #else /* HAVE_MMAP */ | 511 #else /* HAVE_MMAP */ |
512 | 512 |
513 /* | 513 /* |
514 A relocating allocator built using the mmap(2) facility available | 514 A relocating allocator built using the mmap(2) facility available |
515 in some OSes. Based on another version written by Paul Flinders, | 515 in some OSes. Based on another version written by Paul Flinders, |
516 from which code (and comments) are snarfed. | 516 from which code (and comments) are snarfed. |
517 | 517 |
518 The OS should support mmap() with MAP_ANONYMOUS attribute, or have | 518 The OS should support mmap() with MAP_ANONYMOUS attribute, or have |
521 Paul Flinders wrote a version which works well for systems that | 521 Paul Flinders wrote a version which works well for systems that |
522 allow callers to specify (virtual) addresses to mmap(). | 522 allow callers to specify (virtual) addresses to mmap(). |
523 Unfortunately, such a scheme doesn't work for certain systems like | 523 Unfortunately, such a scheme doesn't work for certain systems like |
524 HP-UX that have a system-wide virtual->real address map, and | 524 HP-UX that have a system-wide virtual->real address map, and |
525 consequently impose restrictions on the virtual address values | 525 consequently impose restrictions on the virtual address values |
526 permitted. | 526 permitted. |
527 | 527 |
528 NB: The mapping scheme in HP-UX is motivated by the inverted page | 528 NB: The mapping scheme in HP-UX is motivated by the inverted page |
529 table design in some HP processors. | 529 table design in some HP processors. |
530 | 530 |
531 This alternate implementation allows for the addresses to be | 531 This alternate implementation allows for the addresses to be |
718 if (prev == h || nex == h) /* In fact, this should be && */ | 718 if (prev == h || nex == h) /* In fact, this should be && */ |
719 { /* We're the singleton dll */ | 719 { /* We're the singleton dll */ |
720 UNDERLYING_FREE( h ); /* Free the sole item */ | 720 UNDERLYING_FREE( h ); /* Free the sole item */ |
721 mmap_start = 0; return; | 721 mmap_start = 0; return; |
722 } | 722 } |
723 else if (h == mmap_start) | 723 else if (h == mmap_start) |
724 { | 724 { |
725 mmap_start = nex; /* Make sure mmap_start isn't bogus. */ | 725 mmap_start = nex; /* Make sure mmap_start isn't bogus. */ |
726 } | 726 } |
727 prev->right = nex; | 727 prev->right = nex; |
728 nex->left = prev; | 728 nex->left = prev; |
743 | 743 |
744 static void | 744 static void |
745 init_MHASH_table (void) | 745 init_MHASH_table (void) |
746 { | 746 { |
747 int i = 0; | 747 int i = 0; |
748 for (; i < MHASH_PRIME; i++) | 748 for (; i < MHASH_PRIME; i++) |
749 { | 749 { |
750 MHASH_HITS[i].n_hits = 0; | 750 MHASH_HITS[i].n_hits = 0; |
751 MHASH_HITS[i].addr = 0; | 751 MHASH_HITS[i].addr = 0; |
752 MHASH_HITS[i].handle = 0; | 752 MHASH_HITS[i].handle = 0; |
753 } | 753 } |
760 #if (LONGBITS == 64) | 760 #if (LONGBITS == 64) |
761 unsigned long int addr_shift = (unsigned long int)(addr) >> USELESS_LOWER_ADDRESS_BITS; | 761 unsigned long int addr_shift = (unsigned long int)(addr) >> USELESS_LOWER_ADDRESS_BITS; |
762 #else | 762 #else |
763 unsigned int addr_shift = (unsigned int)(addr) >> USELESS_LOWER_ADDRESS_BITS; | 763 unsigned int addr_shift = (unsigned int)(addr) >> USELESS_LOWER_ADDRESS_BITS; |
764 #endif | 764 #endif |
765 int hval = addr_shift % MHASH_PRIME; /* We could have addresses which are -ve | 765 int hval = addr_shift % MHASH_PRIME; /* We could have addresses which are -ve |
766 when converted to signed ints */ | 766 when converted to signed ints */ |
767 return ((hval >= 0) ? hval : MHASH_PRIME + hval); | 767 return ((hval >= 0) ? hval : MHASH_PRIME + hval); |
768 } | 768 } |
769 | 769 |
770 /* Add a VM address with its corresponding handle to the table. */ | 770 /* Add a VM address with its corresponding handle to the table. */ |
832 static int meter[N_Meterables]; | 832 static int meter[N_Meterables]; |
833 | 833 |
834 DEFUN ("mmap-allocator-status", Fmmap_allocator_status, 0, 0, 0, /* | 834 DEFUN ("mmap-allocator-status", Fmmap_allocator_status, 0, 0, 0, /* |
835 Return some information about mmap-based allocator. | 835 Return some information about mmap-based allocator. |
836 | 836 |
837 mmap-addrlist-size: number of entries in address picking list. | 837 mmap-addrlist-size: number of entries in address picking list. |
838 mmap-times-mapped: number of times r_alloc was called. | 838 mmap-times-mapped: number of times r_alloc was called. |
839 mmap-pages-mapped: number of pages mapped by r_alloc calls only. | 839 mmap-pages-mapped: number of pages mapped by r_alloc calls only. |
840 mmap-times-unmapped: number of times r_free was called. | 840 mmap-times-unmapped: number of times r_free was called. |
841 mmap-times-remapped: number of times r_re_alloc was called. | 841 mmap-times-remapped: number of times r_re_alloc was called. |
842 mmap-didnt-copy: number of times re-alloc didn\'t have to move the block. | 842 mmap-didnt-copy: number of times re-alloc didn\'t have to move the block. |
843 mmap-pages-copied: total number of pages copied. | 843 mmap-pages-copied: total number of pages copied. |
844 mmap-average-bumpval: average increase in size demanded to re-alloc. | 844 mmap-average-bumpval: average increase in size demanded to re-alloc. |
845 mmap-wastage: total number of bytes allocated, but not currently in use. | 845 mmap-wastage: total number of bytes allocated, but not currently in use. |
866 return result; | 866 return result; |
867 } | 867 } |
868 | 868 |
869 #else /* !MMAP_METERING */ | 869 #else /* !MMAP_METERING */ |
870 | 870 |
871 #define MEMMETER(x) | 871 #define MEMMETER(x) |
872 #define MVAL(x) | 872 #define MVAL(x) |
873 | 873 |
874 #endif /* MMAP_METERING */ | 874 #endif /* MMAP_METERING */ |
875 | 875 |
876 static MMAP_HANDLE | 876 static MMAP_HANDLE |
883 case 0: | 883 case 0: |
884 MEMMETER( MVAL( M_Hash_Worked )++ ) | 884 MEMMETER( MVAL( M_Hash_Worked )++ ) |
885 return 0; | 885 return 0; |
886 | 886 |
887 case 1: | 887 case 1: |
888 if (*alias == MHASH_HITS[kval].addr) | 888 if (*alias == MHASH_HITS[kval].addr) |
889 { | 889 { |
890 MEMMETER( MVAL( M_Hash_Worked) ++ ); | 890 MEMMETER( MVAL( M_Hash_Worked) ++ ); |
891 return MHASH_HITS[kval].handle; | 891 return MHASH_HITS[kval].handle; |
892 } | 892 } |
893 /* FALL THROUGH */ | 893 /* FALL THROUGH */ |
894 default: | 894 default: |
895 return find_mmap_handle_lsearch( alias ); | 895 return find_mmap_handle_lsearch( alias ); |
896 } /* switch */ | 896 } /* switch */ |
897 } | 897 } |
898 | 898 |
899 /* | 899 /* |
900 Some kernels don't like being asked to pick addresses for mapping | 900 Some kernels don't like being asked to pick addresses for mapping |
901 themselves---IRIX is known to become extremely slow if mmap is | 901 themselves---IRIX is known to become extremely slow if mmap is |
902 passed a ZERO as the first argument. In such cases, we use an | 902 passed a ZERO as the first argument. In such cases, we use an |
903 address map which is managed local to the XEmacs process. The | 903 address map which is managed local to the XEmacs process. The |
904 address map maintains an ordered linked list of (address, size, | 904 address map maintains an ordered linked list of (address, size, |
944 | 944 |
945 static ADDRESS_CHAIN addr_chain = 0; | 945 static ADDRESS_CHAIN addr_chain = 0; |
946 /* Start off the address block chain with a humongous address block | 946 /* Start off the address block chain with a humongous address block |
947 which is empty to start with. Note that addr_chain is invariant | 947 which is empty to start with. Note that addr_chain is invariant |
948 WRT the addition/deletion of address blocks because of the assert | 948 WRT the addition/deletion of address blocks because of the assert |
949 in Coalesce() and the strict ordering of blocks by their address | 949 in Coalesce() and the strict ordering of blocks by their address |
950 */ | 950 */ |
951 static void Addr_Block_initialize() | 951 static void Addr_Block_initialize() |
952 { | 952 { |
953 MEMMETER( MVAL( M_Addrlist_Size )++) | 953 MEMMETER( MVAL( M_Addrlist_Size )++) |
954 addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK )); | 954 addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK )); |
971 np = p->next; | 971 np = p->next; |
972 | 972 |
973 if (p->flag == occupied) break; /* No cigar */ | 973 if (p->flag == occupied) break; /* No cigar */ |
974 | 974 |
975 /* Check if the addresses are contiguous. */ | 975 /* Check if the addresses are contiguous. */ |
976 if (p->addr + p->sz != np->addr) break; | 976 if (p->addr + p->sz != np->addr) break; |
977 | 977 |
978 MEMMETER( MVAL( M_Addrlist_Size )--) | 978 MEMMETER( MVAL( M_Addrlist_Size )--) |
979 /* We can coalesce these two. */ | 979 /* We can coalesce these two. */ |
980 p->sz += np->sz; | 980 p->sz += np->sz; |
981 p->next = np->next; | 981 p->next = np->next; |
982 assert( np != addr_chain ); /* We're not freeing the head of the list. */ | 982 assert( np != addr_chain ); /* We're not freeing the head of the list. */ |
1000 remainder->flag = empty; | 1000 remainder->flag = empty; |
1001 remainder->addr = p->addr + sz; | 1001 remainder->addr = p->addr + sz; |
1002 remainder->sz = p->sz - sz; | 1002 remainder->sz = p->sz - sz; |
1003 | 1003 |
1004 MEMMETER( MVAL( M_Addrlist_Size )++) | 1004 MEMMETER( MVAL( M_Addrlist_Size )++) |
1005 | 1005 |
1006 /* Now make p become an occupied block with the appropriate size */ | 1006 /* Now make p become an occupied block with the appropriate size */ |
1007 p->next = remainder; | 1007 p->next = remainder; |
1008 p->sz = sz; | 1008 p->sz = sz; |
1009 new_addr = mmap( (VM_ADDR) p->addr, p->sz, PROT_READ|PROT_WRITE, | 1009 new_addr = mmap( (VM_ADDR) p->addr, p->sz, PROT_READ|PROT_WRITE, |
1010 MAP_FLAGS, DEV_ZERO_FD, 0 ); | 1010 MAP_FLAGS, DEV_ZERO_FD, 0 ); |
1034 munmap( (VM_ADDR) p->addr, p->sz ); | 1034 munmap( (VM_ADDR) p->addr, p->sz ); |
1035 p->flag = empty; | 1035 p->flag = empty; |
1036 break; | 1036 break; |
1037 } | 1037 } |
1038 } | 1038 } |
1039 if (!p) abort(); /* Can't happen... we've got a block to free which is not in | 1039 if (!p) abort(); /* Can't happen... we've got a block to free which is not in |
1040 the address list. */ | 1040 the address list. */ |
1041 Coalesce_Addr_Blocks(); | 1041 Coalesce_Addr_Blocks(); |
1042 } | 1042 } |
1043 #else /* !MMAP_GENERATE_ADDRESSES */ | 1043 #else /* !MMAP_GENERATE_ADDRESSES */ |
1044 /* This is an alternate (simpler) implementation in cases where the | 1044 /* This is an alternate (simpler) implementation in cases where the |
1049 /* Nothing. */ | 1049 /* Nothing. */ |
1050 } | 1050 } |
1051 | 1051 |
1052 static VM_ADDR New_Addr_Block( SIZE sz ) | 1052 static VM_ADDR New_Addr_Block( SIZE sz ) |
1053 { | 1053 { |
1054 return mmap( 0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS, | 1054 return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS, |
1055 DEV_ZERO_FD, 0 ); | 1055 DEV_ZERO_FD, 0 ); |
1056 } | 1056 } |
1057 | 1057 |
1058 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ) | 1058 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ) |
1059 { | 1059 { |
1060 munmap( addr, sz ); | 1060 munmap ((caddr_t) addr, sz ); |
1061 } | 1061 } |
1062 | 1062 |
1063 #endif /* MMAP_GENERATE_ADDRESSES */ | 1063 #endif /* MMAP_GENERATE_ADDRESSES */ |
1064 | 1064 |
1065 | 1065 |
1066 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */ | 1066 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */ |
1067 | 1067 |
1068 /* | 1068 /* |
1069 r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start | 1069 r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start |
1070 address aliased to the first parameter. | 1070 address aliased to the first parameter. |
1071 */ | 1071 */ |
1072 | 1072 |
1073 POINTER r_alloc (POINTER *ptr, SIZE size); | 1073 POINTER r_alloc (POINTER *ptr, SIZE size); |
1074 POINTER | 1074 POINTER |
1075 r_alloc (POINTER *ptr, SIZE size) | 1075 r_alloc (POINTER *ptr, SIZE size) |
1076 { | 1076 { |
1077 MMAP_HANDLE mh; | 1077 MMAP_HANDLE mh; |
1078 | 1078 |
1079 switch(r_alloc_initialized) | 1079 switch(r_alloc_initialized) |
1080 { | 1080 { |
1081 case 0: | 1081 case 0: |
1082 abort(); | 1082 abort(); |
1083 case 1: | 1083 case 1: |
1100 return 0; /* ralloc failed due to mmap() failure. */ | 1100 return 0; /* ralloc failed due to mmap() failure. */ |
1101 } | 1101 } |
1102 MHASH_ADD( mh->vm_addr, mh ); | 1102 MHASH_ADD( mh->vm_addr, mh ); |
1103 mh->space_for = mmapped_size; | 1103 mh->space_for = mmapped_size; |
1104 mh->aliased_address = ptr; | 1104 mh->aliased_address = ptr; |
1105 *ptr = mh->vm_addr; | 1105 *ptr = (POINTER) mh->vm_addr; |
1106 } | 1106 } |
1107 else | 1107 else |
1108 *ptr = 0; /* Malloc of block failed */ | 1108 *ptr = 0; /* Malloc of block failed */ |
1109 break; | 1109 break; |
1110 } | 1110 } |
1194 MEMMETER(MVAL(M_Remap)++) | 1194 MEMMETER(MVAL(M_Remap)++) |
1195 if (h->space_for > sz) /* We've got some more room */ | 1195 if (h->space_for > sz) /* We've got some more room */ |
1196 { /* Also, if a shrinkage was asked for. */ | 1196 { /* Also, if a shrinkage was asked for. */ |
1197 MEMMETER( MVAL(M_Didnt_Copy)++ ) | 1197 MEMMETER( MVAL(M_Didnt_Copy)++ ) |
1198 MEMMETER( MVAL(M_Wastage) -= (sz - h->size)) | 1198 MEMMETER( MVAL(M_Wastage) -= (sz - h->size)) |
1199 /* We're pretty dumb at handling shrinkage. We should check for | 1199 /* We're pretty dumb at handling shrinkage. We should check for |
1200 a larger gap than the standard hysteresis allowable, and if so, | 1200 a larger gap than the standard hysteresis allowable, and if so, |
1201 shrink the number of pages. Right now, we simply reset the size | 1201 shrink the number of pages. Right now, we simply reset the size |
1202 component and return. */ | 1202 component and return. */ |
1203 h->size = sz; | 1203 h->size = sz; |
1204 return *ptr; | 1204 return *ptr; |
1205 } | 1205 } |
1206 | 1206 |
1207 new_vm_addr = New_Addr_Block( actual_sz ); | 1207 new_vm_addr = New_Addr_Block( actual_sz ); |
1208 if (new_vm_addr == VM_FAILURE_ADDR) | 1208 if (new_vm_addr == VM_FAILURE_ADDR) |
1209 {/* Failed to realloc. */ | 1209 {/* Failed to realloc. */ |
1210 /* *ptr = 0; */ | 1210 /* *ptr = 0; */ |
1211 return 0; | 1211 return 0; |
1212 } | 1212 } |
1213 | 1213 |
1227 /* Update block datastructure. */ | 1227 /* Update block datastructure. */ |
1228 h->space_for = actual_sz; /* New total space */ | 1228 h->space_for = actual_sz; /* New total space */ |
1229 h->size = sz; /* New (requested) size */ | 1229 h->size = sz; /* New (requested) size */ |
1230 h->vm_addr = new_vm_addr; /* New VM start address */ | 1230 h->vm_addr = new_vm_addr; /* New VM start address */ |
1231 h->aliased_address = ptr; /* Change alias to reflect block relocation. */ | 1231 h->aliased_address = ptr; /* Change alias to reflect block relocation. */ |
1232 *ptr = h->vm_addr; | 1232 *ptr = (POINTER) h->vm_addr; |
1233 return *ptr; | 1233 return *ptr; |
1234 } | 1234 } |
1235 } | 1235 } |
1236 | 1236 |
1237 | 1237 |
1289 void | 1289 void |
1290 vars_of_ralloc (void) | 1290 vars_of_ralloc (void) |
1291 { | 1291 { |
1292 DEFVAR_INT ("mmap-hysteresis", &mmap_hysteresis /* | 1292 DEFVAR_INT ("mmap-hysteresis", &mmap_hysteresis /* |
1293 Extra room left at the end of an allocated arena, | 1293 Extra room left at the end of an allocated arena, |
1294 so that a re-alloc requesting extra space smaller than this | 1294 so that a re-alloc requesting extra space smaller than this |
1295 does not actually cause a new arena to be allocated. | 1295 does not actually cause a new arena to be allocated. |
1296 | 1296 |
1297 A negative value is considered equal to zero. This is the | 1297 A negative value is considered equal to zero. This is the |
1298 minimum amount of space guaranteed to be left at the end of | 1298 minimum amount of space guaranteed to be left at the end of |
1299 the arena. Because allocation happens in multiples of the OS | 1299 the arena. Because allocation happens in multiples of the OS |
1300 page size, it is possible for more space to be left unused. | 1300 page size, it is possible for more space to be left unused. |
1301 */ ); | 1301 */ ); |
1302 mmap_hysteresis = 0; | 1302 mmap_hysteresis = 0; |
1303 } | 1303 } |