comparison src/ralloc.c @ 440:8de8e3f6228a r21-2-28

Import from CVS: tag r21-2-28
author cvs
date Mon, 13 Aug 2007 11:33:38 +0200
parents 3ecd8885ac67
children abe6d1db359e
comparing 439:357dd071b03c with 440:8de8e3f6228a
51 #endif /* 0 */ 51 #endif /* 0 */
52 52
53 /* Unconditionally use unsigned char * for this. */ 53 /* Unconditionally use unsigned char * for this. */
54 typedef unsigned char *POINTER; 54 typedef unsigned char *POINTER;
55 55
56 typedef unsigned long SIZE;
57
58 #ifdef DOUG_LEA_MALLOC 56 #ifdef DOUG_LEA_MALLOC
59 #define M_TOP_PAD -2 57 #define M_TOP_PAD -2
60 #include <malloc.h> 58 #include <malloc.h>
61 #endif 59 #endif
62 60
67 65
68 #else /* Not emacs. */ 66 #else /* Not emacs. */
69 67
70 #include <stddef.h> 68 #include <stddef.h>
71 69
72 typedef size_t SIZE;
73 typedef void *POINTER; 70 typedef void *POINTER;
74 71
75 #include <unistd.h> 72 #include <unistd.h>
76 #include <malloc.h> 73 #include <malloc.h>
77 #include <string.h> 74 #include <string.h>
78 75
79 #endif /* emacs. */ 76 #endif /* emacs. */
80 77
81 void init_ralloc (void); 78 void init_ralloc (void);
82 #define safe_bcopy(x, y, z) memmove (y, x, z)
83 79
84 #define NIL ((POINTER) 0) 80 #define NIL ((POINTER) 0)
85 81
86 82
87 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC) 83 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC)
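
Two changes in this first hunk ripple through the rest of the patch: the private SIZE typedef gives way to ISO C's size_t, and the safe_bcopy macro is deleted in favor of calling memmove directly. Since safe_bcopy used bcopy's (source, destination, length) order while memmove takes (destination, source, length), every call site below swaps its first two arguments. A minimal stand-alone illustration (the demo itself is not from the source; the macro is quoted from the deleted line above):

    #include <stdio.h>
    #include <string.h>

    /* The deleted wrapper: bcopy-style (src, dst, len) order on top of
       memmove, which, unlike memcpy, is defined for overlapping regions. */
    #define safe_bcopy(x, y, z) memmove ((y), (x), (z))

    int
    main (void)
    {
      char buf[] = "relocating";
      /* Overlapping move: shift the tail left by two bytes. */
      safe_bcopy (buf + 2, buf, strlen (buf + 2) + 1);
      puts (buf);               /* prints "locating" */
      return 0;
    }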
183 { 179 {
184 struct bp *next; 180 struct bp *next;
185 struct bp *prev; 181 struct bp *prev;
186 POINTER *variable; 182 POINTER *variable;
187 POINTER data; 183 POINTER data;
188 SIZE size; 184 size_t size;
189 POINTER new_data; /* temporarily used for relocation */ 185 POINTER new_data; /* temporarily used for relocation */
190 struct heap *heap; /* Heap this bloc is in. */ 186 struct heap *heap; /* Heap this bloc is in. */
191 } *bloc_ptr; 187 } *bloc_ptr;
192 188
193 #define NIL_BLOC ((bloc_ptr) 0) 189 #define NIL_BLOC ((bloc_ptr) 0)
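
The variable field of this descriptor is what makes the scheme transparent: it holds the address of the user's own pointer variable, so after a bloc is slid to new_data the allocator repairs the user's view with *b->variable = b->data = b->new_data, a pattern visible throughout the hunks below. A stripped-down sketch (types and fields simplified from the real struct bp):

    #include <string.h>

    typedef unsigned char *POINTER;

    struct bloc
    {
      POINTER *variable;        /* address of the user's pointer variable */
      POINTER data;             /* where the user's bytes currently live  */
    };

    /* Slide B's contents to NEW_DATA and repair the user's pointer:
       the same pattern the patch applies after every memmove below. */
    static void
    relocate (struct bloc *b, POINTER new_data, size_t size)
    {
      memmove (new_data, b->data, size);
      *b->variable = b->data = new_data;
    }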
243 239
244 Return the address of the space if all went well, or zero if we couldn't 240 Return the address of the space if all went well, or zero if we couldn't
245 allocate the memory. */ 241 allocate the memory. */
246 242
247 static POINTER 243 static POINTER
248 obtain (POINTER address, SIZE size) 244 obtain (POINTER address, size_t size)
249 { 245 {
250 heap_ptr heap; 246 heap_ptr heap;
251 SIZE already_available; 247 size_t already_available;
252 248
253 /* Find the heap that ADDRESS falls within. */ 249 /* Find the heap that ADDRESS falls within. */
254 for (heap = last_heap; heap; heap = heap->prev) 250 for (heap = last_heap; heap; heap = heap->prev)
255 { 251 {
256 if (heap->start <= address && address <= heap->end) 252 if (heap->start <= address && address <= heap->end)
273 /* If we can't fit them within any existing heap, 269 /* If we can't fit them within any existing heap,
274 get more space. */ 270 get more space. */
275 if (heap == NIL_HEAP) 271 if (heap == NIL_HEAP)
276 { 272 {
277 POINTER new = (*real_morecore)(0); 273 POINTER new = (*real_morecore)(0);
278 SIZE get; 274 size_t get;
279 275
280 already_available = (char *)last_heap->end - (char *)address; 276 already_available = (char *)last_heap->end - (char *)address;
281 277
282 if (new != last_heap->end) 278 if (new != last_heap->end)
283 { 279 {
323 #if 0 319 #if 0
324 /* Obtain SIZE bytes of space and return a pointer to the new area. 320 /* Obtain SIZE bytes of space and return a pointer to the new area.
325 If we could not allocate the space, return zero. */ 321 If we could not allocate the space, return zero. */
326 322
327 static POINTER 323 static POINTER
328 get_more_space (SIZE size) 324 get_more_space (size_t size)
329 { 325 {
330 POINTER ptr = break_value; 326 POINTER ptr = break_value;
331 if (obtain (size)) 327 if (obtain (size))
332 return ptr; 328 return ptr;
333 else 329 else
386 /* Return the total size in use by the relocating allocator, 382 /* Return the total size in use by the relocating allocator,
387 above where malloc gets space. */ 383 above where malloc gets space. */
388 384
389 long r_alloc_size_in_use (void); 385 long r_alloc_size_in_use (void);
390 long 386 long
391 r_alloc_size_in_use () 387 r_alloc_size_in_use (void)
392 { 388 {
393 return break_value - virtual_break_value; 389 return break_value - virtual_break_value;
394 } 390 }
395 391
396 /* The meat - allocating, freeing, and relocating blocs. */ 392 /* The meat - allocating, freeing, and relocating blocs. */
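
Besides the size_t substitution, the definition now spells (void) instead of (): in C89/C99 an empty parameter list leaves the function unprototyped, while (void) declares that it takes no arguments and lets the compiler reject stray ones. The same modernization is applied to Addr_Block_initialize and friends further down. In brief:

    long f ();      /* old style: arguments unchecked, f (1, 2) still compiles */
    long g (void);  /* prototype: g (1) is a compile-time error */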
418 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs. 414 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
419 Returns a pointer to the new bloc, or zero if we couldn't allocate 415 Returns a pointer to the new bloc, or zero if we couldn't allocate
420 memory for the new block. */ 416 memory for the new block. */
421 417
422 static bloc_ptr 418 static bloc_ptr
423 get_bloc (SIZE size) 419 get_bloc (size_t size)
424 { 420 {
425 register bloc_ptr new_bloc; 421 register bloc_ptr new_bloc;
426 register heap_ptr heap; 422 register heap_ptr heap;
427 423
428 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) 424 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
499 /* If BLOC won't fit in any heap, 495 /* If BLOC won't fit in any heap,
500 get enough new space to hold BLOC and all following blocs. */ 496 get enough new space to hold BLOC and all following blocs. */
501 if (heap == NIL_HEAP) 497 if (heap == NIL_HEAP)
502 { 498 {
503 register bloc_ptr tb = b; 499 register bloc_ptr tb = b;
504 register SIZE s = 0; 500 register size_t s = 0;
505 501
506 /* Add up the size of all the following blocs. */ 502 /* Add up the size of all the following blocs. */
507 while (tb != NIL_BLOC) 503 while (tb != NIL_BLOC)
508 { 504 {
509 if (tb->variable) 505 if (tb->variable)
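
This hunk ends mid-loop; the elided body presumably accumulates the sizes of the still-referenced blocs and advances the cursor, roughly as follows (a sketch of the elided lines, not a quote):

    while (tb != NIL_BLOC)
      {
        if (tb->variable)       /* still referenced by a user pointer */
          s += tb->size;
        tb = tb->next;
      }
    /* ... then grow the last heap by at least s bytes. */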
626 622
627 /* Resize BLOC to SIZE bytes. This relocates the blocs 623 /* Resize BLOC to SIZE bytes. This relocates the blocs
628 that come after BLOC in memory. */ 624 that come after BLOC in memory. */
629 625
630 static int 626 static int
631 resize_bloc (bloc_ptr bloc, SIZE size) 627 resize_bloc (bloc_ptr bloc, size_t size)
632 { 628 {
633 register bloc_ptr b; 629 register bloc_ptr b;
634 heap_ptr heap; 630 heap_ptr heap;
635 POINTER address; 631 POINTER address;
636 SIZE old_size; 632 size_t old_size;
637 633
638 /* No need to ever call this if arena is frozen, bug somewhere! */ 634 /* No need to ever call this if arena is frozen, bug somewhere! */
639 if (r_alloc_freeze_level) 635 if (r_alloc_freeze_level)
640 abort(); 636 abort();
641 637
679 b->size = 0; 675 b->size = 0;
680 b->data = b->new_data; 676 b->data = b->new_data;
681 } 677 }
682 else 678 else
683 { 679 {
684 safe_bcopy (b->data, b->new_data, b->size); 680 memmove (b->new_data, b->data, b->size);
685 *b->variable = b->data = b->new_data; 681 *b->variable = b->data = b->new_data;
686 } 682 }
687 } 683 }
688 if (!bloc->variable) 684 if (!bloc->variable)
689 { 685 {
690 bloc->size = 0; 686 bloc->size = 0;
691 bloc->data = bloc->new_data; 687 bloc->data = bloc->new_data;
692 } 688 }
693 else 689 else
694 { 690 {
695 safe_bcopy (bloc->data, bloc->new_data, old_size); 691 memmove (bloc->new_data, bloc->data, old_size);
696 memset (bloc->new_data + old_size, 0, size - old_size); 692 memset (bloc->new_data + old_size, 0, size - old_size);
697 *bloc->variable = bloc->data = bloc->new_data; 693 *bloc->variable = bloc->data = bloc->new_data;
698 } 694 }
699 } 695 }
700 else 696 else
706 b->size = 0; 702 b->size = 0;
707 b->data = b->new_data; 703 b->data = b->new_data;
708 } 704 }
709 else 705 else
710 { 706 {
711 safe_bcopy (b->data, b->new_data, b->size); 707 memmove (b->new_data, b->data, b->size);
712 *b->variable = b->data = b->new_data; 708 *b->variable = b->data = b->new_data;
713 } 709 }
714 } 710 }
715 } 711 }
716 712
811 /* Allocate a page-aligned space. GNU malloc would reclaim an 807 /* Allocate a page-aligned space. GNU malloc would reclaim an
812 extra space if we passed an unaligned one. But we could 808 extra space if we passed an unaligned one. But we could
813 not always find a space which is contiguous with the previous one. */ 809 not always find a space which is contiguous with the previous one. */
814 POINTER new_bloc_start; 810 POINTER new_bloc_start;
815 heap_ptr h = first_heap; 811 heap_ptr h = first_heap;
816 SIZE get = ROUNDUP (size); 812 size_t get = ROUNDUP (size);
817 813
818 address = (POINTER) ROUNDUP (virtual_break_value); 814 address = (POINTER) ROUNDUP (virtual_break_value);
819 815
820 /* Search the list upward for a heap which is large enough. */ 816 /* Search the list upward for a heap which is large enough. */
821 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get)) 817 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
860 /* Note that (POINTER)(h+1) <= new_bloc_start since 856 /* Note that (POINTER)(h+1) <= new_bloc_start since
861 get >= page_size, so the following does not destroy the heap 857 get >= page_size, so the following does not destroy the heap
862 header. */ 858 header. */
863 for (b = last_bloc; b != NIL_BLOC; b = b->prev) 859 for (b = last_bloc; b != NIL_BLOC; b = b->prev)
864 { 860 {
865 safe_bcopy (b->data, b->new_data, b->size); 861 memmove (b->new_data, b->data, b->size);
866 *b->variable = b->data = b->new_data; 862 *b->variable = b->data = b->new_data;
867 } 863 }
868 864
869 h->bloc_start = new_bloc_start; 865 h->bloc_start = new_bloc_start;
870 866
891 887
892 memset (address, 0, size); 888 memset (address, 0, size);
893 } 889 }
894 else /* size < 0 */ 890 else /* size < 0 */
895 { 891 {
896 SIZE excess = (char *)first_heap->bloc_start 892 size_t excess = (char *)first_heap->bloc_start
897 - ((char *)virtual_break_value + size); 893 - ((char *)virtual_break_value + size);
898 894
899 address = virtual_break_value; 895 address = virtual_break_value;
900 896
901 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes) 897 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
906 902
907 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start); 903 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
908 904
909 for (b = first_bloc; b != NIL_BLOC; b = b->next) 905 for (b = first_bloc; b != NIL_BLOC; b = b->next)
910 { 906 {
911 safe_bcopy (b->data, b->new_data, b->size); 907 memmove (b->new_data, b->data, b->size);
912 *b->variable = b->data = b->new_data; 908 *b->variable = b->data = b->new_data;
913 } 909 }
914 } 910 }
915 911
916 if ((char *)virtual_break_value + size < (char *)first_heap->start) 912 if ((char *)virtual_break_value + size < (char *)first_heap->start)
939 before allocating a new area. Not yet done. 935 before allocating a new area. Not yet done.
940 936
941 If we can't allocate the necessary memory, set *PTR to zero, and 937 If we can't allocate the necessary memory, set *PTR to zero, and
942 return zero. */ 938 return zero. */
943 939
944 POINTER r_alloc (POINTER *ptr, SIZE size); 940 POINTER r_alloc (POINTER *ptr, size_t size);
945 POINTER 941 POINTER
946 r_alloc (POINTER *ptr, SIZE size) 942 r_alloc (POINTER *ptr, size_t size)
947 { 943 {
948 bloc_ptr new_bloc; 944 bloc_ptr new_bloc;
949 945
950 if (! r_alloc_initialized) 946 if (! r_alloc_initialized)
951 init_ralloc (); 947 init_ralloc ();
998 Change *PTR to reflect the new bloc, and return this value. 994 Change *PTR to reflect the new bloc, and return this value.
999 995
1000 If more memory cannot be allocated, then leave *PTR unchanged, and 996 If more memory cannot be allocated, then leave *PTR unchanged, and
1001 return zero. */ 997 return zero. */
1002 998
1003 POINTER r_re_alloc (POINTER *ptr, SIZE size); 999 POINTER r_re_alloc (POINTER *ptr, size_t size);
1004 POINTER 1000 POINTER
1005 r_re_alloc (POINTER *ptr, SIZE size) 1001 r_re_alloc (POINTER *ptr, size_t size)
1006 { 1002 {
1007 register bloc_ptr bloc; 1003 register bloc_ptr bloc;
1008 1004
1009 if (! r_alloc_initialized) 1005 if (! r_alloc_initialized)
1010 init_ralloc (); 1006 init_ralloc ();
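
Taken together, the two contracts above (set *PTR and return it on success; on failure, zero from both, with r_re_alloc leaving *PTR untouched) give the call pattern below. It is the same for this sbrk-based implementation and the mmap-based one later in the file. The buffer name, sizes, and the fatal() handler are illustrative, not from the source:

    POINTER text = NIL;

    /* Ask for 64 KB of relocatable storage.  The allocator records
       &text and rewrites it whenever the bloc is slid around, so the
       pointer must be re-read after any call that can relocate. */
    if (r_alloc (&text, 64 * 1024) == NIL)
      fatal ("virtual memory exhausted");

    /* Grow it later; on failure zero is returned and text is intact. */
    if (r_re_alloc (&text, 128 * 1024) == NIL)
      fatal ("virtual memory exhausted");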
1090 1086
1091 if (--r_alloc_freeze_level < 0) 1087 if (--r_alloc_freeze_level < 0)
1092 abort (); 1088 abort ();
1093 1089
1094 /* This frees all unused blocs. It is not too inefficient, as the resize 1090 /* This frees all unused blocs. It is not too inefficient, as the resize
1095 and bcopy are done only once. Afterwards, all unreferenced blocs are 1091 and memmove are done only once. Afterwards, all unreferenced blocs are
1096 already shrunk to zero size. */ 1092 already shrunk to zero size. */
1097 if (!r_alloc_freeze_level) 1093 if (!r_alloc_freeze_level)
1098 { 1094 {
1099 bloc_ptr *b = &first_bloc; 1095 bloc_ptr *b = &first_bloc;
1100 while (*b) 1096 while (*b)
1235 1231
1236 for (b = first_bloc; b; b = b->next) 1232 for (b = first_bloc; b; b = b->next)
1237 { 1233 {
1238 assert (b->prev == pb); 1234 assert (b->prev == pb);
1239 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data); 1235 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
1240 assert ((SIZE) MEM_ROUNDUP (b->size) == b->size); 1236 assert ((size_t) MEM_ROUNDUP (b->size) == b->size);
1241 1237
1242 ph = 0; 1238 ph = 0;
1243 for (h = first_heap; h; h = h->next) 1239 for (h = first_heap; h; h = h->next)
1244 { 1240 {
1245 if (h->bloc_start <= b->data && b->data + b->size <= h->end) 1241 if (h->bloc_start <= b->data && b->data + b->size <= h->end)
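
These asserts check that every bloc's data pointer and size are already aligned, i.e. that rounding them is a no-op. ROUNDUP and MEM_ROUNDUP themselves are defined earlier in the file, outside this excerpt; for a power-of-two page size the usual shape is the following (an assumed reconstruction, deliberately named differently so as not to claim it is the real definition):

    /* Round V up to the next multiple of the power-of-two page_size. */
    #define PAGE_ROUNDUP(v) \
      (((unsigned long) (v) + page_size - 1) & ~((unsigned long) page_size - 1))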
1695 1691
1696 /* Initialization procedure for address picking scheme */ 1692 /* Initialization procedure for address picking scheme */
1697 static void Addr_Block_initialize(void); 1693 static void Addr_Block_initialize(void);
1698 1694
1699 /* Get a suitable VM_ADDR via mmap */ 1695 /* Get a suitable VM_ADDR via mmap */
1700 static VM_ADDR New_Addr_Block( SIZE sz ); 1696 static VM_ADDR New_Addr_Block (size_t sz);
1701 1697
1702 /* Free a VM_ADDR allocated via New_Addr_Block */ 1698 /* Free a VM_ADDR allocated via New_Addr_Block */
1703 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ); 1699 static void Free_Addr_Block (VM_ADDR addr, size_t sz);
1704 1700
1705 #ifdef MMAP_GENERATE_ADDRESSES 1701 #ifdef MMAP_GENERATE_ADDRESSES
1706 /* Implementation of the three calls for address picking when XEmacs is in charge */ 1702 /* Implementation of the three calls for address picking when XEmacs is in charge */
1707 1703
1708 /* The enum denotes the status of the following block. */ 1704 /* The enum denotes the status of the following block. */
1709 typedef enum { empty = 0, occupied, unavailable } addr_status; 1705 typedef enum { empty = 0, occupied, unavailable } addr_status;
1710 1706
1711 typedef struct addr_chain 1707 typedef struct addr_chain
1712 { 1708 {
1713 POINTER addr; 1709 POINTER addr;
1714 SIZE sz; 1710 size_t sz;
1715 addr_status flag; 1711 addr_status flag;
1716 struct addr_chain *next; 1712 struct addr_chain *next;
1717 } ADDRESS_BLOCK, *ADDRESS_CHAIN; 1713 } ADDRESS_BLOCK, *ADDRESS_CHAIN;
1718 /* NB: empty and unavailable blocks are concatenated. */ 1714 /* NB: empty and unavailable blocks are concatenated. */
1719 1715
1721 /* Start off the address block chain with a humongous address block 1717 /* Start off the address block chain with a humongous address block
1722 which is empty to start with. Note that addr_chain is invariant 1718 which is empty to start with. Note that addr_chain is invariant
1723 WRT the addition/deletion of address blocks because of the assert 1719 WRT the addition/deletion of address blocks because of the assert
1724 in Coalesce() and the strict ordering of blocks by their address 1720 in Coalesce() and the strict ordering of blocks by their address
1725 */ 1721 */
1726 static void Addr_Block_initialize() 1722 static void
1723 Addr_Block_initialize (void)
1727 { 1724 {
1728 MEMMETER( MVAL( M_Addrlist_Size )++) 1725 MEMMETER( MVAL( M_Addrlist_Size )++)
1729 addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK )); 1726 addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ));
1730 addr_chain->next = 0; /* Last block in chain */ 1727 addr_chain->next = 0; /* Last block in chain */
1731 addr_chain->sz = 0x0c000000; /* Size */ 1728 addr_chain->sz = 0x0c000000; /* Size */
1733 addr_chain->flag = empty; 1730 addr_chain->flag = empty;
1734 } 1731 }
1735 1732
1736 /* Coalesce address blocks if they are contiguous. Only empty and 1733 /* Coalesce address blocks if they are contiguous. Only empty and
1737 unavailable slots are coalesced. */ 1734 unavailable slots are coalesced. */
1738 static void Coalesce_Addr_Blocks() 1735 static void
1736 Coalesce_Addr_Blocks (void)
1739 { 1737 {
1740 ADDRESS_CHAIN p; 1738 ADDRESS_CHAIN p;
1741 for (p = addr_chain; p; p = p->next) 1739 for (p = addr_chain; p; p = p->next)
1742 { 1740 {
1743 while (p->next && p->flag == p->next->flag) 1741 while (p->next && p->flag == p->next->flag)
1759 } 1757 }
1760 } /* for all p */ 1758 } /* for all p */
1761 } 1759 }
1762 1760
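
The merge step inside the while loop is elided from this hunk. Conceptually it folds the successor into p and unlinks it, with occupied blocks and non-contiguous neighbors excluded, along these lines (a sketch of the elided logic; metering omitted):

    while (p->next && p->flag == p->next->flag)
      {
        ADDRESS_CHAIN np = p->next;
        if (p->flag == occupied)
          break;                /* only empty/unavailable blocks merge */
        if ((char *) p->addr + p->sz != (char *) np->addr)
          break;                /* neighbors must be contiguous */
        p->sz += np->sz;        /* fold np into p ... */
        p->next = np->next;     /* ... and unlink it */
        UNDERLYING_FREE (np);
      }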
1763 /* Get an empty address block of specified size. */ 1761 /* Get an empty address block of specified size. */
1764 static VM_ADDR New_Addr_Block( SIZE sz ) 1762 static VM_ADDR
1763 New_Addr_Block (size_t sz)
1765 { 1764 {
1766 ADDRESS_CHAIN p = addr_chain; 1765 ADDRESS_CHAIN p = addr_chain;
1767 VM_ADDR new_addr = VM_FAILURE_ADDR; 1766 VM_ADDR new_addr = VM_FAILURE_ADDR;
1768 for (; p; p = p->next) 1767 for (; p; p = p->next)
1769 { 1768 {
1796 return new_addr; 1795 return new_addr;
1797 } 1796 }
1798 1797
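
The body elided from this hunk is a first-fit search: find an empty block large enough, split off the unused tail as a new empty entry, and mmap the front piece at that fixed address. In outline (a sketch only; error paths and metering are omitted, and the MAP_FIXED usage is an assumption):

    for (; p; p = p->next)
      if (p->flag == empty && p->sz >= sz)
        {
          if (p->sz > sz)       /* split off the unused tail */
            {
              ADDRESS_CHAIN rest =
                (ADDRESS_CHAIN) UNDERLYING_MALLOC (sizeof (ADDRESS_BLOCK));
              rest->addr = (POINTER) ((char *) p->addr + sz);
              rest->sz = p->sz - sz;
              rest->flag = empty;
              rest->next = p->next;
              p->next = rest;
              p->sz = sz;
            }
          new_addr = mmap ((VM_ADDR) p->addr, sz, PROT_READ | PROT_WRITE,
                           MAP_FLAGS | MAP_FIXED, DEV_ZERO_FD, 0);
          if (new_addr != VM_FAILURE_ADDR)
            p->flag = occupied;
          break;
        }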
1799 /* Free an address block. We mark the block as being empty, and attempt to 1798 /* Free an address block. We mark the block as being empty, and attempt to
1800 do any coalescing that may have resulted from this. */ 1799 do any coalescing that may have resulted from this. */
1801 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ) 1800 static void
1801 Free_Addr_Block (VM_ADDR addr, size_t sz)
1802 { 1802 {
1803 ADDRESS_CHAIN p = addr_chain; 1803 ADDRESS_CHAIN p = addr_chain;
1804 for (; p; p = p->next ) 1804 for (; p; p = p->next )
1805 { 1805 {
1806 if (p->addr == addr) 1806 if (p->addr == addr)
1817 } 1817 }
1818 #else /* !MMAP_GENERATE_ADDRESSES */ 1818 #else /* !MMAP_GENERATE_ADDRESSES */
1819 /* This is an alternate (simpler) implementation in cases where the 1819 /* This is an alternate (simpler) implementation in cases where the
1820 address is picked by the kernel. */ 1820 address is picked by the kernel. */
1821 1821
1822 static void Addr_Block_initialize(void) 1822 static void
1823 Addr_Block_initialize (void)
1823 { 1824 {
1824 /* Nothing. */ 1825 /* Nothing. */
1825 } 1826 }
1826 1827
1827 static VM_ADDR New_Addr_Block( SIZE sz ) 1828 static VM_ADDR
1829 New_Addr_Block (size_t sz)
1828 { 1830 {
1829 return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS, 1831 return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS,
1830 DEV_ZERO_FD, 0 ); 1832 DEV_ZERO_FD, 0 );
1831 } 1833 }
1832 1834
1833 static void Free_Addr_Block( VM_ADDR addr, SIZE sz ) 1835 static void
1836 Free_Addr_Block (VM_ADDR addr, size_t sz)
1834 { 1837 {
1835 munmap ((caddr_t) addr, sz ); 1838 munmap ((caddr_t) addr, sz );
1836 } 1839 }
1837 1840
1838 #endif /* MMAP_GENERATE_ADDRESSES */ 1841 #endif /* MMAP_GENERATE_ADDRESSES */
1839 1842
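
A note on the portability macros used by both variants: MAP_FLAGS and DEV_ZERO_FD are defined earlier in the file, outside this excerpt. On the usual pattern they expand to MAP_PRIVATE plus MAP_ANONYMOUS where the platform supports anonymous mappings, or to plain MAP_PRIVATE with a descriptor opened on /dev/zero where it does not; that reading is inferred from the names, not quoted from the source.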
1840 1843
1841 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */ 1844 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */
1842 1845
1843 /* 1846 /*
1844 r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start 1847 r_alloc (POINTER, SIZE): Allocate a relocatable area with the start
1845 address aliased to the first parameter. 1848 address aliased to the first parameter.
1846 */ 1849 */
1847 1850
1848 POINTER r_alloc (POINTER *ptr, SIZE size); 1851 POINTER r_alloc (POINTER *ptr, size_t size);
1849 POINTER 1852 POINTER
1850 r_alloc (POINTER *ptr, SIZE size) 1853 r_alloc (POINTER *ptr, size_t size)
1851 { 1854 {
1852 MMAP_HANDLE mh; 1855 MMAP_HANDLE mh;
1853 1856
1854 switch(r_alloc_initialized) 1857 switch(r_alloc_initialized)
1855 { 1858 {
1860 break; 1863 break;
1861 default: 1864 default:
1862 mh = new_mmap_handle( size ); 1865 mh = new_mmap_handle( size );
1863 if (mh) 1866 if (mh)
1864 { 1867 {
1865 SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); 1868 size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
1866 SIZE mmapped_size = ROUNDUP( size + hysteresis ); 1869 size_t mmapped_size = ROUNDUP( size + hysteresis );
1867 MEMMETER( MVAL(M_Map)++ ) 1870 MEMMETER( MVAL(M_Map)++ )
1868 MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) ) 1871 MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) )
1869 MEMMETER( MVAL(M_Wastage) += mmapped_size - size ) 1872 MEMMETER( MVAL(M_Wastage) += mmapped_size - size )
1870 MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) ) 1873 MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) )
1871 mh->vm_addr = New_Addr_Block( mmapped_size ); 1874 mh->vm_addr = New_Addr_Block( mmapped_size );
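
To make the metering above concrete: with a 4096-byte page, mmap_hysteresis = 2048, and a request of size = 10000, mmapped_size = ROUNDUP (10000 + 2048) = 12288, so three pages are mapped, M_Pages_Map rises by 3, and M_Wastage rises by 12288 - 10000 = 2288. The hysteresis padding exists so that a later modest r_re_alloc can grow the bloc without remapping.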
1929 Change *PTR to reflect the new bloc, and return this value. 1932 Change *PTR to reflect the new bloc, and return this value.
1930 1933
1931 If more memory cannot be allocated, then leave *PTR unchanged, and 1934 If more memory cannot be allocated, then leave *PTR unchanged, and
1932 return zero. */ 1935 return zero. */
1933 1936
1934 POINTER r_re_alloc (POINTER *ptr, SIZE sz); 1937 POINTER r_re_alloc (POINTER *ptr, size_t sz);
1935 POINTER 1938 POINTER
1936 r_re_alloc (POINTER *ptr, SIZE sz) 1939 r_re_alloc (POINTER *ptr, size_t sz)
1937 { 1940 {
1938 if (r_alloc_initialized == 0) 1941 if (r_alloc_initialized == 0)
1939 { 1942 {
1940 abort (); 1943 abort ();
1941 return 0; /* suppress compiler warning */ 1944 return 0; /* suppress compiler warning */
1947 *ptr = tmp; 1950 *ptr = tmp;
1948 return tmp; 1951 return tmp;
1949 } 1952 }
1950 else 1953 else
1951 { 1954 {
1952 SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0); 1955 size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
1953 SIZE actual_sz = ROUNDUP( sz + hysteresis ); 1956 size_t actual_sz = ROUNDUP( sz + hysteresis );
1954 MMAP_HANDLE h = find_mmap_handle( ptr ); 1957 MMAP_HANDLE h = find_mmap_handle( ptr );
1955 VM_ADDR new_vm_addr; 1958 VM_ADDR new_vm_addr;
1956 1959
1957 if ( h == 0 ) /* Was allocated using malloc. */ 1960 if ( h == 0 ) /* Was allocated using malloc. */
1958 { 1961 {
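
The excerpt ends here. For a handle that was mmapped, the remainder of r_re_alloc presumably weighs the padded request against the existing mapping: if ROUNDUP (sz + hysteresis) still fits, only the recorded size changes; otherwise a fresh address block is obtained, the old contents copied across, and the old block released. A condensed sketch of that tail, with hypothetical handle field names (mapped_size, size) rather than the real MMAP_HANDLE layout:

    if (actual_sz <= h->mapped_size)    /* padded request still fits */
      {
        h->size = sz;                   /* just record the new size */
        return *ptr;
      }
    new_vm_addr = New_Addr_Block (actual_sz);
    if (new_vm_addr == VM_FAILURE_ADDR)
      return 0;                         /* leave *ptr unchanged */
    memmove (new_vm_addr, h->vm_addr, h->size);  /* carry old contents */
    Free_Addr_Block (h->vm_addr, h->mapped_size);
    h->vm_addr = new_vm_addr;
    h->mapped_size = actual_sz;
    h->size = sz;
    *ptr = (POINTER) new_vm_addr;
    return *ptr;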