src/gmalloc.c @ 428:3ecd8885ac67 (tag r21-2-22)
Import from CVS: tag r21-2-22

author:   cvs
date:     Mon, 13 Aug 2007 11:28:15 +0200
parent:   427:0a0253eac470
children: 8de8e3f6228a
/* Synched up with: Not synched up with FSF 19.28, even though I
   thought I did so. */

/* Get the configuration files if we're being compiled for Emacs. */
#ifdef emacs
# include <config.h>
# include "getpagesize.h"
# ifndef HAVE_CONFIG_H
#  define HAVE_CONFIG_H
# endif
#endif

#if defined (__STDC__) && !defined (STDC_HEADERS)
/* The ANSI standard says that defining __STDC__ to a non-zero value means
   that the compiler conforms to that standard.  The standard requires
   certain header files and library functions to be present.  Therefore,
   if your compiler defines __STDC__ to non-0 but does not have ANSI headers
   and the ANSI library routines, then your compiler is buggy.  Conversely,
   an ANSI-conforming environment (which has both the ANSI headers and
   library routines, i.e., stdlib.h and `memmove') does not necessarily
   define the STDC_HEADERS flag.  Lucid Emacs requires an ANSI compiler.
   Therefore, there is no need to consult the abominable STDC_HEADERS flag.
   -- jwz
 */
# define STDC_HEADERS
#endif

#define __const const


/* DO NOT EDIT THIS FILE -- it is automagically generated.  -*- C -*- */
/* Bwaa-haa-haa!  Not a chance that this is actually true! */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here. */

/* Declarations for `malloc' and friends.
   Copyright 1990, 1991, 1992, 1993 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <string.h>
#include <limits.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#endif /* _MALLOC_INTERNAL. */


#ifdef __cplusplus
extern "C"
{
#endif

#undef __P
#define __P(args) args
#undef __ptr_t
#define __ptr_t void *

#include <stddef.h>
#define __malloc_size_t size_t

#ifndef NULL
#define NULL 0
#endif

/* XEmacs: I thought this should be int under SunOS, but that
   apparently fails.  Curses on all this shit. */
#define __free_ret_t void

/* XEmacs: I tried commenting these out and including stdlib.h,
   but that fails badly.  Urk!  This sucks. */
/* Allocate SIZE bytes of memory. */
extern __ptr_t malloc __P ((size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long. */
extern __ptr_t realloc __P ((__ptr_t __ptr, size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
extern __ptr_t calloc __P ((size_t __nmemb, size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'. */
extern __free_ret_t free __P ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes. */
extern __ptr_t memalign __P ((size_t __alignment, size_t __size));

/* Allocate SIZE bytes on a page boundary. */
extern __ptr_t valloc __P ((size_t __size));


#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed. */
#define INT_BIT (CHAR_BIT * sizeof(int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
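/* Illustrative note (added commentary, not in the original source):
   on a host with 32-bit int, INT_BIT is 32, so BLOCKLOG is 12 and
   BLOCKSIZE is 4096 bytes.  Requests of up to BLOCKSIZE / 2 (2048)
   bytes are served from power-of-two fragments; a larger request,
   say 5000 bytes, takes BLOCKIFY (5000) == 2 whole blocks. */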

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit). */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system. */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information. */
typedef union
  {
    /* Heap information for a busy block. */
    struct
      {
        /* Zero for a large block, or positive giving the
           logarithm to the base two of the fragment size. */
        int type;
        union
          {
            struct
              {
                __malloc_size_t nfree; /* Free frags in a fragmented block. */
                __malloc_size_t first; /* First free fragment of the block. */
              } frag;
            /* Size (in blocks) of a large cluster. */
            __malloc_size_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster). */
    struct
      {
        __malloc_size_t size; /* Size (in blocks) of a free cluster. */
        __malloc_size_t next; /* Index of next free cluster. */
        __malloc_size_t prev; /* Index of previous free cluster. */
      } free;
  } malloc_info;

/* Pointer to first block of the heap. */
extern char *_heapbase;

/* Table indexed by block number giving per-block information. */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa. */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
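/* Illustrative note (added commentary, not in the original source):
   block numbers are 1-based offsets from _heapbase.  With BLOCKSIZE
   == 4096, an address 8192 bytes past _heapbase lies in block
   8192 / 4096 + 1 == 3, and ADDRESS (3) maps back to
   _heapbase + 2 * 4096. */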

/* Current search index for the heap table. */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices. */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments. */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size. */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc'). */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned; /* The address that memalign returned. */
    __ptr_t exact;   /* The address that malloc returned. */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation. */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal version of `free' used in `morecore' (malloc.c). */
extern void _free_internal __P ((__ptr_t __ptr));

#endif /* _MALLOC_INTERNAL. */

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory. */
extern __ptr_t (*__morecore) __P ((ptrdiff_t __size));

/* Default value of `__morecore'. */
extern __ptr_t __default_morecore __P ((ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size. */
extern void (*__after_morecore_hook) __P ((void));

/* Nonzero if `malloc' has been called and done its initialization. */
/* extern int __malloc_initialized; */

/* Hooks for debugging versions. */
extern void (*__free_hook) __P ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) __P ((size_t __size));
extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, size_t __size));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of. */
enum mcheck_status
  {
    MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
    MCHECK_OK,            /* Block is fine. */
    MCHECK_FREE,          /* Block freed twice. */
    MCHECK_HEAD,          /* Memory before the block was clobbered. */
    MCHECK_TAIL           /* Memory after the block was clobbered. */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'. */
extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block. */
extern enum mcheck_status mprobe __P ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks. */
extern void mtrace __P ((void));
extern void muntrace __P ((void));

/* Statistics available to the user. */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap. */
    __malloc_size_t chunks_used; /* Chunks allocated by the user. */
    __malloc_size_t bytes_used;  /* Byte total of user-allocated chunks. */
    __malloc_size_t chunks_free; /* Chunks in the free list. */
    __malloc_size_t bytes_free;  /* Byte total of chunks in the free list. */
  };

/* Pick up the current statistics. */
extern struct mstats mstats __P ((void));

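/* Illustrative sketch (not part of the original file): one way a client
   could report the allocator statistics declared above.  The function
   name `print_malloc_stats' is invented for this example, and the block
   is kept inside #if 0 so it is never compiled into the allocator. */
#if 0
#include <stdio.h>

static void
print_malloc_stats (void)
{
  struct mstats s = mstats ();
  fprintf (stderr, "heap: %lu bytes total, %lu used in %lu chunks, "
           "%lu free in %lu chunks\n",
           (unsigned long) s.bytes_total,
           (unsigned long) s.bytes_used, (unsigned long) s.chunks_used,
           (unsigned long) s.bytes_free, (unsigned long) s.chunks_free);
}
#endif /* 0 */
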
/* Call WARNFUN with a warning message when memory usage is high. */
extern void memory_warnings __P ((__ptr_t __start,
                                  void (*__warnfun) __P ((__const char *))));


#if 0 /* unused in this file, and conflicting prototypes anyway */
/* Relocating allocator. */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, size_t __size));

/* Free the storage allocated in HANDLEPTR. */
extern void r_alloc_free __P ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long. */
extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, size_t __size));
#endif /* 0 */

#ifdef __cplusplus
}
#endif

#endif /* malloc.h */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if ! (defined (__GLIBC__) && (__GLIBC__ >= 2))
extern size_t __getpagesize __P ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (__malloc_size_t size)
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* How to really get more memory. */
#ifdef HEAP_IN_DATA
/* once dumped, free() & realloc() on static heap space will fail */
#define PURE_DATA(x) \
((static_heap_dumped && (char*)x >= static_heap_base \
  && (char*)x <= (static_heap_base + static_heap_size) ) ? 1 : 0)
extern int initialized;
extern int purify_flag;
extern char* static_heap_base;
extern char* static_heap_ptr;
extern char* static_heap_dumped;
extern unsigned long static_heap_size;
extern __ptr_t more_static_core __P ((ptrdiff_t __size));
__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = more_static_core;
#else
__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
#define PURE_DATA(x) 0
#endif

/* Debugging hook for `malloc'. */
__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));

/* Pointer to the base of the first block. */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free). */
malloc_info *_heapinfo;

/* Number of info entries. */
static __malloc_size_t heapsize;

/* Search index in the info table. */
__malloc_size_t _heapindex;

/* Limit of valid info table indices. */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size. */
struct list _fraghead[BLOCKLOG];

/* Instrumentation. */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced? */
int __malloc_initialized;

void (*__after_morecore_hook) __P ((void));

/* Aligned allocation. */
static __ptr_t align __P ((__malloc_size_t));
static __ptr_t
align (__malloc_size_t size)
{
  __ptr_t result;
  unsigned long int adj;

  result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (void) (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
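/* Illustrative note (added commentary, not in the original source):
   if __morecore hands back core that sits, say, 0x100 bytes past a
   BLOCKSIZE boundary, align computes adj == 0x100, requests another
   BLOCKSIZE - 0x100 bytes, and returns the original address advanced
   by that amount, so the caller always sees block-aligned core.  The
   bytes skipped over are wasted, which is why the extra core is
   requested to compensate. */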

/* Set everything up and remember that we have. */
static int initialize __P ((void));
static int
initialize ()
{
#ifdef HEAP_IN_DATA
  if (static_heap_dumped && __morecore == more_static_core)
    {
      __morecore = __default_morecore;
    }
#endif
  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heaplimit = 0;
  _heapbase = (char *) _heapinfo;

  /* Account for the _heapinfo block itself in the statistics. */
  _bytes_used = heapsize * sizeof (malloc_info);
  _chunks_used = 1;
  _chunks_free = 0;
  _bytes_free = 0;
  _aligned_blocks = 0;

  __malloc_initialized = 1;
  return 1;
}

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary. */
static __ptr_t morecore __P ((__malloc_size_t));
static __ptr_t
morecore (__malloc_size_t size)
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  result = align (size);
  if (result == NULL)
    return NULL;

  /* Check if we need to grow the info table. */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      newsize = heapsize;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
        newsize *= 2;
      newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
      if (newinfo == NULL)
        {
          (*__morecore) (-(int)size);
          return NULL;
        }
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      newinfo[BLOCK (oldinfo)].busy.type = 0;
      newinfo[BLOCK (oldinfo)].busy.info.size
        = BLOCKIFY (heapsize * sizeof (malloc_info));
      _heapinfo = newinfo;
      /* Account for the _heapinfo block itself in the statistics. */
      _bytes_used += newsize * sizeof (malloc_info);
      ++_chunks_used;
      _free_internal (oldinfo);
      heapsize = newsize;
    }

  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap. */
__ptr_t
malloc (__malloc_size_t size)
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible. */

#ifdef HAVE_X_WINDOWS
  /* there is at least one Xt bug where calloc(n,x) is blindly called
     where n can be 0, and yet if 0 is returned, Xt barfs */
  if (size == 0)
    size = sizeof (struct list);
#else
  if (size == 0)
    return NULL;
#endif

  if (__malloc_hook != NULL)
    return (*__malloc_hook) (size);

  if (!__malloc_initialized)
    if (!initialize ())
      return NULL;

#ifdef SUNOS_LOCALTIME_BUG
  /* Workaround for localtime() allocating 8 bytes and writing 9 bug... */
  if (size < 16)
    size = 16;
#endif

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size. */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size. */
      __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;
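      /* Illustrative note (added commentary, not in the original
         source): for a request of 100 bytes, --size leaves 99, and
         the loop halves it through 49, 24, 12, 6, 3, 1 to 0, bumping
         log once per nonzero result; log ends up as 7, so the
         fragment size is 1 << 7 == 128, the smallest power of two
         that holds 100 bytes. */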

      /* Look in the fragment lists for a
         free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters. */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics. */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first. */
          result = malloc (BLOCKSIZE);
          if (result == NULL)
            return NULL;

          /* Link all fragments but the first into the free list. */
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              if (next->next != NULL)
                next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block. */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system. */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Check to see if
                 the new core will be contiguous with the final free
                 block; if so we don't need to get as much. */
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
                  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
                {
                  /* Which block we are extending (the `final free
                     block' referred to above) might have changed, if
                     it got combined with a freed info table. */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (blocks - lastblocks);
                  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
                  continue;
                }
              result = morecore (blocks * BLOCKSIZE);
              if (result == NULL)
                return NULL;
              block = BLOCK (result);
              _heapinfo[block].busy.type = 0;
              _heapinfo[block].busy.info.size = blocks;
              ++_chunks_used;
              _bytes_used += blocks * BLOCKSIZE;
              return result;
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list. */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list. */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;
    }

  return result;
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions. */

__ptr_t _malloc (__malloc_size_t size);
__ptr_t
_malloc (__malloc_size_t size)
{
  return malloc (size);
}

void _free (__ptr_t ptr);
void
_free (__ptr_t ptr)
{
  free (ptr);
}

__ptr_t _realloc (__ptr_t ptr, __malloc_size_t size);
__ptr_t
_realloc (__ptr_t ptr, __malloc_size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994 Free Software Foundation
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Debugging hook for free. */
void (*__free_hook) __P ((__ptr_t __ptr));

/* List of blocks allocated by memalign. */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one. */
void
_free_internal (__ptr_t ptr)
{
  int type;
  __malloc_size_t block, blocks;
  __malloc_size_t i;
  struct list *prev, *next;

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can. */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation. */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list. */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor. */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list. */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size). */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* Now see if we can return stuff to the system. */
      blocks = _heapinfo[block].free.size;
      if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
          && (*__morecore) (0) == ADDRESS (block + blocks))
        {
          __malloc_size_t bytes = blocks * BLOCKSIZE;
          _heaplimit -= blocks;
          (*__morecore) (-(int)bytes);
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[block].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          block = _heapinfo[block].free.prev;
          --_chunks_free;
          _bytes_free -= bytes;
        }

      /* Set the next search to begin at this block. */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics. */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block. */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block. */
          next = prev;
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate. */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

          free (ADDRESS (block));
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block. */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block. */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }
}

/* Return memory to the heap. */
__free_ret_t
free (__ptr_t ptr)
{
  struct alignlist *l;

  if (ptr == NULL)
    return;

  if (PURE_DATA(ptr))
    {
      return;
    }

  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL; /* Mark the slot in the list as free. */
        ptr = l->exact;
        break;
      }

  if (__free_hook != NULL)
    (*__free_hook) (ptr);
  else
    _free_internal (ptr);
}
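/* Illustrative sketch (not part of the original file): one way a
   debugging layer could use the __malloc_hook/__free_hook interface
   declared above.  The names `trace_malloc', `trace_free' and
   `install_trace_hooks' are invented for this example.  A hook must
   clear itself before calling back into malloc/free, or it would
   recurse forever.  Kept inside #if 0 so it never compiles into the
   allocator itself. */
#if 0
#include <stdio.h>

static __ptr_t
trace_malloc (__malloc_size_t size)
{
  __ptr_t result;
  __malloc_hook = NULL; /* avoid recursing into this hook */
  result = malloc (size);
  fprintf (stderr, "malloc (%lu) -> %p\n", (unsigned long) size, result);
  __malloc_hook = trace_malloc;
  return result;
}

static void
trace_free (__ptr_t ptr)
{
  __free_hook = NULL; /* avoid recursing into this hook */
  fprintf (stderr, "free (%p)\n", ptr);
  free (ptr);
  __free_hook = trace_free;
}

static void
install_trace_hooks (void)
{
  __malloc_hook = trace_malloc;
  __free_hook = trace_free;
}
#endif /* 0 */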
/* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA. */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifdef _LIBC

#include <ansidecl.h>
#include <gnu-stabs.h>

#undef cfree

function_alias(cfree, free, void, (ptr),
               DEFUN(cfree, (ptr), PTR ptr))

#else

void cfree (__ptr_t ptr);
void
cfree (__ptr_t ptr)
{
  free (ptr);
}

#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if 0 /* FSFmacs */
/* XEmacs requires an ANSI compiler, and memmove() is part of the ANSI-
   mandated functions.  For losing systems like SunOS 4, we provide
   our own memmove(). */

#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap. */
#ifndef emacs

/* Like bcopy except never gets confused by overlap. */

static void
safe_bcopy (char *from, char *to, int size)
{
  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that. */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end. */
  else
    {
      char *endf = from + size;
      char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
         nonoverlapping chunks of TO - FROM bytes each.  However, if
         TO - FROM is small, then the bcopy function call overhead
         makes this not worth it.  The crossover point could be about
         anywhere.  Since I don't think the obvious copy loop is too
         bad, I'm trying to err in its favor. */
      if (to - from < 64)
        {
          do
            *--endt = *--endf;
          while (endf != from);
        }
      else
        {
          for (;;)
            {
              endt -= (to - from);
              endf -= (to - from);

              if (endt < to)
                break;

              bcopy (endf, endt, to - from);
            }

          /* If SIZE wasn't a multiple of TO - FROM, there will be a
             little left over.  The amount left over is
             (endt + (to - from)) - to, which is endt - from. */
          bcopy (from, to, endt - from);
        }
    }
}
#endif /* Not emacs. */

#define memmove(to, from, size) safe_bcopy ((from), (to), (size))

#endif

#endif /* FSFmacs */


#ifndef min
#define min(A, B) ((A) < (B) ? (A) : (B))
#endif

/* Debugging hook for realloc. */
__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
__ptr_t
realloc (__ptr_t ptr, __malloc_size_t size)
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (PURE_DATA(ptr))
    {
      result = malloc (size);
      memcpy (result, ptr, size);
      return result;
    }

  else if (size == 0)
    {
      free (ptr);
      return malloc (0);
    }
  else if (ptr == NULL)
    return malloc (size);

  if (__realloc_hook != NULL)
    return (*__realloc_hook) (ptr, size);

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment. */
      if (size <= BLOCKSIZE / 2)
        {
          result = malloc (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal (ptr);
              return result;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list. */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it. */
          ++_chunks_used;
          _free_internal (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary. */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving. */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system. */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          free (ptr);
          _heaplimit = oldlimit;
          result = malloc (size);
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors. */
              if (_heapindex == block)
                (void) malloc (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
                  (void) malloc (blocks * BLOCKSIZE);
                  free (previous);
                }
              return NULL;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size. */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment. */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old. */
          result = malloc (size);
          if (result == NULL)
            return NULL;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          free (ptr);
        }
      break;
    }

  return result;
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros. */
__ptr_t
calloc (__malloc_size_t nmemb, __malloc_size_t size)
{
  __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA. */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* #ifndef __GNU_LIBRARY__ */
#define __sbrk sbrk
/* #endif */

#ifdef GMALLOC_NEEDS_SBRK_DECL
/* some versions of OSF1 need this */
extern __ptr_t __sbrk __P ((ssize_t increment));
#else
#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files. */
#if !(defined(linux) && defined(sparc))
extern __ptr_t __sbrk __P ((int increment));
#endif
#endif
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space. */
__ptr_t
__default_morecore (
#ifdef __STDC__
                    ptrdiff_t increment
#else
#ifdef OSF1
                    long increment
#else
                    int increment
#endif
#endif
                    )
{
#ifdef OSF1
  __ptr_t result = (__ptr_t) __sbrk ((ssize_t) increment);
#else
  __ptr_t result = (__ptr_t) __sbrk ((int) increment);
#endif
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA. */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

__ptr_t
memalign (__malloc_size_t alignment, __malloc_size_t size)
{
  __ptr_t result;
  unsigned long int adj;

  size = ((size + alignment - 1) / alignment) * alignment;

  result = malloc (size);
  if (result == NULL)
    return NULL;
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % alignment;
  if (adj != 0)
    {
      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it. */
          break;
      if (l == NULL)
        {
          l = (struct alignlist *) malloc (sizeof (struct alignlist));
          if (l == NULL)
            {
              free (result);
              return NULL;
            }
          l->next = _aligned_blocks;
          _aligned_blocks = l;
        }
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
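/* Illustrative note (added commentary, not in the original source):
   with alignment == 64, if malloc were to return an address 40 bytes
   past a 64-byte boundary, adj would be 40 and the caller would get
   result + 64 - 40, i.e. the next 64-byte boundary inside the rounded-up
   allocation.  The exact/aligned pair recorded in _aligned_blocks is
   what later lets free() translate the aligned pointer back to the
   address malloc actually returned. */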