/* Synched up with: Not synched up with FSF 19.28, even though I
   thought I did so. */

/* Get the configuration files if we're being compiled for Emacs. */
#ifdef emacs
# include <config.h>
# include "getpagesize.h"
# ifndef HAVE_CONFIG_H
# define HAVE_CONFIG_H
# endif
#endif

#if defined (__STDC__) && !defined (STDC_HEADERS)
/* The ANSI standard says that defining __STDC__ to a non-zero value means
   that the compiler conforms to that standard.  The standard requires
   certain header files and library functions to be present.  Therefore,
   if your compiler defines __STDC__ to non-0 but does not have ANSI headers
   and the ANSI library routines, then your compiler is buggy.  Conversely,
   an ANSI-conforming environment (which has both the ANSI headers and
   library routines, i.e., stdlib.h and `memmove') does not necessarily
   define the STDC_HEADERS flag.  Lucid Emacs requires an ANSI compiler.
   Therefore, there is no need to consult the abominable STDC_HEADERS flag.
   -- jwz
 */
# define STDC_HEADERS
#endif

#define __const const


/* DO NOT EDIT THIS FILE -- it is automagically generated.  -*- C -*- */
/* Bwaa-haa-haa!  Not a chance that this is actually true! */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here. */

/* Declarations for `malloc' and friends.
   Copyright 1990, 1991, 1992, 1993 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#if defined(__GNU_LIBRARY__) || defined(__STDC__)
#include <limits.h>
#else
#define CHAR_BIT 8
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

/* #### XEmacs change for Solaris */
#if defined (__cplusplus) || (defined (__STDC__) && __STDC__) || \
    (defined (__STDC__) && defined (SOLARIS2)) || defined (WINDOWSNT)
#undef __P
#define __P(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef __P
#define __P(args) ()
#undef const
#define const
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#ifdef __STDC__
#include <stddef.h>
#define __malloc_size_t size_t
#else
#ifdef OSF1 /* from grunwald@foobar.cs.colorado.edu */
#define __malloc_size_t unsigned long
#else
#define __malloc_size_t unsigned int
#endif
#endif

#ifndef NULL
#define NULL 0
#endif

/* XEmacs: I thought this should be int under SunOS, but that
   apparently fails.  Curses on all this shit. */
#define __free_ret_t void

/* XEmacs: I tried commenting these out and including stdlib.h,
   but that fails badly.  Urk!  This sucks. */
/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc __P ((size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc __P ((__ptr_t __ptr, size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc __P ((size_t __nmemb, size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern __free_ret_t free __P ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern __ptr_t memalign __P ((size_t __alignment, size_t __size));

/* Allocate SIZE bytes on a page boundary.  */
extern __ptr_t valloc __P ((size_t __size));


#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof(int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
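
/* Illustrative sketch, not part of the allocator: how a request size maps
   onto the constants above.  Assuming a 32-bit int, BLOCKLOG is 12 and
   BLOCKSIZE is 4096.  Requests of at most BLOCKSIZE / 2 bytes get a
   power-of-two fragment, computed the same way `malloc' computes its `log'
   variable below; larger requests get BLOCKIFY (SIZE) whole blocks.  The
   helper names are hypothetical, and the block is not compiled.  */
#if 0
static __malloc_size_t
example_fragment_size (__malloc_size_t size)
{
  __malloc_size_t log = 1;
  --size;
  while ((size /= 2) != 0)      /* Same loop as in `malloc' below.  */
    ++log;
  return (__malloc_size_t) 1 << log;  /* e.g. 100 bytes -> a 128-byte fragment.  */
}

static __malloc_size_t
example_whole_blocks (__malloc_size_t size)
{
  return BLOCKIFY (size);       /* e.g. 10000 bytes -> 3 blocks of 4096 bytes.  */
}
#endif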

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large block, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
                __malloc_size_t first; /* First free fragment of the block.  */
              } frag;
            /* Size (in blocks) of a large cluster.  */
            __malloc_size_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        __malloc_size_t size; /* Size (in blocks) of a free cluster.  */
        __malloc_size_t next; /* Index of next free cluster.  */
        __malloc_size_t prev; /* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
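
/* Illustrative sketch, not part of the allocator: BLOCK and ADDRESS are
   inverses for addresses inside the heap.  Block numbers start at 1 so that
   index 0 can act as the terminator of the free list below.  This check is
   hypothetical and assumes the heap has already been initialized.  */
#if 0
static int
example_block_mapping_ok (__ptr_t p)
{
  __malloc_size_t b = BLOCK (p);  /* Address -> 1-based block index.  */
  return (char *) ADDRESS (b) <= (char *) p            /* Block start is at or below P...  */
    && (char *) p < (char *) ADDRESS (b) + BLOCKSIZE;  /* ...and P lies inside that block.  */
}
#endif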

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;  /* The address that memalign returned.  */
    __ptr_t exact;    /* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal version of `free' used in `morecore' (malloc.c).  */
extern void _free_internal __P ((__ptr_t __ptr));

#endif /* _MALLOC_INTERNAL.  */

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) __P ((ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore __P ((ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) __P ((void));

/* Nonzero if `malloc' has been called and done its initialization.  */
/* extern int __malloc_initialized; */

/* Hooks for debugging versions.  */
extern void (*__free_hook) __P ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) __P ((size_t __size));
extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, size_t __size));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1, /* Consistency checking is not turned on.  */
    MCHECK_OK,            /* Block is fine.  */
    MCHECK_FREE,          /* Block freed twice.  */
    MCHECK_HEAD,          /* Memory before the block was clobbered.  */
    MCHECK_TAIL           /* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe __P ((__ptr_t __ptr));
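
/* Illustrative sketch, not part of the allocator: how a client would use
   `mcheck' and `mprobe'.  The function names here are hypothetical, and the
   calls only do anything when the separate mcheck module is linked in.  */
#if 0
static void
example_mcheck_abort (enum mcheck_status status)
{
  /* Called when a clobbered or doubly freed block is detected.  */
  abort ();
}

static void
example_mcheck_usage (void)
{
  char *p;
  enum mcheck_status s;

  mcheck (example_mcheck_abort);  /* Must precede the first malloc.  */
  p = (char *) malloc (32);
  s = mprobe ((__ptr_t) p);       /* Expect MCHECK_OK for a healthy block.  */
  free (p);
}
#endif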

/* Activate a standard collection of tracing hooks.  */
extern void mtrace __P ((void));
extern void muntrace __P ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;  /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;  /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats __P ((void));
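
/* Illustrative sketch, not part of the allocator: reading the statistics
   that the instrumentation counters above feed into.  Assumes <stdio.h> is
   available; the function name is hypothetical.  */
#if 0
#include <stdio.h>

static void
example_print_mstats (void)
{
  struct mstats m = mstats ();
  fprintf (stderr, "heap: %lu bytes total, %lu used, %lu free\n",
           (unsigned long) m.bytes_total,
           (unsigned long) m.bytes_used,
           (unsigned long) m.bytes_free);
}
#endif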

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings __P ((__ptr_t __start,
                                  void (*__warnfun) __P ((__const char *))));


#if 0 /* unused in this file, and conflicting prototypes anyway */
/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free __P ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, size_t __size));
#endif /* 0 */

#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc.  */

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if ! (defined (__GLIBC__) && (__GLIBC__ >= 2))
extern size_t __getpagesize __P ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (__malloc_size_t size)
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
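
/* Illustrative sketch, not part of the allocator: a page-aligned buffer
   obtained from `valloc' above.  The variable names are hypothetical; the
   alignment check mirrors the way `memalign' computes its adjustment.  */
#if 0
static void
example_valloc_usage (void)
{
  __ptr_t buf = valloc (3 * 1024);  /* Page aligned; size rounded up by memalign.  */
  if (buf != NULL)
    {
      unsigned long int off = (unsigned long int)
        ((char *) buf - (char *) NULL) % __getpagesize ();
      /* off is 0 here: the buffer starts on a page boundary.  */
      free (buf);
    }
}
#endif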
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* How to really get more memory.  */
#ifdef HEAP_IN_DATA
/* once dumped, free() & realloc() on static heap space will fail */
#define PURE_DATA(x) \
  ((static_heap_dumped && (char*)x >= static_heap_base \
    && (char*)x <= (static_heap_base + static_heap_size) ) ? 1 : 0)
extern int initialized;
extern int purify_flag;
extern char* static_heap_base;
extern char* static_heap_ptr;
extern char* static_heap_dumped;
extern unsigned long static_heap_size;
extern __ptr_t more_static_core __P ((ptrdiff_t __size));
__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = more_static_core;
#else
__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
#define PURE_DATA(x) 0
#endif

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

void (*__after_morecore_hook) __P ((void));

/* Aligned allocation.  */
static __ptr_t align __P ((__malloc_size_t));
static __ptr_t
align (__malloc_size_t size)
{
  __ptr_t result;
  unsigned long int adj;

  result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (void) (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

/* Set everything up and remember that we have.  */
static int initialize __P ((void));
static int
initialize ()
{
#ifdef HEAP_IN_DATA
  if (static_heap_dumped && __morecore == more_static_core)
    {
      __morecore = __default_morecore;
    }
#endif
  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heaplimit = 0;
  _heapbase = (char *) _heapinfo;

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used = heapsize * sizeof (malloc_info);
  _chunks_used = 1;
  _chunks_free = 0;
  _bytes_free = 0;
  _aligned_blocks = 0;

  __malloc_initialized = 1;
  return 1;
}

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary. */
static __ptr_t morecore __P ((__malloc_size_t));
static __ptr_t
morecore (__malloc_size_t size)
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  result = align (size);
  if (result == NULL)
    return NULL;

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      newsize = heapsize;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
        newsize *= 2;
      newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
      if (newinfo == NULL)
        {
          (*__morecore) (-size);
          return NULL;
        }
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      newinfo[BLOCK (oldinfo)].busy.type = 0;
      newinfo[BLOCK (oldinfo)].busy.info.size
        = BLOCKIFY (heapsize * sizeof (malloc_info));
      _heapinfo = newinfo;
      /* Account for the _heapinfo block itself in the statistics.  */
      _bytes_used += newsize * sizeof (malloc_info);
      ++_chunks_used;
      _free_internal (oldinfo);
      heapsize = newsize;
    }

  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
malloc (__malloc_size_t size)
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#ifdef HAVE_X_WINDOWS
  /* there is at least one Xt bug where calloc(n,x) is blindly called
     where n can be 0, and yet if 0 is returned, Xt barfs */
  if (size == 0)
    size = sizeof (struct list);
#else
  if (size == 0)
    return NULL;
#endif

  if (__malloc_hook != NULL)
    return (*__malloc_hook) (size);

  if (!__malloc_initialized)
    if (!initialize ())
      return NULL;

#ifdef SUNOS_LOCALTIME_BUG
  /* Work around the localtime () bug of allocating 8 bytes and writing 9.  */
  if (size < 16)
    size = 16;
#endif

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size. */
      __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;

      /* Look in the fragment lists for a
         free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters. */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first. */
          result = malloc (BLOCKSIZE);
          if (result == NULL)
            return NULL;

          /* Link all fragments but the first into the free list.  */
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              if (next->next != NULL)
                next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Check to see if
                 the new core will be contiguous with the final free
                 block; if so we don't need to get as much.  */
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
                  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
                {
                  /* Which block we are extending (the `final free
                     block' referred to above) might have changed, if
                     it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (blocks - lastblocks);
                  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
                  continue;
                }
              result = morecore (blocks * BLOCKSIZE);
              if (result == NULL)
                return NULL;
              block = BLOCK (result);
              _heapinfo[block].busy.type = 0;
              _heapinfo[block].busy.info.size = blocks;
              ++_chunks_used;
              _bytes_used += blocks * BLOCKSIZE;
              return result;
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list. */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list. */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;
    }

  return result;
}
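
/* Illustrative sketch, not part of the allocator: using the `__malloc_hook'
   checked at the top of `malloc' above to wrap allocation.  The wrapper
   temporarily restores the previous hook so the recursive call reaches the
   real allocator.  All `example_' names are hypothetical.  */
#if 0
static __malloc_size_t example_alloc_count;
static __ptr_t (*example_old_malloc_hook) __P ((__malloc_size_t __size));

static __ptr_t
example_counting_malloc_hook (__malloc_size_t size)
{
  __ptr_t result;
  ++example_alloc_count;
  __malloc_hook = example_old_malloc_hook;      /* Avoid recursing into ourselves.  */
  result = malloc (size);
  example_old_malloc_hook = __malloc_hook;
  __malloc_hook = example_counting_malloc_hook; /* Reinstall for the next call.  */
  return result;
}

/* Installation: example_old_malloc_hook = __malloc_hook;
                 __malloc_hook = example_counting_malloc_hook;  */
#endif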

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t _malloc (__malloc_size_t size);
__ptr_t
_malloc (__malloc_size_t size)
{
  return malloc (size);
}

void _free (__ptr_t ptr);
void
_free (__ptr_t ptr)
{
  free (ptr);
}

__ptr_t _realloc (__ptr_t ptr, __malloc_size_t size);
__ptr_t
_realloc (__ptr_t ptr, __malloc_size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994 Free Software Foundation
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Debugging hook for free.  */
void (*__free_hook) __P ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (__ptr_t ptr)
{
  int type;
  __malloc_size_t block, blocks;
  __malloc_size_t i;
  struct list *prev, *next;

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* Now see if we can return stuff to the system.  */
      blocks = _heapinfo[block].free.size;
      if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
          && (*__morecore) (0) == ADDRESS (block + blocks))
        {
          __malloc_size_t bytes = blocks * BLOCKSIZE;
          _heaplimit -= blocks;
          (*__morecore) (-bytes);
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[block].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          block = _heapinfo[block].free.prev;
          --_chunks_free;
          _bytes_free -= bytes;
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

          free (ADDRESS (block));
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }
}

/* Return memory to the heap.  */
__free_ret_t
free (__ptr_t ptr)
{
  struct alignlist *l;

  if (ptr == NULL)
    return;

  if (PURE_DATA (ptr))
    {
      return;
    }

  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;  /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }

  if (__free_hook != NULL)
    (*__free_hook) (ptr);
  else
    _free_internal (ptr);
}
/* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifdef _LIBC

#include <ansidecl.h>
#include <gnu-stabs.h>

#undef cfree

function_alias(cfree, free, void, (ptr),
               DEFUN(cfree, (ptr), PTR ptr))

#else

void cfree (__ptr_t ptr);
void
cfree (__ptr_t ptr)
{
  free (ptr);
}

#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if 0 /* FSFmacs */
/* XEmacs requires an ANSI compiler, and memmove() is part of the ANSI-
   mandated functions.  For losing systems like SunOS 4, we provide
   our own memmove(). */

#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */
#ifndef emacs

/* Like bcopy except never gets confused by overlap.  */

static void
safe_bcopy (char *from, char *to, int size)
{
  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      char *endf = from + size;
      char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
         nonoverlapping chunks of TO - FROM bytes each.  However, if
         TO - FROM is small, then the bcopy function call overhead
         makes this not worth it.  The crossover point could be about
         anywhere.  Since I don't think the obvious copy loop is too
         bad, I'm trying to err in its favor.  */
      if (to - from < 64)
        {
          do
            *--endt = *--endf;
          while (endf != from);
        }
      else
        {
          for (;;)
            {
              endt -= (to - from);
              endf -= (to - from);

              if (endt < to)
                break;

              bcopy (endf, endt, to - from);
            }

          /* If SIZE wasn't a multiple of TO - FROM, there will be a
             little left over.  The amount left over is
             (endt + (to - from)) - to, which is endt - from.  */
          bcopy (from, to, endt - from);
        }
    }
}
#endif /* Not emacs.  */

#define memmove(to, from, size) safe_bcopy ((from), (to), (size))

#endif

#endif /* FSFmacs */


#ifndef min
#define min(A, B) ((A) < (B) ? (A) : (B))
#endif

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
__ptr_t
realloc (__ptr_t ptr, __malloc_size_t size)
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (PURE_DATA (ptr))
    {
      result = malloc (size);
      memcpy (result, ptr, size);
      return result;
    }

  else if (size == 0)
    {
      free (ptr);
      return malloc (0);
    }
  else if (ptr == NULL)
    return malloc (size);

  if (__realloc_hook != NULL)
    return (*__realloc_hook) (ptr, size);

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = malloc (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal (ptr);
              return result;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list. */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          free (ptr);
          _heaplimit = oldlimit;
          result = malloc (size);
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) malloc (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
                  (void) malloc (blocks * BLOCKSIZE);
                  free (previous);
                }
              return NULL;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old. */
          result = malloc (size);
          if (result == NULL)
            return NULL;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          free (ptr);
        }
      break;
    }

  return result;
}
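
/* Illustrative sketch, not part of the allocator: the boundary cases handled
   at the top of `realloc' above, before any block bookkeeping happens.  The
   function name is hypothetical.  */
#if 0
static void
example_realloc_edges (void)
{
  __ptr_t p;

  p = realloc (NULL, 64);       /* NULL pointer: behaves like malloc (64).  */
  p = realloc (p, 200);         /* Growing may move the block; contents are copied.  */
  p = realloc (p, 0);           /* Size 0: frees P and returns malloc (0).  */
  free (p);
}
#endif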
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation, Inc.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (__malloc_size_t nmemb, __malloc_size_t size)
{
  __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
#endif

#ifdef GMALLOC_NEEDS_SBRK_DECL
/* some versions of OSF1 need this */
extern __ptr_t __sbrk __P ((ssize_t increment));
#else
#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */
#if !(defined(linux) && defined(sparc))
extern __ptr_t __sbrk __P ((int increment));
#endif
#endif
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (
#ifdef __STDC__
                    ptrdiff_t increment
#else
#ifdef OSF1
                    long increment
#else
                    int increment
#endif
#endif
                    )
{
#ifdef OSF1
  __ptr_t result = (__ptr_t) __sbrk ((ssize_t) increment);
#else
  __ptr_t result = (__ptr_t) __sbrk ((int) increment);
#endif
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

__ptr_t
memalign (__malloc_size_t alignment, __malloc_size_t size)
{
  __ptr_t result;
  unsigned long int adj;

  size = ((size + alignment - 1) / alignment) * alignment;

  result = malloc (size);
  if (result == NULL)
    return NULL;
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % alignment;
  if (adj != 0)
    {
      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = (struct alignlist *) malloc (sizeof (struct alignlist));
          if (l == NULL)
            {
              free (result);
              return NULL;
            }
          l->next = _aligned_blocks;
          _aligned_blocks = l;
        }
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
|