xemacs-beta: comparison src/alloc.c @ 5167:e374ea766cc1
clean up, rearrange allocation statistics code
-------------------- ChangeLog entries follow: --------------------
src/ChangeLog addition:
2010-03-21 Ben Wing <ben@xemacs.org>
* alloc.c:
* alloc.c (assert_proper_sizing):
* alloc.c (c_readonly):
* alloc.c (malloced_storage_size):
* alloc.c (fixed_type_block_overhead):
* alloc.c (lisp_object_storage_size):
* alloc.c (inc_lrecord_stats):
* alloc.c (dec_lrecord_stats):
* alloc.c (pluralize_word):
* alloc.c (object_memory_usage_stats):
* alloc.c (Fobject_memory_usage):
* alloc.c (compute_memusage_stats_length):
* alloc.c (disksave_object_finalization_1):
* alloc.c (Fgarbage_collect):
* mc-alloc.c:
* mc-alloc.c (mc_alloced_storage_size):
* mc-alloc.h:
No functionality change here. Collect the allocation-statistics
code that was scattered throughout alloc.c into one place. Add the
remaining section headings so that every section has a heading
clearly identifying where it starts and what it is for.
Expose mc_alloced_storage_size() even when MEMORY_USAGE_STATS is not
defined; this fixes build problems and parallels the export of
lisp_object_storage_size() and malloced_storage_size() in the
non-MEMORY_USAGE_STATS case in the previous change set.
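The exposure of mc_alloced_storage_size() described above amounts to declaring it unconditionally in mc-alloc.h rather than only under MEMORY_USAGE_STATS. A rough sketch of the resulting declaration, with the prototype inferred from the calls visible in the diff below (the mc-alloc.h hunk itself is not part of this comparison):

    /* mc-alloc.h (sketch, not the actual hunk): visible even in
       non-MEMORY_USAGE_STATS builds, so that lisp_object_storage_size()
       and friends still link. */
    Bytecount mc_alloced_storage_size (Bytecount claimed_size,
                                       struct usage_stats *stats);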
author | Ben Wing <ben@xemacs.org>
date | Sun, 21 Mar 2010 04:41:49 -0500
parents | ab9ee10a53e4
children | 5ddbab03b0e6
comparison
5161:125f4119e64d | 5167:e374ea766cc1
140 Lisp_Object Qobject_actually_requested, Qobject_malloc_overhead; | 140 Lisp_Object Qobject_actually_requested, Qobject_malloc_overhead; |
141 Lisp_Object Qother_memory_actually_requested, Qother_memory_malloc_overhead; | 141 Lisp_Object Qother_memory_actually_requested, Qother_memory_malloc_overhead; |
142 Lisp_Object Qother_memory_dynarr_overhead, Qother_memory_gap_overhead; | 142 Lisp_Object Qother_memory_dynarr_overhead, Qother_memory_gap_overhead; |
143 #endif /* MEMORY_USAGE_STATS */ | 143 #endif /* MEMORY_USAGE_STATS */ |
144 | 144 |
145 #ifndef NEW_GC | |
146 static int gc_count_num_short_string_in_use; | |
147 static Bytecount gc_count_string_total_size; | |
148 static Bytecount gc_count_short_string_total_size; | |
149 static Bytecount gc_count_long_string_storage_including_overhead; | |
150 #endif /* not NEW_GC */ | |
151 | |
152 /* static int gc_count_total_records_used, gc_count_records_total_size; */ | |
153 | |
154 /* stats on objects in use */ | |
155 | |
156 #ifdef NEW_GC | |
157 | |
158 static struct | |
159 { | |
160 int instances_in_use; | |
161 int bytes_in_use; | |
162 int bytes_in_use_including_overhead; | |
163 } lrecord_stats [countof (lrecord_implementations_table)]; | |
164 | |
165 #else /* not NEW_GC */ | |
166 | |
167 static struct | |
168 { | |
169 Elemcount instances_in_use; | |
170 Bytecount bytes_in_use; | |
171 Bytecount bytes_in_use_overhead; | |
172 Elemcount instances_freed; | |
173 Bytecount bytes_freed; | |
174 Bytecount bytes_freed_overhead; | |
175 Elemcount instances_on_free_list; | |
176 Bytecount bytes_on_free_list; | |
177 Bytecount bytes_on_free_list_overhead; | |
178 #ifdef MEMORY_USAGE_STATS | |
179 Bytecount nonlisp_bytes_in_use; | |
180 struct generic_usage_stats stats; | |
181 #endif | |
182 } lrecord_stats [countof (lrecord_implementations_table)]; | |
183 | |
184 #endif /* (not) NEW_GC */ | |
185 | |
145 /* Very cheesy ways of figuring out how much memory is being used for | 186 /* Very cheesy ways of figuring out how much memory is being used for |
146 data. #### Need better (system-dependent) ways. */ | 187 data. #### Need better (system-dependent) ways. */ |
147 void *minimum_address_seen; | 188 void *minimum_address_seen; |
148 void *maximum_address_seen; | 189 void *maximum_address_seen; |
149 | 190 |
150 /* Determine now whether we need to garbage collect or not, to make | |
151 Ffuncall() faster */ | |
152 #define INCREMENT_CONS_COUNTER_1(size) \ | |
153 do \ | |
154 { \ | |
155 consing_since_gc += (size); \ | |
156 total_consing += (size); \ | |
157 if (profiling_active) \ | |
158 profile_record_consing (size); \ | |
159 recompute_need_to_garbage_collect (); \ | |
160 } while (0) | |
161 | |
162 #define debug_allocation_backtrace() \ | |
163 do { \ | |
164 if (debug_allocation_backtrace_length > 0) \ | |
165 debug_short_backtrace (debug_allocation_backtrace_length); \ | |
166 } while (0) | |
167 | |
168 #ifdef DEBUG_XEMACS | |
169 #define INCREMENT_CONS_COUNTER(foosize, type) \ | |
170 do { \ | |
171 if (debug_allocation) \ | |
172 { \ | |
173 stderr_out ("allocating %s (size %ld)\n", type, \ | |
174 (long) foosize); \ | |
175 debug_allocation_backtrace (); \ | |
176 } \ | |
177 INCREMENT_CONS_COUNTER_1 (foosize); \ | |
178 } while (0) | |
179 #define NOSEEUM_INCREMENT_CONS_COUNTER(foosize, type) \ | |
180 do { \ | |
181 if (debug_allocation > 1) \ | |
182 { \ | |
183 stderr_out ("allocating noseeum %s (size %ld)\n", type, \ | |
184 (long) foosize); \ | |
185 debug_allocation_backtrace (); \ | |
186 } \ | |
187 INCREMENT_CONS_COUNTER_1 (foosize); \ | |
188 } while (0) | |
189 #else | |
190 #define INCREMENT_CONS_COUNTER(size, type) INCREMENT_CONS_COUNTER_1 (size) | |
191 #define NOSEEUM_INCREMENT_CONS_COUNTER(size, type) \ | |
192 INCREMENT_CONS_COUNTER_1 (size) | |
193 #endif | |
194 | |
195 #ifdef NEW_GC | |
196 /* [[ The call to recompute_need_to_garbage_collect is moved to | |
197 free_normal_lisp_object, since DECREMENT_CONS_COUNTER is extensively called | |
198 during sweep and recomputing need_to_garbage_collect all the time | |
199 is not needed. ]] -- not accurate! */ | |
200 #define DECREMENT_CONS_COUNTER(size) do { \ | |
201 consing_since_gc -= (size); \ | |
202 total_consing -= (size); \ | |
203 if (profiling_active) \ | |
204 profile_record_unconsing (size); \ | |
205 if (consing_since_gc < 0) \ | |
206 consing_since_gc = 0; \ | |
207 } while (0) | |
208 #else /* not NEW_GC */ | |
209 #define DECREMENT_CONS_COUNTER(size) do { \ | |
210 consing_since_gc -= (size); \ | |
211 total_consing -= (size); \ | |
212 if (profiling_active) \ | |
213 profile_record_unconsing (size); \ | |
214 if (consing_since_gc < 0) \ | |
215 consing_since_gc = 0; \ | |
216 recompute_need_to_garbage_collect (); \ | |
217 } while (0) | |
218 #endif /*not NEW_GC */ | |
219 | |
220 #ifndef NEW_GC | |
221 int | |
222 c_readonly (Lisp_Object obj) | |
223 { | |
224 return POINTER_TYPE_P (XTYPE (obj)) && C_READONLY (obj); | |
225 } | |
226 #endif /* not NEW_GC */ | |
227 | |
228 int | |
229 lisp_readonly (Lisp_Object obj) | |
230 { | |
231 return POINTER_TYPE_P (XTYPE (obj)) && LISP_READONLY (obj); | |
232 } | |
233 | |
234 | 191 |
192 /************************************************************************/ | |
193 /* Low-level allocation */ | |
194 /************************************************************************/ | |
195 | |
196 void | |
197 recompute_funcall_allocation_flag (void) | |
198 { | |
199 funcall_allocation_flag = | |
200 need_to_garbage_collect || | |
201 need_to_check_c_alloca || | |
202 need_to_signal_post_gc; | |
203 } | |
204 | |
235 /* Maximum amount of C stack to save when a GC happens. */ | 205 /* Maximum amount of C stack to save when a GC happens. */ |
236 | 206 |
237 #ifndef MAX_SAVE_STACK | 207 #ifndef MAX_SAVE_STACK |
238 #define MAX_SAVE_STACK 0 /* 16000 */ | 208 #define MAX_SAVE_STACK 0 /* 16000 */ |
239 #endif | 209 #endif |
253 void *tmp = breathing_space; | 223 void *tmp = breathing_space; |
254 breathing_space = 0; | 224 breathing_space = 0; |
255 xfree (tmp); | 225 xfree (tmp); |
256 } | 226 } |
257 } | 227 } |
228 | |
229 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC) | |
230 /* If we released our reserve (due to running out of memory), | |
231 and we have a fair amount free once again, | |
232 try to set aside another reserve in case we run out once more. | |
233 | |
234 This is called when a relocatable block is freed in ralloc.c. */ | |
235 void refill_memory_reserve (void); | |
236 void | |
237 refill_memory_reserve (void) | |
238 { | |
239 if (breathing_space == 0) | |
240 breathing_space = (char *) malloc (4096 - MALLOC_OVERHEAD); | |
241 } | |
242 #endif /* !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC) */ | |
243 | |
258 #endif /* not NEW_GC */ | 244 #endif /* not NEW_GC */ |
259 | 245 |
260 static void | 246 static void |
261 set_alloc_mins_and_maxes (void *val, Bytecount size) | 247 set_alloc_mins_and_maxes (void *val, Bytecount size) |
262 { | 248 { |
488 return xstrdup (s); | 474 return xstrdup (s); |
489 } | 475 } |
490 #endif /* NEED_STRDUP */ | 476 #endif /* NEED_STRDUP */ |
491 | 477 |
492 | 478 |
479 /************************************************************************/ | |
480 /* Lisp object allocation */ | |
481 /************************************************************************/ | |
482 | |
483 /* Determine now whether we need to garbage collect or not, to make | |
484 Ffuncall() faster */ | |
485 #define INCREMENT_CONS_COUNTER_1(size) \ | |
486 do \ | |
487 { \ | |
488 consing_since_gc += (size); \ | |
489 total_consing += (size); \ | |
490 if (profiling_active) \ | |
491 profile_record_consing (size); \ | |
492 recompute_need_to_garbage_collect (); \ | |
493 } while (0) | |
494 | |
495 #define debug_allocation_backtrace() \ | |
496 do { \ | |
497 if (debug_allocation_backtrace_length > 0) \ | |
498 debug_short_backtrace (debug_allocation_backtrace_length); \ | |
499 } while (0) | |
500 | |
501 #ifdef DEBUG_XEMACS | |
502 #define INCREMENT_CONS_COUNTER(foosize, type) \ | |
503 do { \ | |
504 if (debug_allocation) \ | |
505 { \ | |
506 stderr_out ("allocating %s (size %ld)\n", type, \ | |
507 (long) foosize); \ | |
508 debug_allocation_backtrace (); \ | |
509 } \ | |
510 INCREMENT_CONS_COUNTER_1 (foosize); \ | |
511 } while (0) | |
512 #define NOSEEUM_INCREMENT_CONS_COUNTER(foosize, type) \ | |
513 do { \ | |
514 if (debug_allocation > 1) \ | |
515 { \ | |
516 stderr_out ("allocating noseeum %s (size %ld)\n", type, \ | |
517 (long) foosize); \ | |
518 debug_allocation_backtrace (); \ | |
519 } \ | |
520 INCREMENT_CONS_COUNTER_1 (foosize); \ | |
521 } while (0) | |
522 #else | |
523 #define INCREMENT_CONS_COUNTER(size, type) INCREMENT_CONS_COUNTER_1 (size) | |
524 #define NOSEEUM_INCREMENT_CONS_COUNTER(size, type) \ | |
525 INCREMENT_CONS_COUNTER_1 (size) | |
526 #endif | |
527 | |
528 #ifdef NEW_GC | |
529 /* [[ The call to recompute_need_to_garbage_collect is moved to | |
530 free_normal_lisp_object, since DECREMENT_CONS_COUNTER is extensively called | |
531 during sweep and recomputing need_to_garbage_collect all the time | |
532 is not needed. ]] -- not accurate! */ | |
533 #define DECREMENT_CONS_COUNTER(size) do { \ | |
534 consing_since_gc -= (size); \ | |
535 total_consing -= (size); \ | |
536 if (profiling_active) \ | |
537 profile_record_unconsing (size); \ | |
538 if (consing_since_gc < 0) \ | |
539 consing_since_gc = 0; \ | |
540 } while (0) | |
541 #else /* not NEW_GC */ | |
542 #define DECREMENT_CONS_COUNTER(size) do { \ | |
543 consing_since_gc -= (size); \ | |
544 total_consing -= (size); \ | |
545 if (profiling_active) \ | |
546 profile_record_unconsing (size); \ | |
547 if (consing_since_gc < 0) \ | |
548 consing_since_gc = 0; \ | |
549 recompute_need_to_garbage_collect (); \ | |
550 } while (0) | |
551 #endif /*not NEW_GC */ | |
552 | |
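For context, these counters are fed by every Lisp allocation path in this file. A hypothetical call site, assuming names for illustration rather than quoting alloc.c; inside an allocation function one would do roughly:

    /* account for a freshly allocated object */
    void *p = allocate_lisp_storage (sizeof (struct Lisp_Marker));
    INCREMENT_CONS_COUNTER (sizeof (struct Lisp_Marker), "marker");
    /* ... initialize the object ... */
    /* and when such an object is explicitly freed rather than swept: */
    DECREMENT_CONS_COUNTER (sizeof (struct Lisp_Marker));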
493 #ifndef NEW_GC | 553 #ifndef NEW_GC |
494 static void * | 554 static void * |
495 allocate_lisp_storage (Bytecount size) | 555 allocate_lisp_storage (Bytecount size) |
496 { | 556 { |
497 void *val = xmalloc (size); | 557 void *val = xmalloc (size); |
514 xemacs_c_alloca (0); | 574 xemacs_c_alloca (0); |
515 | 575 |
516 return val; | 576 return val; |
517 } | 577 } |
518 #endif /* not NEW_GC */ | 578 #endif /* not NEW_GC */ |
519 | |
520 #if defined (NEW_GC) && defined (ALLOC_TYPE_STATS) | |
521 static struct | |
522 { | |
523 int instances_in_use; | |
524 int bytes_in_use; | |
525 int bytes_in_use_including_overhead; | |
526 } lrecord_stats [countof (lrecord_implementations_table)]; | |
527 | |
528 void | |
529 init_lrecord_stats (void) | |
530 { | |
531 xzero (lrecord_stats); | |
532 } | |
533 | |
534 void | |
535 inc_lrecord_stats (Bytecount size, const struct lrecord_header *h) | |
536 { | |
537 int type_index = h->type; | |
538 if (!size) | |
539 size = detagged_lisp_object_size (h); | |
540 | |
541 lrecord_stats[type_index].instances_in_use++; | |
542 lrecord_stats[type_index].bytes_in_use += size; | |
543 lrecord_stats[type_index].bytes_in_use_including_overhead | |
544 #ifdef MEMORY_USAGE_STATS | |
545 += mc_alloced_storage_size (size, 0); | |
546 #else /* not MEMORY_USAGE_STATS */ | |
547 += size; | |
548 #endif /* not MEMORY_USAGE_STATS */ | |
549 } | |
550 | |
551 void | |
552 dec_lrecord_stats (Bytecount size_including_overhead, | |
553 const struct lrecord_header *h) | |
554 { | |
555 int type_index = h->type; | |
556 int size = detagged_lisp_object_size (h); | |
557 | |
558 lrecord_stats[type_index].instances_in_use--; | |
559 lrecord_stats[type_index].bytes_in_use -= size; | |
560 lrecord_stats[type_index].bytes_in_use_including_overhead | |
561 -= size_including_overhead; | |
562 | |
563 DECREMENT_CONS_COUNTER (size); | |
564 } | |
565 | |
566 int | |
567 lrecord_stats_heap_size (void) | |
568 { | |
569 int i; | |
570 int size = 0; | |
571 for (i = 0; i < countof (lrecord_implementations_table); i++) | |
572 size += lrecord_stats[i].bytes_in_use; | |
573 return size; | |
574 } | |
575 #endif /* NEW_GC && ALLOC_TYPE_STATS */ | |
576 | 579 |
577 #define assert_proper_sizing(size) \ | 580 #define assert_proper_sizing(size) \ |
578 type_checking_assert \ | 581 type_checking_assert \ |
579 (implementation->static_size == 0 ? \ | 582 (implementation->static_size == 0 ? \ |
580 implementation->size_in_bytes_method != NULL : \ | 583 implementation->size_in_bytes_method != NULL : \ |
753 return; | 756 return; |
754 } | 757 } |
755 #endif /* Unused */ | 758 #endif /* Unused */ |
756 #endif /* not NEW_GC */ | 759 #endif /* not NEW_GC */ |
757 | 760 |
758 | |
759 static void | |
760 disksave_object_finalization_1 (void) | |
761 { | |
762 #ifdef NEW_GC | |
763 mc_finalize_for_disksave (); | |
764 #else /* not NEW_GC */ | |
765 struct old_lcrecord_header *header; | |
766 | |
767 for (header = all_lcrecords; header; header = header->next) | |
768 { | |
769 struct lrecord_header *objh = &header->lheader; | |
770 const struct lrecord_implementation *imp = LHEADER_IMPLEMENTATION (objh); | |
771 #if 0 /* possibly useful for debugging */ | |
772 if (!RECORD_DUMPABLE (objh) && !objh->free) | |
773 { | |
774 stderr_out ("Disksaving a non-dumpable object: "); | |
775 debug_print (wrap_pointer_1 (header)); | |
776 } | |
777 #endif | |
778 if (imp->disksave && !objh->free) | |
779 (imp->disksave) (wrap_pointer_1 (header)); | |
780 } | |
781 #endif /* not NEW_GC */ | |
782 } | |
783 | |
784 /* Bitwise copy all parts of a Lisp object other than the header */ | 761 /* Bitwise copy all parts of a Lisp object other than the header */ |
785 | 762 |
786 void | 763 void |
787 copy_lisp_object (Lisp_Object dst, Lisp_Object src) | 764 copy_lisp_object (Lisp_Object dst, Lisp_Object src) |
788 { | 765 { |
870 assert (!imp->size_in_bytes_method); | 847 assert (!imp->size_in_bytes_method); |
871 old_free_lcrecord (obj); | 848 old_free_lcrecord (obj); |
872 #endif | 849 #endif |
873 } | 850 } |
874 | 851 |
852 #ifndef NEW_GC | |
853 int | |
854 c_readonly (Lisp_Object obj) | |
855 { | |
856 return POINTER_TYPE_P (XTYPE (obj)) && C_READONLY (obj); | |
857 } | |
858 #endif /* not NEW_GC */ | |
859 | |
860 int | |
861 lisp_readonly (Lisp_Object obj) | |
862 { | |
863 return POINTER_TYPE_P (XTYPE (obj)) && LISP_READONLY (obj); | |
864 } | |
865 | |
866 /* #### Should be made into an object method */ | |
867 | |
868 int | |
869 object_dead_p (Lisp_Object obj) | |
870 { | |
871 return ((BUFFERP (obj) && !BUFFER_LIVE_P (XBUFFER (obj))) || | |
872 (FRAMEP (obj) && !FRAME_LIVE_P (XFRAME (obj))) || | |
873 (WINDOWP (obj) && !WINDOW_LIVE_P (XWINDOW (obj))) || | |
874 (DEVICEP (obj) && !DEVICE_LIVE_P (XDEVICE (obj))) || | |
875 (CONSOLEP (obj) && !CONSOLE_LIVE_P (XCONSOLE (obj))) || | |
876 (EVENTP (obj) && !EVENT_LIVE_P (XEVENT (obj))) || | |
877 (EXTENTP (obj) && !EXTENT_LIVE_P (XEXTENT (obj)))); | |
878 } | |
879 | |
875 | 880 |
876 /************************************************************************/ | 881 /************************************************************************/ |
877 /* Debugger support */ | 882 /* Debugger support */ |
878 /************************************************************************/ | 883 /************************************************************************/ |
879 /* Give gdb/dbx enough information to decode Lisp Objects. We make | 884 /* Give gdb/dbx enough information to decode Lisp Objects. We make |
880 sure certain symbols are always defined, so gdb doesn't complain | 885 sure certain symbols are always defined, so gdb doesn't complain |
881 about expressions in src/.gdbinit. See src/.gdbinit or src/.dbxrc | 886 about expressions in src/.gdbinit. See src/.gdbinit or src/.dbxrc |
882 to see how this is used. */ | 887 to see how this is used. */ |
919 | 924 |
920 #ifdef NEW_GC | 925 #ifdef NEW_GC |
921 #define DECLARE_FIXED_TYPE_ALLOC(type, structture) struct __foo__ | 926 #define DECLARE_FIXED_TYPE_ALLOC(type, structture) struct __foo__ |
922 #else | 927 #else |
923 /************************************************************************/ | 928 /************************************************************************/ |
924 /* Fixed-size type macros */ | 929 /* Fixed-size type macros */ |
925 /************************************************************************/ | 930 /************************************************************************/ |
926 | 931 |
927 /* For fixed-size types that are commonly used, we malloc() large blocks | 932 /* For fixed-size types that are commonly used, we malloc() large blocks |
928 of memory at a time and subdivide them into chunks of the correct | 933 of memory at a time and subdivide them into chunks of the correct |
929 size for an object of that type. This is more efficient than | 934 size for an object of that type. This is more efficient than |
1058 unless there's a large number (usually 1000, but | 1063 unless there's a large number (usually 1000, but |
1059 varies depending on type) of them already on the list. | 1064 varies depending on type) of them already on the list. |
1060 This way, we ensure that an object that gets freed will | 1065 This way, we ensure that an object that gets freed will |
1061 remain free for the next 1000 (or whatever) times that | 1066 remain free for the next 1000 (or whatever) times that |
1062 an object of that type is allocated. */ | 1067 an object of that type is allocated. */ |
1063 | |
1064 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC) | |
1065 /* If we released our reserve (due to running out of memory), | |
1066 and we have a fair amount free once again, | |
1067 try to set aside another reserve in case we run out once more. | |
1068 | |
1069 This is called when a relocatable block is freed in ralloc.c. */ | |
1070 void refill_memory_reserve (void); | |
1071 void | |
1072 refill_memory_reserve (void) | |
1073 { | |
1074 if (breathing_space == 0) | |
1075 breathing_space = (char *) malloc (4096 - MALLOC_OVERHEAD); | |
1076 } | |
1077 #endif | |
1078 | 1068 |
1079 #ifdef ALLOC_NO_POOLS | 1069 #ifdef ALLOC_NO_POOLS |
1080 # define TYPE_ALLOC_SIZE(type, structtype) 1 | 1070 # define TYPE_ALLOC_SIZE(type, structtype) 1 |
1081 #else | 1071 #else |
1082 # define TYPE_ALLOC_SIZE(type, structtype) \ | 1072 # define TYPE_ALLOC_SIZE(type, structtype) \ |
3567 } | 3557 } |
3568 | 3558 |
3569 #endif /* not DEBUG_XEMACS */ | 3559 #endif /* not DEBUG_XEMACS */ |
3570 #endif /* NEW_GC */ | 3560 #endif /* NEW_GC */ |
3571 | 3561 |
3562 #ifdef ALLOC_TYPE_STATS | |
3563 | |
3572 | 3564 |
3573 /************************************************************************/ | 3565 /************************************************************************/ |
3574 /* Allocation Statistics */ | 3566 /* Determining allocation overhead */ |
3575 /************************************************************************/ | 3567 /************************************************************************/ |
3576 | 3568 |
3569 /* Attempt to determine the actual amount of space that is used for | |
3570 the block allocated starting at PTR, supposedly of size "CLAIMED_SIZE". | |
3571 | |
3572 It seems that the following holds: | |
3573 | |
3574 1. When using the old allocator (malloc.c): | |
3575 | |
3576 -- blocks are always allocated in chunks of powers of two. For | |
3577 each block, there is an overhead of 8 bytes if rcheck is not | |
3578 defined, 20 bytes if it is defined. In other words, a | |
3579 one-byte allocation needs 8 bytes of overhead for a total of | |
3580 9 bytes, and needs to have 16 bytes of memory chunked out for | |
3581 it. | |
3582 | |
3583 2. When using the new allocator (gmalloc.c): | |
3584 | |
3585 -- blocks are always allocated in chunks of powers of two up | |
3586 to 4096 bytes. Larger blocks are allocated in chunks of | |
3587 an integral multiple of 4096 bytes. The minimum block | |
3588 size is 2*sizeof (void *), or 16 bytes if SUNOS_LOCALTIME_BUG | |
3589 is defined. There is no per-block overhead, but there | |
3590 is an overhead of 3*sizeof (size_t) for each 4096 bytes | |
3591 allocated. | |
3592 | |
3593 3. When using the system malloc, anything goes, but they are | |
3594 generally slower and more space-efficient than the GNU | |
3595 allocators. One possibly reasonable assumption to make | |
3596 for want of better data is that sizeof (void *), or maybe | |
3597 2 * sizeof (void *), is required as overhead and that | |
3598 blocks are allocated in the minimum required size except | |
3599 that some minimum block size is imposed (e.g. 16 bytes). */ | |
3600 | |
3601 Bytecount | |
3602 malloced_storage_size (void * UNUSED (ptr), Bytecount claimed_size, | |
3603 struct usage_stats *stats) | |
3604 { | |
3605 Bytecount orig_claimed_size = claimed_size; | |
3606 | |
3607 #ifndef SYSTEM_MALLOC | |
3608 if (claimed_size < (Bytecount) (2 * sizeof (void *))) | |
3609 claimed_size = 2 * sizeof (void *); | |
3610 # ifdef SUNOS_LOCALTIME_BUG | |
3611 if (claimed_size < 16) | |
3612 claimed_size = 16; | |
3613 # endif | |
3614 if (claimed_size < 4096) | |
3615 { | |
3616 /* fxg: rename log->log2 to supress gcc3 shadow warning */ | |
3617 int log2 = 1; | |
3618 | |
3619 /* compute the log base two, more or less, then use it to compute | |
3620 the block size needed. */ | |
3621 claimed_size--; | |
3622 /* It's big, it's heavy, it's wood! */ | |
3623 while ((claimed_size /= 2) != 0) | |
3624 ++log2; | |
3625 claimed_size = 1; | |
3626 /* It's better than bad, it's good! */ | |
3627 while (log2 > 0) | |
3628 { | |
3629 claimed_size *= 2; | |
3630 log2--; | |
3631 } | |
3632 /* We have to come up with some average about the amount of | |
3633 blocks used. */ | |
3634 if ((Bytecount) (rand () & 4095) < claimed_size) | |
3635 claimed_size += 3 * sizeof (void *); | |
3636 } | |
3637 else | |
3638 { | |
3639 claimed_size += 4095; | |
3640 claimed_size &= ~4095; | |
3641 claimed_size += (claimed_size / 4096) * 3 * sizeof (size_t); | |
3642 } | |
3643 | |
3644 #else | |
3645 | |
3646 if (claimed_size < 16) | |
3647 claimed_size = 16; | |
3648 claimed_size += 2 * sizeof (void *); | |
3649 | |
3650 #endif /* system allocator */ | |
3651 | |
3652 if (stats) | |
3653 { | |
3654 stats->was_requested += orig_claimed_size; | |
3655 stats->malloc_overhead += claimed_size - orig_claimed_size; | |
3656 } | |
3657 return claimed_size; | |
3658 } | |
3659 | |
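To make the rounding above concrete, a worked example under the #ifndef SYSTEM_MALLOC branch: a 40-byte request is below 4096, the log2 loop computes 6, and the block is rounded up to 1 << 6 = 64 bytes; roughly 64 out of every 4096 such calls are additionally charged the 3 * sizeof (void *) per-page bookkeeping.

    /* ptr is unused and a null stats pointer skips the accounting */
    Bytecount got = malloced_storage_size (0, 40, 0);  /* usually 64 */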
3577 #ifndef NEW_GC | 3660 #ifndef NEW_GC |
3578 static int gc_count_num_short_string_in_use; | 3661 static Bytecount |
3579 static Bytecount gc_count_string_total_size; | 3662 fixed_type_block_overhead (Bytecount size, Bytecount per_block) |
3580 static Bytecount gc_count_short_string_total_size; | 3663 { |
3581 static Bytecount gc_count_long_string_storage_including_overhead; | 3664 Bytecount overhead = 0; |
3582 | 3665 Bytecount storage_size = malloced_storage_size (0, per_block, 0); |
3583 /* static int gc_count_total_records_used, gc_count_records_total_size; */ | 3666 while (size >= per_block) |
3667 { | |
3668 size -= per_block; | |
3669 overhead += storage_size - per_block; | |
3670 } | |
3671 if (rand () % per_block < size) | |
3672 overhead += storage_size - per_block; | |
3673 return overhead; | |
3674 } | |
3675 #endif /* not NEW_GC */ | |
3676 | |
3677 Bytecount | |
3678 lisp_object_storage_size (Lisp_Object obj, struct usage_stats *ustats) | |
3679 { | |
3680 #ifndef NEW_GC | |
3681 const struct lrecord_implementation *imp = | |
3682 XRECORD_LHEADER_IMPLEMENTATION (obj); | |
3683 #endif /* not NEW_GC */ | |
3684 Bytecount size = lisp_object_size (obj); | |
3685 | |
3686 #ifdef NEW_GC | |
3687 return mc_alloced_storage_size (size, ustats); | |
3688 #else | |
3689 if (imp->frob_block_p) | |
3690 { | |
3691 Bytecount overhead = | |
3692 /* #### Always using cons_block is incorrect but close; only | |
3693 string_chars_block is significantly different in size, and | |
3694 it won't ever be seen in this function */ | |
3695 fixed_type_block_overhead (size, sizeof (struct cons_block)); | |
3696 if (ustats) | |
3697 { | |
3698 ustats->was_requested += size; | |
3699 ustats->malloc_overhead += overhead; | |
3700 } | |
3701 return size + overhead; | |
3702 } | |
3703 else | |
3704 return malloced_storage_size (XPNTR (obj), size, ustats); | |
3705 #endif | |
3706 } | |
3584 | 3707 |
3585 | 3708 |
3586 /* stats on objects in use */ | 3709 /************************************************************************/ |
3587 | 3710 /* Allocation Statistics: Accumulate */ |
3588 static struct | 3711 /************************************************************************/ |
3589 { | 3712 |
3590 Elemcount instances_in_use; | 3713 #ifdef NEW_GC |
3591 Bytecount bytes_in_use; | 3714 |
3592 Bytecount bytes_in_use_overhead; | 3715 void |
3593 Elemcount instances_freed; | 3716 init_lrecord_stats (void) |
3594 Bytecount bytes_freed; | 3717 { |
3595 Bytecount bytes_freed_overhead; | 3718 xzero (lrecord_stats); |
3596 Elemcount instances_on_free_list; | 3719 } |
3597 Bytecount bytes_on_free_list; | 3720 |
3598 Bytecount bytes_on_free_list_overhead; | 3721 void |
3722 inc_lrecord_stats (Bytecount size, const struct lrecord_header *h) | |
3723 { | |
3724 int type_index = h->type; | |
3725 if (!size) | |
3726 size = detagged_lisp_object_size (h); | |
3727 | |
3728 lrecord_stats[type_index].instances_in_use++; | |
3729 lrecord_stats[type_index].bytes_in_use += size; | |
3730 lrecord_stats[type_index].bytes_in_use_including_overhead | |
3599 #ifdef MEMORY_USAGE_STATS | 3731 #ifdef MEMORY_USAGE_STATS |
3600 Bytecount nonlisp_bytes_in_use; | 3732 += mc_alloced_storage_size (size, 0); |
3601 struct generic_usage_stats stats; | 3733 #else /* not MEMORY_USAGE_STATS */ |
3602 #endif | 3734 += size; |
3603 } lrecord_stats [countof (lrecord_implementations_table)]; | 3735 #endif /* not MEMORY_USAGE_STATS */ |
3736 } | |
3737 | |
3738 void | |
3739 dec_lrecord_stats (Bytecount size_including_overhead, | |
3740 const struct lrecord_header *h) | |
3741 { | |
3742 int type_index = h->type; | |
3743 int size = detagged_lisp_object_size (h); | |
3744 | |
3745 lrecord_stats[type_index].instances_in_use--; | |
3746 lrecord_stats[type_index].bytes_in_use -= size; | |
3747 lrecord_stats[type_index].bytes_in_use_including_overhead | |
3748 -= size_including_overhead; | |
3749 | |
3750 DECREMENT_CONS_COUNTER (size); | |
3751 } | |
3752 | |
3753 int | |
3754 lrecord_stats_heap_size (void) | |
3755 { | |
3756 int i; | |
3757 int size = 0; | |
3758 for (i = 0; i < countof (lrecord_implementations_table); i++) | |
3759 size += lrecord_stats[i].bytes_in_use; | |
3760 return size; | |
3761 } | |
3762 | |
3763 #else /* not NEW_GC */ | |
3604 | 3764 |
3605 static void | 3765 static void |
3606 clear_lrecord_stats (void) | 3766 clear_lrecord_stats (void) |
3607 { | 3767 { |
3608 xzero (lrecord_stats); | 3768 xzero (lrecord_stats); |
3703 } | 3863 } |
3704 else | 3864 else |
3705 tick_lrecord_stats (h, free_p ? ALLOC_FREE : ALLOC_IN_USE); | 3865 tick_lrecord_stats (h, free_p ? ALLOC_FREE : ALLOC_IN_USE); |
3706 } | 3866 } |
3707 | 3867 |
3868 #endif /* (not) NEW_GC */ | |
3869 | |
3870 void | |
3871 finish_object_memory_usage_stats (void) | |
3872 { | |
3873 /* Here we add up the aggregate values for each statistic, previously | |
3874 computed during tick_lrecord_stats(), to get a single combined value | |
3875 of non-Lisp memory usage for all objects of each type. We can't | |
3876 do this if NEW_GC because nothing like tick_lrecord_stats() gets | |
3877 called -- instead, statistics are computed when objects are allocated, | |
3878 which is too early to be calling the memory_usage() method. */ | |
3879 #if defined (MEMORY_USAGE_STATS) && !defined (NEW_GC) | |
3880 int i; | |
3881 for (i = 0; i < countof (lrecord_implementations_table); i++) | |
3882 { | |
3883 struct lrecord_implementation *imp = lrecord_implementations_table[i]; | |
3884 if (imp && imp->num_extra_nonlisp_memusage_stats) | |
3885 { | |
3886 int j; | |
3887 for (j = 0; j < imp->num_extra_nonlisp_memusage_stats; j++) | |
3888 lrecord_stats[i].nonlisp_bytes_in_use += | |
3889 lrecord_stats[i].stats.othervals[j]; | |
3890 } | |
3891 } | |
3892 #endif /* defined (MEMORY_USAGE_STATS) && !defined (NEW_GC) */ | |
3893 } | |
3894 | |
3895 #define COUNT_FROB_BLOCK_USAGE(type) \ | |
3896 EMACS_INT s = 0; \ | |
3897 EMACS_INT s_overhead = 0; \ | |
3898 struct type##_block *x = current_##type##_block; \ | |
3899 while (x) { s += sizeof (*x) + MALLOC_OVERHEAD; x = x->prev; } \ | |
3900 s_overhead = fixed_type_block_overhead (s, sizeof (struct type##_block)); \ | |
3901 DO_NOTHING | |
3902 | |
3903 #define COPY_INTO_LRECORD_STATS(type) \ | |
3904 do { \ | |
3905 COUNT_FROB_BLOCK_USAGE (type); \ | |
3906 lrecord_stats[lrecord_type_##type].bytes_in_use += s; \ | |
3907 lrecord_stats[lrecord_type_##type].bytes_in_use_overhead += \ | |
3908 s_overhead; \ | |
3909 lrecord_stats[lrecord_type_##type].instances_on_free_list += \ | |
3910 gc_count_num_##type##_freelist; \ | |
3911 lrecord_stats[lrecord_type_##type].instances_in_use += \ | |
3912 gc_count_num_##type##_in_use; \ | |
3913 } while (0) | |
3914 | |
3915 | |
3916 /************************************************************************/ | |
3917 /* Allocation statistics: format nicely */ | |
3918 /************************************************************************/ | |
3919 | |
3920 static Lisp_Object | |
3921 gc_plist_hack (const Ascbyte *name, EMACS_INT value, Lisp_Object tail) | |
3922 { | |
3923 /* C doesn't have local functions (or closures, or GC, or readable syntax, | |
3924 or portable numeric datatypes, or bit-vectors, or characters, or | |
3925 arrays, or exceptions, or ...) */ | |
3926 return cons3 (intern (name), make_int (value), tail); | |
3927 } | |
3928 | |
3929 /* Pluralize a lowercase English word stored in BUF, assuming BUF has | |
3930 enough space to hold the extra letters (at most 2). */ | |
3931 static void | |
3932 pluralize_word (Ascbyte *buf) | |
3933 { | |
3934 Bytecount len = strlen (buf); | |
3935 int upper = 0; | |
3936 Ascbyte d, e; | |
3937 | |
3938 if (len == 0 || len == 1) | |
3939 goto pluralize_apostrophe_s; | |
3940 e = buf[len - 1]; | |
3941 d = buf[len - 2]; | |
3942 upper = isupper (e); | |
3943 e = tolower (e); | |
3944 d = tolower (d); | |
3945 if (e == 'y') | |
3946 { | |
3947 switch (d) | |
3948 { | |
3949 case 'a': | |
3950 case 'e': | |
3951 case 'i': | |
3952 case 'o': | |
3953 case 'u': | |
3954 goto pluralize_s; | |
3955 default: | |
3956 buf[len - 1] = (upper ? 'I' : 'i'); | |
3957 goto pluralize_es; | |
3958 } | |
3959 } | |
3960 else if (e == 's' || e == 'x' || (e == 'h' && (d == 's' || d == 'c'))) | |
3961 { | |
3962 pluralize_es: | |
3963 buf[len++] = (upper ? 'E' : 'e'); | |
3964 } | |
3965 pluralize_s: | |
3966 buf[len++] = (upper ? 'S' : 's'); | |
3967 buf[len] = '\0'; | |
3968 return; | |
3969 | |
3970 pluralize_apostrophe_s: | |
3971 buf[len++] = '\''; | |
3972 goto pluralize_s; | |
3973 } | |
3974 | |
3975 static void | |
3976 pluralize_and_append (Ascbyte *buf, const Ascbyte *name, const Ascbyte *suffix) | |
3977 { | |
3978 strcpy (buf, name); | |
3979 pluralize_word (buf); | |
3980 strcat (buf, suffix); | |
3981 } | |
3982 | |
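The plural forms produced by pluralize_word() become the "-used", "-freed" and "-on-free-list" property names emitted below. Hypothetical calls (not taken from the source) and the keys they yield:

    Ascbyte buf[64];
    pluralize_and_append (buf, "cons", "-used");            /* "conses-used" */
    pluralize_and_append (buf, "marker", "-freed");         /* "markers-freed" */
    pluralize_and_append (buf, "window", "-on-free-list");  /* "windows-on-free-list" */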
3983 static Lisp_Object | |
3984 object_memory_usage_stats (int set_total_gc_usage) | |
3985 { | |
3986 Lisp_Object pl = Qnil; | |
3987 int i; | |
3988 EMACS_INT tgu_val = 0; | |
3989 | |
3990 #ifdef NEW_GC | |
3991 for (i = 0; i < countof (lrecord_implementations_table); i++) | |
3992 { | |
3993 if (lrecord_stats[i].instances_in_use != 0) | |
3994 { | |
3995 Ascbyte buf[255]; | |
3996 const Ascbyte *name = lrecord_implementations_table[i]->name; | |
3997 | |
3998 if (lrecord_stats[i].bytes_in_use_including_overhead != | |
3999 lrecord_stats[i].bytes_in_use) | |
4000 { | |
4001 sprintf (buf, "%s-storage-including-overhead", name); | |
4002 pl = gc_plist_hack (buf, | |
4003 lrecord_stats[i] | |
4004 .bytes_in_use_including_overhead, | |
4005 pl); | |
4006 } | |
4007 | |
4008 sprintf (buf, "%s-storage", name); | |
4009 pl = gc_plist_hack (buf, | |
4010 lrecord_stats[i].bytes_in_use, | |
4011 pl); | |
4012 tgu_val += lrecord_stats[i].bytes_in_use_including_overhead; | |
4013 | |
4014 pluralize_and_append (buf, name, "-used"); | |
4015 pl = gc_plist_hack (buf, lrecord_stats[i].instances_in_use, pl); | |
4016 } | |
4017 } | |
4018 | |
4019 #else /* not NEW_GC */ | |
4020 | |
4021 for (i = 0; i < lrecord_type_count; i++) | |
4022 { | |
4023 if (lrecord_stats[i].bytes_in_use != 0 | |
4024 || lrecord_stats[i].bytes_freed != 0 | |
4025 || lrecord_stats[i].instances_on_free_list != 0) | |
4026 { | |
4027 Ascbyte buf[255]; | |
4028 const Ascbyte *name = lrecord_implementations_table[i]->name; | |
4029 | |
4030 sprintf (buf, "%s-storage-overhead", name); | |
4031 pl = gc_plist_hack (buf, lrecord_stats[i].bytes_in_use_overhead, pl); | |
4032 tgu_val += lrecord_stats[i].bytes_in_use_overhead; | |
4033 sprintf (buf, "%s-storage", name); | |
4034 pl = gc_plist_hack (buf, lrecord_stats[i].bytes_in_use, pl); | |
4035 tgu_val += lrecord_stats[i].bytes_in_use; | |
4036 #ifdef MEMORY_USAGE_STATS | |
4037 if (lrecord_stats[i].nonlisp_bytes_in_use) | |
4038 { | |
4039 sprintf (buf, "%s-non-lisp-storage", name); | |
4040 pl = gc_plist_hack (buf, lrecord_stats[i].nonlisp_bytes_in_use, | |
4041 pl); | |
4042 tgu_val += lrecord_stats[i].nonlisp_bytes_in_use; | |
4043 } | |
4044 #endif /* MEMORY_USAGE_STATS */ | |
4045 pluralize_and_append (buf, name, "-freed"); | |
4046 if (lrecord_stats[i].instances_freed != 0) | |
4047 pl = gc_plist_hack (buf, lrecord_stats[i].instances_freed, pl); | |
4048 pluralize_and_append (buf, name, "-on-free-list"); | |
4049 if (lrecord_stats[i].instances_on_free_list != 0) | |
4050 pl = gc_plist_hack (buf, lrecord_stats[i].instances_on_free_list, | |
4051 pl); | |
4052 pluralize_and_append (buf, name, "-used"); | |
4053 pl = gc_plist_hack (buf, lrecord_stats[i].instances_in_use, pl); | |
4054 } | |
4055 } | |
4056 | |
4057 pl = gc_plist_hack ("long-string-chars-storage-overhead", | |
4058 gc_count_long_string_storage_including_overhead - | |
4059 (gc_count_string_total_size | |
4060 - gc_count_short_string_total_size), pl); | |
4061 pl = gc_plist_hack ("long-string-chars-storage", | |
4062 gc_count_string_total_size | |
4063 - gc_count_short_string_total_size, pl); | |
4064 do | |
4065 { | |
4066 COUNT_FROB_BLOCK_USAGE (string_chars); | |
4067 tgu_val += s + s_overhead; | |
4068 pl = gc_plist_hack ("short-string-chars-storage-overhead", s_overhead, pl); | |
4069 pl = gc_plist_hack ("short-string-chars-storage", s, pl); | |
4070 } | |
4071 while (0); | |
4072 | |
4073 pl = gc_plist_hack ("long-strings-total-length", | |
4074 gc_count_string_total_size | |
4075 - gc_count_short_string_total_size, pl); | |
4076 pl = gc_plist_hack ("short-strings-total-length", | |
4077 gc_count_short_string_total_size, pl); | |
4078 pl = gc_plist_hack ("long-strings-used", | |
4079 gc_count_num_string_in_use | |
4080 - gc_count_num_short_string_in_use, pl); | |
4081 pl = gc_plist_hack ("short-strings-used", | |
4082 gc_count_num_short_string_in_use, pl); | |
4083 | |
4084 #endif /* NEW_GC */ | |
4085 | |
4086 if (set_total_gc_usage) | |
4087 { | |
4088 total_gc_usage = tgu_val; | |
4089 total_gc_usage_set = 1; | |
4090 } | |
4091 | |
4092 return pl; | |
4093 } | |
4094 | |
4095 static Lisp_Object | |
4096 garbage_collection_statistics (void) | |
4097 { | |
4098 /* The things we do for backwards-compatibility */ | |
4099 #ifdef NEW_GC | |
4100 return | |
4101 list6 | |
4102 (Fcons (make_int (lrecord_stats[lrecord_type_cons].instances_in_use), | |
4103 make_int (lrecord_stats[lrecord_type_cons] | |
4104 .bytes_in_use_including_overhead)), | |
4105 Fcons (make_int (lrecord_stats[lrecord_type_symbol].instances_in_use), | |
4106 make_int (lrecord_stats[lrecord_type_symbol] | |
4107 .bytes_in_use_including_overhead)), | |
4108 Fcons (make_int (lrecord_stats[lrecord_type_marker].instances_in_use), | |
4109 make_int (lrecord_stats[lrecord_type_marker] | |
4110 .bytes_in_use_including_overhead)), | |
4111 make_int (lrecord_stats[lrecord_type_string] | |
4112 .bytes_in_use_including_overhead), | |
4113 make_int (lrecord_stats[lrecord_type_vector] | |
4114 .bytes_in_use_including_overhead), | |
4115 object_memory_usage_stats (1)); | |
4116 #else /* not NEW_GC */ | |
4117 return | |
4118 list6 (Fcons (make_int (gc_count_num_cons_in_use), | |
4119 make_int (gc_count_num_cons_freelist)), | |
4120 Fcons (make_int (gc_count_num_symbol_in_use), | |
4121 make_int (gc_count_num_symbol_freelist)), | |
4122 Fcons (make_int (gc_count_num_marker_in_use), | |
4123 make_int (gc_count_num_marker_freelist)), | |
4124 make_int (gc_count_string_total_size), | |
4125 make_int (lrecord_stats[lrecord_type_vector].bytes_in_use + | |
4126 lrecord_stats[lrecord_type_vector].bytes_freed + | |
4127 lrecord_stats[lrecord_type_vector].bytes_on_free_list), | |
4128 object_memory_usage_stats (1)); | |
3708 #endif /* not NEW_GC */ | 4129 #endif /* not NEW_GC */ |
4130 } | |
4131 | |
4132 DEFUN ("object-memory-usage-stats", Fobject_memory_usage_stats, 0, 0, 0, /* | |
4133 Return statistics about memory usage of Lisp objects. | |
4134 */ | |
4135 ()) | |
4136 { | |
4137 return object_memory_usage_stats (0); | |
4138 } | |
4139 | |
4140 #endif /* ALLOC_TYPE_STATS */ | |
4141 | |
4142 #ifdef MEMORY_USAGE_STATS | |
4143 | |
4144 DEFUN ("object-memory-usage", Fobject_memory_usage, 1, 1, 0, /* | |
4145 Return stats about the memory usage of OBJECT. | |
4146 The values returned are in the form of an alist of usage types and byte | |
4147 counts. The byte counts attempt to encompass all the memory used | |
4148 by the object (separate from the memory logically associated with any | |
4149 other object), including internal structures and any malloc() | |
4150 overhead associated with them. In practice, the byte counts are | |
4151 underestimated because certain memory usage is very hard to determine | |
4152 \(e.g. the amount of memory used inside the Xt library or inside the | |
4153 X server). | |
4154 | |
4155 Multiple slices of the total memory usage may be returned, separated | |
4156 by a nil. Each slice represents a particular view of the memory, a | |
4157 particular way of partitioning it into groups. Within a slice, there | |
4158 is no overlap between the groups of memory, and each slice collectively | |
4159 represents all the memory concerned. The rightmost slice typically | |
4160 represents the total memory used plus malloc and dynarr overhead. | |
4161 | |
4162 Slices describing other Lisp objects logically associated with the | |
4163 object may be included, separated from other slices by `t' and from | |
4164 each other by nil if there is more than one. | |
4165 | |
4166 #### We have to figure out how to handle the memory used by the object | |
4167 itself vs. the memory used by substructures. Probably the memory_usage | |
4168 method should return info only about substructures and related Lisp | |
4169 objects, since the caller can always find and all info about the object | |
4170 itself. | |
4171 */ | |
4172 (object)) | |
4173 { | |
4174 struct generic_usage_stats gustats; | |
4175 struct usage_stats object_stats; | |
4176 int i; | |
4177 Lisp_Object val = Qnil; | |
4178 Lisp_Object stats_list = OBJECT_PROPERTY (object, memusage_stats_list); | |
4179 | |
4180 xzero (object_stats); | |
4181 lisp_object_storage_size (object, &object_stats); | |
4182 | |
4183 val = acons (Qobject_actually_requested, | |
4184 make_int (object_stats.was_requested), val); | |
4185 val = acons (Qobject_malloc_overhead, | |
4186 make_int (object_stats.malloc_overhead), val); | |
4187 assert (!object_stats.dynarr_overhead); | |
4188 assert (!object_stats.gap_overhead); | |
4189 | |
4190 if (!NILP (stats_list)) | |
4191 { | |
4192 xzero (gustats); | |
4193 MAYBE_OBJECT_METH (object, memory_usage, (object, &gustats)); | |
4194 | |
4195 val = Fcons (Qt, val); | |
4196 val = acons (Qother_memory_actually_requested, | |
4197 make_int (gustats.u.was_requested), val); | |
4198 val = acons (Qother_memory_malloc_overhead, | |
4199 make_int (gustats.u.malloc_overhead), val); | |
4200 if (gustats.u.dynarr_overhead) | |
4201 val = acons (Qother_memory_dynarr_overhead, | |
4202 make_int (gustats.u.dynarr_overhead), val); | |
4203 if (gustats.u.gap_overhead) | |
4204 val = acons (Qother_memory_gap_overhead, | |
4205 make_int (gustats.u.gap_overhead), val); | |
4206 val = Fcons (Qnil, val); | |
4207 | |
4208 i = 0; | |
4209 { | |
4210 LIST_LOOP_2 (item, stats_list) | |
4211 { | |
4212 if (NILP (item) || EQ (item, Qt)) | |
4213 val = Fcons (item, val); | |
4214 else | |
4215 { | |
4216 val = acons (item, make_int (gustats.othervals[i]), val); | |
4217 i++; | |
4218 } | |
4219 } | |
4220 } | |
4221 } | |
4222 | |
4223 return Fnreverse (val); | |
4224 } | |
4225 | |
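As a rough illustration of the format described in the docstring: for an object that defines a memusage_stats_list, the front of the returned list holds the object's own was_requested and malloc_overhead counts, then a t, then the aggregate counts gathered by the memory_usage method, then a nil, and finally one entry per non-nil, non-t item of the memusage_stats_list property, in list order. The exact keys after the nil therefore vary by object type.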
4226 #endif /* MEMORY_USAGE_STATS */ | |
4227 | |
4228 #ifdef ALLOC_TYPE_STATS | |
4229 | |
4230 DEFUN ("total-object-memory-usage", Ftotal_object_memory_usage, 0, 0, 0, /* | |
4231 Return total number of bytes used for object storage in XEmacs. | |
4232 This may be helpful in debugging XEmacs's memory usage. | |
4233 See also `consing-since-gc' and `object-memory-usage-stats'. | |
4234 */ | |
4235 ()) | |
4236 { | |
4237 return make_int (total_gc_usage + consing_since_gc); | |
4238 } | |
4239 | |
4240 #endif /* ALLOC_TYPE_STATS */ | |
4241 | |
4242 | |
4243 /************************************************************************/ | |
4244 /* Allocation statistics: Initialization */ | |
4245 /************************************************************************/ | |
4246 #ifdef MEMORY_USAGE_STATS | |
4247 | |
4248 /* Compute the number of extra memory-usage statistics associated with an | |
4249 object. We can't compute this at the time INIT_LISP_OBJECT() is called | |
4250 because the value of the `memusage_stats_list' property is generally | |
4251 set afterwards. So we compute the values for all types of objects | |
4252 after all objects have been initialized. */ | |
4253 | |
4254 static void | |
4255 compute_memusage_stats_length (void) | |
4256 { | |
4257 int i; | |
4258 | |
4259 for (i = 0; i < countof (lrecord_implementations_table); i++) | |
4260 { | |
4261 int len = 0; | |
4262 int nonlisp_len = 0; | |
4263 int seen_break = 0; | |
4264 | |
4265 struct lrecord_implementation *imp = lrecord_implementations_table[i]; | |
4266 | |
4267 if (!imp) | |
4268 continue; | |
4269 /* For some of the early objects, Qnil was not yet initialized at | |
4270 the time of object initialization, so it came up as Qnull_pointer. | |
4271 Fix that now. */ | |
4272 if (EQ (imp->memusage_stats_list, Qnull_pointer)) | |
4273 imp->memusage_stats_list = Qnil; | |
4274 { | |
4275 LIST_LOOP_2 (item, imp->memusage_stats_list) | |
4276 { | |
4277 if (!NILP (item) && !EQ (item, Qt)) | |
4278 { | |
4279 len++; | |
4280 if (!seen_break) | |
4281 nonlisp_len++; | |
4282 } | |
4283 else | |
4284 seen_break++; | |
4285 } | |
4286 } | |
4287 | |
4288 imp->num_extra_memusage_stats = len; | |
4289 imp->num_extra_nonlisp_memusage_stats = nonlisp_len; | |
4290 } | |
4291 } | |
4292 | |
4293 #endif /* MEMORY_USAGE_STATS */ | |
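As an example of the counting logic just above, with a made-up property list: an object whose memusage_stats_list is (text-storage gap-overhead t marker-storage) ends up with num_extra_memusage_stats = 3 and num_extra_nonlisp_memusage_stats = 2, since only the entries before the first nil or t separator are later summed into nonlisp_bytes_in_use by finish_object_memory_usage_stats().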
3709 | 4294 |
3710 | 4295 |
3711 /************************************************************************/ | 4296 /************************************************************************/ |
3712 /* Garbage Collection -- Sweep/Compact */ | 4297 /* Garbage Collection -- Sweep/Compact */ |
3713 /************************************************************************/ | 4298 /************************************************************************/ |
3770 } | 4355 } |
3771 *used = num_used; | 4356 *used = num_used; |
3772 /* *total = total_size; */ | 4357 /* *total = total_size; */ |
3773 } | 4358 } |
3774 | 4359 |
3775 static Bytecount fixed_type_block_overhead (Bytecount size, | |
3776 Bytecount per_block); | |
3777 | |
3778 /* And the Lord said: Thou shalt use the `c-backslash-region' command | 4360 /* And the Lord said: Thou shalt use the `c-backslash-region' command |
3779 to make macros prettier. */ | 4361 to make macros prettier. */ |
3780 | |
3781 #define COUNT_FROB_BLOCK_USAGE(type) \ | |
3782 EMACS_INT s = 0; \ | |
3783 EMACS_INT s_overhead = 0; \ | |
3784 struct type##_block *x = current_##type##_block; \ | |
3785 while (x) { s += sizeof (*x) + MALLOC_OVERHEAD; x = x->prev; } \ | |
3786 s_overhead = fixed_type_block_overhead (s, sizeof (struct type##_block)); \ | |
3787 DO_NOTHING | |
3788 | |
3789 #define COPY_INTO_LRECORD_STATS(type) \ | |
3790 do { \ | |
3791 COUNT_FROB_BLOCK_USAGE (type); \ | |
3792 lrecord_stats[lrecord_type_##type].bytes_in_use += s; \ | |
3793 lrecord_stats[lrecord_type_##type].bytes_in_use_overhead += \ | |
3794 s_overhead; \ | |
3795 lrecord_stats[lrecord_type_##type].instances_on_free_list += \ | |
3796 gc_count_num_##type##_freelist; \ | |
3797 lrecord_stats[lrecord_type_##type].instances_in_use += \ | |
3798 gc_count_num_##type##_in_use; \ | |
3799 } while (0) | |
3800 | 4362 |
3801 #ifdef ERROR_CHECK_GC | 4363 #ifdef ERROR_CHECK_GC |
3802 | 4364 |
3803 #define SWEEP_FIXED_TYPE_BLOCK_1(typename, obj_type, lheader) \ | 4365 #define SWEEP_FIXED_TYPE_BLOCK_1(typename, obj_type, lheader) \ |
3804 do { \ | 4366 do { \ |
4568 | 5130 |
4569 /************************************************************************/ | 5131 /************************************************************************/ |
4570 /* "Disksave Finalization" -- Preparing for Dumping */ | 5132 /* "Disksave Finalization" -- Preparing for Dumping */ |
4571 /************************************************************************/ | 5133 /************************************************************************/ |
4572 | 5134 |
5135 static void | |
5136 disksave_object_finalization_1 (void) | |
5137 { | |
5138 #ifdef NEW_GC | |
5139 mc_finalize_for_disksave (); | |
5140 #else /* not NEW_GC */ | |
5141 struct old_lcrecord_header *header; | |
5142 | |
5143 for (header = all_lcrecords; header; header = header->next) | |
5144 { | |
5145 struct lrecord_header *objh = &header->lheader; | |
5146 const struct lrecord_implementation *imp = LHEADER_IMPLEMENTATION (objh); | |
5147 #if 0 /* possibly useful for debugging */ | |
5148 if (!RECORD_DUMPABLE (objh) && !objh->free) | |
5149 { | |
5150 stderr_out ("Disksaving a non-dumpable object: "); | |
5151 debug_print (wrap_pointer_1 (header)); | |
5152 } | |
5153 #endif | |
5154 if (imp->disksave && !objh->free) | |
5155 (imp->disksave) (wrap_pointer_1 (header)); | |
5156 } | |
5157 #endif /* not NEW_GC */ | |
5158 } | |
5159 | |
4573 void | 5160 void |
4574 disksave_object_finalization (void) | 5161 disksave_object_finalization (void) |
4575 { | 5162 { |
4576 /* It's important that certain information from the environment not get | 5163 /* It's important that certain information from the environment not get |
4577 dumped with the executable (pathnames, environment variables, etc.). | 5164 dumped with the executable (pathnames, environment variables, etc.). |
4634 | 5221 |
4635 /* There, that ought to be enough... */ | 5222 /* There, that ought to be enough... */ |
4636 | 5223 |
4637 } | 5224 } |
4638 | 5225 |
4639 #ifdef ALLOC_TYPE_STATS | 5226 |
4640 | 5227 /************************************************************************/ |
4641 static Lisp_Object | 5228 /* Lisp interface onto garbage collection */ |
4642 gc_plist_hack (const Ascbyte *name, EMACS_INT value, Lisp_Object tail) | 5229 /************************************************************************/ |
4643 { | |
4644 /* C doesn't have local functions (or closures, or GC, or readable syntax, | |
4645 or portable numeric datatypes, or bit-vectors, or characters, or | |
4646 arrays, or exceptions, or ...) */ | |
4647 return cons3 (intern (name), make_int (value), tail); | |
4648 } | |
4649 | |
4650 /* Pluralize a lowercase English word stored in BUF, assuming BUF has | |
4651 enough space to hold the extra letters (at most 2). */ | |
4652 static void | |
4653 pluralize_word (Ascbyte *buf) | |
4654 { | |
4655 Bytecount len = strlen (buf); | |
4656 int upper = 0; | |
4657 Ascbyte d, e; | |
4658 | |
4659 if (len == 0 || len == 1) | |
4660 goto pluralize_apostrophe_s; | |
4661 e = buf[len - 1]; | |
4662 d = buf[len - 2]; | |
4663 upper = isupper (e); | |
4664 e = tolower (e); | |
4665 d = tolower (d); | |
4666 if (e == 'y') | |
4667 { | |
4668 switch (d) | |
4669 { | |
4670 case 'a': | |
4671 case 'e': | |
4672 case 'i': | |
4673 case 'o': | |
4674 case 'u': | |
4675 goto pluralize_s; | |
4676 default: | |
4677 buf[len - 1] = (upper ? 'I' : 'i'); | |
4678 goto pluralize_es; | |
4679 } | |
4680 } | |
4681 else if (e == 's' || e == 'x' || (e == 'h' && (d == 's' || d == 'c'))) | |
4682 { | |
4683 pluralize_es: | |
4684 buf[len++] = (upper ? 'E' : 'e'); | |
4685 } | |
4686 pluralize_s: | |
4687 buf[len++] = (upper ? 'S' : 's'); | |
4688 buf[len] = '\0'; | |
4689 return; | |
4690 | |
4691 pluralize_apostrophe_s: | |
4692 buf[len++] = '\''; | |
4693 goto pluralize_s; | |
4694 } | |
4695 | |
4696 static void | |
4697 pluralize_and_append (Ascbyte *buf, const Ascbyte *name, const Ascbyte *suffix) | |
4698 { | |
4699 strcpy (buf, name); | |
4700 pluralize_word (buf); | |
4701 strcat (buf, suffix); | |
4702 } | |
4703 | |
4704 void | |
4705 finish_object_memory_usage_stats (void) | |
4706 { | |
4707 /* Here we add up the aggregate values for each statistic, previously | |
4708 computed during tick_lrecord_stats(), to get a single combined value | |
4709 of non-Lisp memory usage for all objects of each type. We can't | |
4710 do this if NEW_GC because nothing like tick_lrecord_stats() gets | |
4711 called -- instead, statistics are computed when objects are allocated, | |
4712 which is too early to be calling the memory_usage() method. */ | |
4713 #if defined (MEMORY_USAGE_STATS) && !defined (NEW_GC) | |
4714 int i; | |
4715 for (i = 0; i < countof (lrecord_implementations_table); i++) | |
4716 { | |
4717 struct lrecord_implementation *imp = lrecord_implementations_table[i]; | |
4718 if (imp && imp->num_extra_nonlisp_memusage_stats) | |
4719 { | |
4720 int j; | |
4721 for (j = 0; j < imp->num_extra_nonlisp_memusage_stats; j++) | |
4722 lrecord_stats[i].nonlisp_bytes_in_use += | |
4723 lrecord_stats[i].stats.othervals[j]; | |
4724 } | |
4725 } | |
4726 #endif /* defined (MEMORY_USAGE_STATS) && !defined (NEW_GC) */ | |
4727 } | |
4728 | |
4729 static Lisp_Object | |
4730 object_memory_usage_stats (int set_total_gc_usage) | |
4731 { | |
4732 Lisp_Object pl = Qnil; | |
4733 int i; | |
4734 EMACS_INT tgu_val = 0; | |
4735 | |
4736 #ifdef NEW_GC | |
4737 for (i = 0; i < countof (lrecord_implementations_table); i++) | |
4738 { | |
4739 if (lrecord_stats[i].instances_in_use != 0) | |
4740 { | |
4741 Ascbyte buf[255]; | |
4742 const Ascbyte *name = lrecord_implementations_table[i]->name; | |
4743 | |
4744 if (lrecord_stats[i].bytes_in_use_including_overhead != | |
4745 lrecord_stats[i].bytes_in_use) | |
4746 { | |
4747 sprintf (buf, "%s-storage-including-overhead", name); | |
4748 pl = gc_plist_hack (buf, | |
4749 lrecord_stats[i] | |
4750 .bytes_in_use_including_overhead, | |
4751 pl); | |
4752 } | |
4753 | |
4754 sprintf (buf, "%s-storage", name); | |
4755 pl = gc_plist_hack (buf, | |
4756 lrecord_stats[i].bytes_in_use, | |
4757 pl); | |
4758 tgu_val += lrecord_stats[i].bytes_in_use_including_overhead; | |
4759 | |
4760 pluralize_and_append (buf, name, "-used"); | |
4761 pl = gc_plist_hack (buf, lrecord_stats[i].instances_in_use, pl); | |
4762 } | |
4763 } | |
4764 | |
4765 #else /* not NEW_GC */ | |
4766 | |
4767 for (i = 0; i < lrecord_type_count; i++) | |
4768 { | |
4769 if (lrecord_stats[i].bytes_in_use != 0 | |
4770 || lrecord_stats[i].bytes_freed != 0 | |
4771 || lrecord_stats[i].instances_on_free_list != 0) | |
4772 { | |
4773 Ascbyte buf[255]; | |
4774 const Ascbyte *name = lrecord_implementations_table[i]->name; | |
4775 | |
4776 sprintf (buf, "%s-storage-overhead", name); | |
4777 pl = gc_plist_hack (buf, lrecord_stats[i].bytes_in_use_overhead, pl); | |
4778 tgu_val += lrecord_stats[i].bytes_in_use_overhead; | |
4779 sprintf (buf, "%s-storage", name); | |
4780 pl = gc_plist_hack (buf, lrecord_stats[i].bytes_in_use, pl); | |
4781 tgu_val += lrecord_stats[i].bytes_in_use; | |
4782 #ifdef MEMORY_USAGE_STATS | |
4783 if (lrecord_stats[i].nonlisp_bytes_in_use) | |
4784 { | |
4785 sprintf (buf, "%s-non-lisp-storage", name); | |
4786 pl = gc_plist_hack (buf, lrecord_stats[i].nonlisp_bytes_in_use, | |
4787 pl); | |
4788 tgu_val += lrecord_stats[i].nonlisp_bytes_in_use; | |
4789 } | |
4790 #endif /* MEMORY_USAGE_STATS */ | |
4791 pluralize_and_append (buf, name, "-freed"); | |
4792 if (lrecord_stats[i].instances_freed != 0) | |
4793 pl = gc_plist_hack (buf, lrecord_stats[i].instances_freed, pl); | |
4794 pluralize_and_append (buf, name, "-on-free-list"); | |
4795 if (lrecord_stats[i].instances_on_free_list != 0) | |
4796 pl = gc_plist_hack (buf, lrecord_stats[i].instances_on_free_list, | |
4797 pl); | |
4798 pluralize_and_append (buf, name, "-used"); | |
4799 pl = gc_plist_hack (buf, lrecord_stats[i].instances_in_use, pl); | |
4800 } | |
4801 } | |
4802 | |
4803 pl = gc_plist_hack ("long-string-chars-storage-overhead", | |
4804 gc_count_long_string_storage_including_overhead - | |
4805 (gc_count_string_total_size | |
4806 - gc_count_short_string_total_size), pl); | |
4807 pl = gc_plist_hack ("long-string-chars-storage", | |
4808 gc_count_string_total_size | |
4809 - gc_count_short_string_total_size, pl); | |
4810 do | |
4811 { | |
4812 COUNT_FROB_BLOCK_USAGE (string_chars); | |
4813 tgu_val += s + s_overhead; | |
4814 pl = gc_plist_hack ("short-string-chars-storage-overhead", s_overhead, pl); | |
4815 pl = gc_plist_hack ("short-string-chars-storage", s, pl); | |
4816 } | |
4817 while (0); | |
4818 | |
4819 pl = gc_plist_hack ("long-strings-total-length", | |
4820 gc_count_string_total_size | |
4821 - gc_count_short_string_total_size, pl); | |
4822 pl = gc_plist_hack ("short-strings-total-length", | |
4823 gc_count_short_string_total_size, pl); | |
4824 pl = gc_plist_hack ("long-strings-used", | |
4825 gc_count_num_string_in_use | |
4826 - gc_count_num_short_string_in_use, pl); | |
4827 pl = gc_plist_hack ("short-strings-used", | |
4828 gc_count_num_short_string_in_use, pl); | |
4829 | |
4830 #endif /* NEW_GC */ | |
4831 | |
4832 if (set_total_gc_usage) | |
4833 { | |
4834 total_gc_usage = tgu_val; | |
4835 total_gc_usage_set = 1; | |
4836 } | |
4837 | |
4838 return pl; | |
4839 } | |
4840 | |
4841 DEFUN ("object-memory-usage-stats", Fobject_memory_usage_stats, 0, 0, 0, /* | |
4842 Return statistics about memory usage of Lisp objects. | |
4843 */ | |
4844 ()) | |
4845 { | |
4846 return object_memory_usage_stats (0); | |
4847 } | |
4848 | |
4849 #endif /* ALLOC_TYPE_STATS */ | |
4850 | |
4851 #ifdef MEMORY_USAGE_STATS | |
4852 | |
4853 /* Compute the number of extra memory-usage statistics associated with an | |
4854 object. We can't compute this at the time INIT_LISP_OBJECT() is called | |
4855 because the value of the `memusage_stats_list' property is generally | |
4856 set afterwards. So we compute the values for all types of objects | |
4857 after all objects have been initialized. */ | |
4858 | |
4859 static void | |
4860 compute_memusage_stats_length (void) | |
4861 { | |
4862 int i; | |
4863 | |
4864 for (i = 0; i < countof (lrecord_implementations_table); i++) | |
4865 { | |
4866 int len = 0; | |
4867 int nonlisp_len = 0; | |
4868 int seen_break = 0; | |
4869 | |
4870 struct lrecord_implementation *imp = lrecord_implementations_table[i]; | |
4871 | |
4872 if (!imp) | |
4873 continue; | |
4874 /* For some of the early objects, Qnil was not yet initialized at | |
4875 the time of object initialization, so it came up as Qnull_pointer. | |
4876 Fix that now. */ | |
4877 if (EQ (imp->memusage_stats_list, Qnull_pointer)) | |
4878 imp->memusage_stats_list = Qnil; | |
4879 { | |
4880 LIST_LOOP_2 (item, imp->memusage_stats_list) | |
4881 { | |
4882 if (!NILP (item) && !EQ (item, Qt)) | |
4883 { | |
4884 len++; | |
4885 if (!seen_break) | |
4886 nonlisp_len++; | |
4887 } | |
4888 else | |
4889 seen_break++; | |
4890 } | |
4891 } | |
4892 | |
4893 imp->num_extra_memusage_stats = len; | |
4894 imp->num_extra_nonlisp_memusage_stats = nonlisp_len; | |
4895 } | |
4896 } | |
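To make the split concrete, here is a minimal standalone sketch (the stat names, and NULL standing in for the nil/t break markers, are invented for illustration and are not taken from the source): every real entry counts toward the total, but only the entries before the first break count as non-Lisp statistics.

  #include <stdio.h>

  int
  main (void)
  {
    /* Stand-in for a hypothetical memusage_stats_list of the form
       (text-storage gap-overhead t markers); NULL marks the break.  */
    const char *stats_list[] = { "text-storage", "gap-overhead",
                                 NULL, "markers" };
    int len = 0, nonlisp_len = 0, seen_break = 0;
    unsigned i;

    for (i = 0; i < sizeof (stats_list) / sizeof (stats_list[0]); i++)
      {
        if (stats_list[i])
          {
            len++;                /* every real entry counts */
            if (!seen_break)
              nonlisp_len++;      /* only entries before the first break */
          }
        else
          seen_break++;
      }

    /* Prints "extra stats: 3, non-Lisp extra stats: 2", mirroring what
       num_extra_memusage_stats and num_extra_nonlisp_memusage_stats
       would be set to for such a list.  */
    printf ("extra stats: %d, non-Lisp extra stats: %d\n", len, nonlisp_len);
    return 0;
  }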
4897 | |
4898 DEFUN ("object-memory-usage", Fobject_memory_usage, 1, 1, 0, /* | |
4899 Return stats about the memory usage of OBJECT. | |
4900 The values returned are in the form of an alist of usage types and byte | |
4901 counts. The byte counts attempt to encompass all the memory used | |
4902 by the object (separate from the memory logically associated with any | |
4903 other object), including internal structures and any malloc() | |
4904 overhead associated with them. In practice, the byte counts are | |
4905 underestimated because certain memory usage is very hard to determine | |
4906 \(e.g. the amount of memory used inside the Xt library or inside the | |
4907 X server). | |
4908 | |
4909 Multiple slices of the total memory usage may be returned, separated | |
4910 by a nil. Each slice represents a particular view of the memory, a | |
4911 particular way of partitioning it into groups. Within a slice, there | |
4912 is no overlap between the groups of memory, and each slice collectively | |
4913 represents all the memory concerned. The rightmost slice typically | |
4914 represents the total memory used plus malloc and dynarr overhead. | |
4915 | |
4916 Slices describing other Lisp objects logically associated with the | |
4917 object may be included, separated from other slices by `t' and from | |
4918 each other by nil if there is more than one. | |
4919 | |
4920 #### We have to figure out how to handle the memory used by the object | |
4921 itself vs. the memory used by substructures. Probably the memory_usage | |
4922 method should return info only about substructures and related Lisp | |
4923 objects, since the caller can always find out all the info about the object | |
4924 itself. | |
4925 */ | |
4926 (object)) | |
4927 { | |
4928 struct generic_usage_stats gustats; | |
4929 struct usage_stats object_stats; | |
4930 int i; | |
4931 Lisp_Object val = Qnil; | |
4932 Lisp_Object stats_list = OBJECT_PROPERTY (object, memusage_stats_list); | |
4933 | |
4934 xzero (object_stats); | |
4935 lisp_object_storage_size (object, &object_stats); | |
4936 | |
4937 val = acons (Qobject_actually_requested, | |
4938 make_int (object_stats.was_requested), val); | |
4939 val = acons (Qobject_malloc_overhead, | |
4940 make_int (object_stats.malloc_overhead), val); | |
4941 assert (!object_stats.dynarr_overhead); | |
4942 assert (!object_stats.gap_overhead); | |
4943 | |
4944 if (!NILP (stats_list)) | |
4945 { | |
4946 xzero (gustats); | |
4947 MAYBE_OBJECT_METH (object, memory_usage, (object, &gustats)); | |
4948 | |
4949 val = Fcons (Qt, val); | |
4950 val = acons (Qother_memory_actually_requested, | |
4951 make_int (gustats.u.was_requested), val); | |
4952 val = acons (Qother_memory_malloc_overhead, | |
4953 make_int (gustats.u.malloc_overhead), val); | |
4954 if (gustats.u.dynarr_overhead) | |
4955 val = acons (Qother_memory_dynarr_overhead, | |
4956 make_int (gustats.u.dynarr_overhead), val); | |
4957 if (gustats.u.gap_overhead) | |
4958 val = acons (Qother_memory_gap_overhead, | |
4959 make_int (gustats.u.gap_overhead), val); | |
4960 val = Fcons (Qnil, val); | |
4961 | |
4962 i = 0; | |
4963 { | |
4964 LIST_LOOP_2 (item, stats_list) | |
4965 { | |
4966 if (NILP (item) || EQ (item, Qt)) | |
4967 val = Fcons (item, val); | |
4968 else | |
4969 { | |
4970 val = acons (item, make_int (gustats.othervals[i]), val); | |
4971 i++; | |
4972 } | |
4973 } | |
4974 } | |
4975 } | |
4976 | |
4977 return Fnreverse (val); | |
4978 } | |
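As a hedged illustration of the shape this produces (the two stat names after the nil separator and every number are invented, not measured), a call on an object whose memusage_stats_list declares two substructure statistics might return roughly ((object-actually-requested . 512) (object-malloc-overhead . 16) t (other-memory-actually-requested . 65536) (other-memory-malloc-overhead . 256) nil (text-storage . 60000) (extent-storage . 2048)). The dynarr- and gap-overhead entries appear only when nonzero, any nil or t markers in the stats list are passed through unchanged, and when the stats list is nil only the first two entries are returned.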
4979 | |
4980 #endif /* MEMORY_USAGE_STATS */ | |
4981 | 5230 |
4982 /* Debugging aids. */ | 5231 /* Debugging aids. */ |
4983 | 5232 |
4984 DEFUN ("garbage-collect", Fgarbage_collect, 0, 0, "", /* | 5233 DEFUN ("garbage-collect", Fgarbage_collect, 0, 0, "", /* |
4985 Reclaim storage for Lisp objects no longer needed. | 5234 Reclaim storage for Lisp objects no longer needed. |
5003 | 5252 |
5004 /* This will get set to 1, and total_gc_usage computed, as part of the | 5253 /* This will get set to 1, and total_gc_usage computed, as part of the |
5005 call to object_memory_usage_stats() -- if ALLOC_TYPE_STATS is enabled. */ | 5254 call to object_memory_usage_stats() -- if ALLOC_TYPE_STATS is enabled. */ |
5006 total_gc_usage_set = 0; | 5255 total_gc_usage_set = 0; |
5007 #ifdef ALLOC_TYPE_STATS | 5256 #ifdef ALLOC_TYPE_STATS |
5008 /* The things we do for backwards-compatibility */ | 5257 return garbage_collection_statistics (); |
5009 #ifdef NEW_GC | 5258 #else |
5010 return | |
5011 list6 | |
5012 (Fcons (make_int (lrecord_stats[lrecord_type_cons].instances_in_use), | |
5013 make_int (lrecord_stats[lrecord_type_cons] | |
5014 .bytes_in_use_including_overhead)), | |
5015 Fcons (make_int (lrecord_stats[lrecord_type_symbol].instances_in_use), | |
5016 make_int (lrecord_stats[lrecord_type_symbol] | |
5017 .bytes_in_use_including_overhead)), | |
5018 Fcons (make_int (lrecord_stats[lrecord_type_marker].instances_in_use), | |
5019 make_int (lrecord_stats[lrecord_type_marker] | |
5020 .bytes_in_use_including_overhead)), | |
5021 make_int (lrecord_stats[lrecord_type_string] | |
5022 .bytes_in_use_including_overhead), | |
5023 make_int (lrecord_stats[lrecord_type_vector] | |
5024 .bytes_in_use_including_overhead), | |
5025 object_memory_usage_stats (1)); | |
5026 #else /* not NEW_GC */ | |
5027 return | |
5028 list6 (Fcons (make_int (gc_count_num_cons_in_use), | |
5029 make_int (gc_count_num_cons_freelist)), | |
5030 Fcons (make_int (gc_count_num_symbol_in_use), | |
5031 make_int (gc_count_num_symbol_freelist)), | |
5032 Fcons (make_int (gc_count_num_marker_in_use), | |
5033 make_int (gc_count_num_marker_freelist)), | |
5034 make_int (gc_count_string_total_size), | |
5035 make_int (lrecord_stats[lrecord_type_vector].bytes_in_use + | |
5036 lrecord_stats[lrecord_type_vector].bytes_freed + | |
5037 lrecord_stats[lrecord_type_vector].bytes_on_free_list), | |
5038 object_memory_usage_stats (1)); | |
5039 #endif /* not NEW_GC */ | |
5040 #else /* not ALLOC_TYPE_STATS */ | |
5041 return Qnil; | 5259 return Qnil; |
5042 #endif /* ALLOC_TYPE_STATS */ | 5260 #endif |
5043 } | 5261 } |
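For reference, the backwards-compatible value dropped here in favor of garbage_collection_statistics() is a six-element list: three pairs for conses, symbols and markers -- (objects-in-use . objects-on-free-list) in the old collector, (instances-in-use . bytes-including-overhead) under NEW_GC -- followed by a total byte count for string data, a total byte count for vectors, and the full statistics plist from object_memory_usage_stats (1).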
5044 | 5262 |
5045 DEFUN ("consing-since-gc", Fconsing_since_gc, 0, 0, "", /* | 5263 DEFUN ("consing-since-gc", Fconsing_since_gc, 0, 0, "", /* |
5046 Return the number of bytes consed since the last garbage collection. | 5264 Return the number of bytes consed since the last garbage collection. |
5047 \"Consed\" is a misnomer in that this actually counts allocation | 5265 \"Consed\" is a misnomer in that this actually counts allocation |
5076 ()) | 5294 ()) |
5077 { | 5295 { |
5078 return make_int (total_data_usage ()); | 5296 return make_int (total_data_usage ()); |
5079 } | 5297 } |
5080 | 5298 |
5081 #ifdef ALLOC_TYPE_STATS | |
5082 DEFUN ("total-object-memory-usage", Ftotal_object_memory_usage, 0, 0, 0, /* | |
5083 Return total number of bytes used for object storage in XEmacs. | |
5084 This may be helpful in debugging XEmacs's memory usage. | |
5085 See also `consing-since-gc' and `object-memory-usage-stats'. | |
5086 */ | |
5087 ()) | |
5088 { | |
5089 return make_int (total_gc_usage + consing_since_gc); | |
5090 } | |
5091 #endif /* ALLOC_TYPE_STATS */ | |
5092 | |
5093 #ifdef USE_VALGRIND | 5299 #ifdef USE_VALGRIND |
5094 DEFUN ("valgrind-leak-check", Fvalgrind_leak_check, 0, 0, "", /* | 5300 DEFUN ("valgrind-leak-check", Fvalgrind_leak_check, 0, 0, "", /* |
5095 Ask valgrind to perform a memory leak check. | 5301 Ask valgrind to perform a memory leak check. |
5096 The results of the leak check are sent to stderr. | 5302 The results of the leak check are sent to stderr. |
5097 */ | 5303 */ |
5110 { | 5316 { |
5111 VALGRIND_DO_QUICK_LEAK_CHECK; | 5317 VALGRIND_DO_QUICK_LEAK_CHECK; |
5112 return Qnil; | 5318 return Qnil; |
5113 } | 5319 } |
5114 #endif /* USE_VALGRIND */ | 5320 #endif /* USE_VALGRIND */ |
5115 | |
5116 void | |
5117 recompute_funcall_allocation_flag (void) | |
5118 { | |
5119 funcall_allocation_flag = | |
5120 need_to_garbage_collect || | |
5121 need_to_check_c_alloca || | |
5122 need_to_signal_post_gc; | |
5123 } | |
5124 | |
5125 int | |
5126 object_dead_p (Lisp_Object obj) | |
5127 { | |
5128 return ((BUFFERP (obj) && !BUFFER_LIVE_P (XBUFFER (obj))) || | |
5129 (FRAMEP (obj) && !FRAME_LIVE_P (XFRAME (obj))) || | |
5130 (WINDOWP (obj) && !WINDOW_LIVE_P (XWINDOW (obj))) || | |
5131 (DEVICEP (obj) && !DEVICE_LIVE_P (XDEVICE (obj))) || | |
5132 (CONSOLEP (obj) && !CONSOLE_LIVE_P (XCONSOLE (obj))) || | |
5133 (EVENTP (obj) && !EVENT_LIVE_P (XEVENT (obj))) || | |
5134 (EXTENTP (obj) && !EXTENT_LIVE_P (XEXTENT (obj)))); | |
5135 } | |
5136 | |
5137 #ifdef ALLOC_TYPE_STATS | |
5138 | |
5139 /* Attempt to determine the actual amount of space that is used for | |
5140 the block allocated starting at PTR, supposedly of size "CLAIMED_SIZE". | |
5141 | |
5142 It seems that the following holds: | |
5143 | |
5144 1. When using the old allocator (malloc.c): | |
5145 | |
5146 -- blocks are always allocated in chunks of powers of two. For | |
5147 each block, there is an overhead of 8 bytes if rcheck is not | |
5148 defined, 20 bytes if it is defined. In other words, a | |
5149 one-byte allocation needs 8 bytes of overhead for a total of | |
5150 9 bytes, and needs to have 16 bytes of memory chunked out for | |
5151 it. | |
5152 | |
5153 2. When using the new allocator (gmalloc.c): | |
5154 | |
5155 -- blocks are always allocated in chunks of powers of two up | |
5156 to 4096 bytes. Larger blocks are allocated in chunks of | |
5157 an integral multiple of 4096 bytes. The minimum block | |
5158 size is 2*sizeof (void *), or 16 bytes if SUNOS_LOCALTIME_BUG | |
5159 is defined. There is no per-block overhead, but there | |
5160 is an overhead of 3*sizeof (size_t) for each 4096 bytes | |
5161 allocated. | |
5162 | |
5163 3. When using the system malloc, anything goes, but such | |
5164 allocators are generally slower and more space-efficient than | |
5165 the GNU allocators. One possibly reasonable assumption to make | |
5166 for want of better data is that sizeof (void *), or maybe | |
5167 2 * sizeof (void *), is required as overhead and that | |
5168 blocks are allocated in the minimum required size except | |
5169 that some minimum block size is imposed (e.g. 16 bytes). */ | |
5170 | |
5171 Bytecount | |
5172 malloced_storage_size (void * UNUSED (ptr), Bytecount claimed_size, | |
5173 struct usage_stats *stats) | |
5174 { | |
5175 Bytecount orig_claimed_size = claimed_size; | |
5176 | |
5177 #ifndef SYSTEM_MALLOC | |
5178 if (claimed_size < (Bytecount) (2 * sizeof (void *))) | |
5179 claimed_size = 2 * sizeof (void *); | |
5180 # ifdef SUNOS_LOCALTIME_BUG | |
5181 if (claimed_size < 16) | |
5182 claimed_size = 16; | |
5183 # endif | |
5184 if (claimed_size < 4096) | |
5185 { | |
5186 /* fxg: rename log->log2 to suppress gcc3 shadow warning */ | |
5187 int log2 = 1; | |
5188 | |
5189 /* compute the log base two, more or less, then use it to compute | |
5190 the block size needed. */ | |
5191 claimed_size--; | |
5192 /* It's big, it's heavy, it's wood! */ | |
5193 while ((claimed_size /= 2) != 0) | |
5194 ++log2; | |
5195 claimed_size = 1; | |
5196 /* It's better than bad, it's good! */ | |
5197 while (log2 > 0) | |
5198 { | |
5199 claimed_size *= 2; | |
5200 log2--; | |
5201 } | |
5202 /* Charge the per-4096-byte bookkeeping overhead probabilistically, | |
5203 so that it averages out correctly over many blocks. */ | |
5204 if ((Bytecount) (rand () & 4095) < claimed_size) | |
5205 claimed_size += 3 * sizeof (void *); | |
5206 } | |
5207 else | |
5208 { | |
5209 claimed_size += 4095; | |
5210 claimed_size &= ~4095; | |
5211 claimed_size += (claimed_size / 4096) * 3 * sizeof (size_t); | |
5212 } | |
5213 | |
5214 #else | |
5215 | |
5216 if (claimed_size < 16) | |
5217 claimed_size = 16; | |
5218 claimed_size += 2 * sizeof (void *); | |
5219 | |
5220 #endif /* system allocator */ | |
5221 | |
5222 if (stats) | |
5223 { | |
5224 stats->was_requested += orig_claimed_size; | |
5225 stats->malloc_overhead += claimed_size - orig_claimed_size; | |
5226 } | |
5227 return claimed_size; | |
5228 } | |
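As a minimal standalone sketch of the arithmetic in the non-SYSTEM_MALLOC branch above (the helper name is invented, and the rand()-based charge of 3 * sizeof (size_t) per 4096 bytes is deliberately left out), a sub-4096-byte request is rounded up to a power of two, subject to the 2 * sizeof (void *) minimum:

  #include <stdio.h>

  /* Hypothetical helper: round CLAIMED up the way malloced_storage_size()
     estimates block sizes for requests under 4096 bytes.  */
  static unsigned long
  gmalloc_block_size (unsigned long claimed)
  {
    unsigned long size = 1;

    if (claimed < 2 * sizeof (void *))
      claimed = 2 * sizeof (void *);  /* minimum block size */
    claimed--;
    while (claimed != 0)              /* round up to the next power of two; */
      {                               /* exact powers of two stay put */
        claimed /= 2;
        size *= 2;
      }
    return size;
  }

  int
  main (void)
  {
    printf ("100 -> %lu\n", gmalloc_block_size (100));  /* 128 */
    printf (" 24 -> %lu\n", gmalloc_block_size (24));   /* 32 */
    printf ("  1 -> %lu\n", gmalloc_block_size (1));    /* 16 with 8-byte pointers */
    return 0;
  }

On top of this rounding, the real function then charges the 3 * sizeof (size_t) bookkeeping overhead with probability claimed_size/4096, so that the per-page overhead is accounted for correctly on average.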
5229 | |
5230 #ifndef NEW_GC | |
5231 static Bytecount | |
5232 fixed_type_block_overhead (Bytecount size, Bytecount per_block) | |
5233 { | |
5234 Bytecount overhead = 0; | |
5235 Bytecount storage_size = malloced_storage_size (0, per_block, 0); | |
5236 while (size >= per_block) | |
5237 { | |
5238 size -= per_block; | |
5239 overhead += storage_size - per_block; | |
5240 } | |
5241 if (rand () % per_block < size) | |
5242 overhead += storage_size - per_block; | |
5243 return overhead; | |
5244 } | |
5245 #endif /* not NEW_GC */ | |
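The probabilistic last-block charge in fixed_type_block_overhead() likewise has a simple expected value; here is a standalone sketch with invented figures (block size, true storage size and total size are all made up) showing what it converges to on average:

  #include <stdio.h>

  int
  main (void)
  {
    /* Invented figures: 2048-byte frob blocks that really occupy 2064 bytes
       after malloc rounding, holding 5000 bytes of objects in total.  */
    double per_block = 2048.0, storage = 2064.0, size = 5000.0;
    long full_blocks = (long) (size / per_block);              /* 2 */
    double partial = (size - full_blocks * per_block) / per_block;

    /* Each full block costs (storage - per_block) bytes of overhead; the
       final partial block is charged the same amount with probability
       `partial', so on average it contributes a proportional share.  */
    double expected = (full_blocks + partial) * (storage - per_block);

    printf ("expected overhead: %.1f bytes\n", expected);      /* about 39.1 */
    return 0;
  }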
5246 | |
5247 Bytecount | |
5248 lisp_object_storage_size (Lisp_Object obj, struct usage_stats *ustats) | |
5249 { | |
5250 #ifndef NEW_GC | |
5251 const struct lrecord_implementation *imp = | |
5252 XRECORD_LHEADER_IMPLEMENTATION (obj); | |
5253 #endif /* not NEW_GC */ | |
5254 Bytecount size = lisp_object_size (obj); | |
5255 | |
5256 #ifdef NEW_GC | |
5257 return mc_alloced_storage_size (size, ustats); | |
5258 #else | |
5259 if (imp->frob_block_p) | |
5260 { | |
5261 Bytecount overhead = | |
5262 /* #### Always using cons_block is incorrect but close; only | |
5263 string_chars_block is significantly different in size, and | |
5264 it won't ever be seen in this function */ | |
5265 fixed_type_block_overhead (size, sizeof (struct cons_block)); | |
5266 if (ustats) | |
5267 { | |
5268 ustats->was_requested += size; | |
5269 ustats->malloc_overhead += overhead; | |
5270 } | |
5271 return size + overhead; | |
5272 } | |
5273 else | |
5274 return malloced_storage_size (XPNTR (obj), size, ustats); | |
5275 #endif | |
5276 } | |
5277 | |
5278 #endif /* ALLOC_TYPE_STATS */ | |
5279 | 5321 |
5280 | 5322 |
5281 /************************************************************************/ | 5323 /************************************************************************/ |
5282 /* Initialization */ | 5324 /* Initialization */ |
5283 /************************************************************************/ | 5325 /************************************************************************/ |