comparison src/mc-alloc.c @ 2720:6fa9919a9a0b

[xemacs-hg @ 2005-04-08 23:10:01 by crestani] ChangeLog addition: 2005-04-01  Marcus Crestani  <crestani@xemacs.org>         The new allocator.         New configure flag: `MC_ALLOC':         * configure.ac (XE_COMPLEX_ARG_ENABLE): Add `--enable-mc-alloc' as         a new configure flag.         * configure.in (AC_INIT_PARSE_ARGS): Add `--mc-alloc' as a new         configure flag.         * configure.usage: Add description for `mc-alloc'.         DUMP_IN_EXEC:         * Makefile.in.in: Condition the installation of a separate dump         file on !DUMP_ON_EXEC.         * configure.ac (XE_COMPLEX_ARG_ENABLE): Add         `--enable-dump-in-exec' as a new configure flag.         * configure.ac: DUMP_IN_EXEC is define as default for PDUMP but         not default for MC_ALLOC.         * configure.in (AC_INIT_PARSE_ARGS): Add `--dump-in-exec' as a         new configure flag.         * configure.in: DUMP_IN_EXEC is define as default for PDUMP but         not default for MC_ALLOC.         * configure.usage: Add description for `dump-in-exec'. lib-src/ChangeLog addition: 2005-04-01  Marcus Crestani  <crestani@xemacs.org>         The new allocator.         DUMP_IN_EXEC:                  * Makefile.in.in: Only compile insert-data-in-exec if         DUMP_IN_EXEC is defined. lisp/ChangeLog addition: 2005-04-01  Marcus Crestani  <crestani@xemacs.org>         The new allocator.         MEMORY_USAGE_STATS         * diagnose.el: Add new lisp function to pretty print statistics         about the new allocator.         * diagnose.el (show-mc-alloc-memory-usage): New. modules/ChangeLog addition: 2005-04-01  Marcus Crestani  <crestani@xemacs.org>         The new allocator.         Remove Lcrecords:                  * postgresql/postgresql.c (allocate_pgconn): Allocate with new         allocator.         * postgresql/postgresql.c (allocate_pgresult): Allocate PGresult         with new allocator.           * postgresql/postgresql.h (struct Lisp_PGconn): Add         lrecord_header.        
 * postgresql/postgresql.h (struct Lisp_PGresult): Add         lrecord_header.         * ldap/eldap.c (allocate_ldap): Allocate with new allocator.         * ldap/eldap.h (struct Lisp_LDAP): Add lrecord_header. nt/ChangeLog addition: 2005-04-01  Marcus Crestani  <crestani@xemacs.org>         The new allocator.         New configure flag: `MC_ALLOC':         * config.inc.samp: Add new flag `MC_ALLOC'.         * xemacs.mak: Add flag and configuration output for `MC_ALLOC'.         New files:         * xemacs.dsp: Add source files mc-alloc.c and mc-alloc.h.         * xemacs.mak: Add new object file mc-alloc.obj to dependencies. src/ChangeLog addition: 2005-04-01  Marcus Crestani  <crestani@xemacs.org>         The new allocator.         New configure flag: `MC_ALLOC':         * config.h.in: Add new flag `MC_ALLOC'.         New files:         * Makefile.in.in: Add new object file mc-alloc.o.         * depend: Add new files to dependencies.         * mc-alloc.c: New.         * mc-alloc.h: New.         Running the new allocator from XEmacs:         * alloc.c (deadbeef_memory): Moved to mc-alloc.c.         * emacs.c (main_1): Initialize the new allocator and add         syms_of_mc_alloc.         * symsinit.h: Add syms_of_mc_alloc.         New lrecord allocation and free functions:         * alloc.c (alloc_lrecord): New. Allocates an lrecord, includes         type checking and initializing of the lrecord_header.         * alloc.c (noseeum_alloc_lrecord): Same as above, but increments         the NOSEEUM cons counter.         * alloc.c (free_lrecord): New. Calls the finalizer and frees the         lrecord.         * lrecord.h: Add lrecord allocation prototypes and comments.         Remove old lrecord FROB block allocation:                  * alloc.c (allocate_lisp_storage): Former function to expand         heap. Not needed anymore, remove.         * alloc.c: Completely remove `Fixed-size type macros'         * alloc.c (release_breathing_space): Remove.         
* alloc.c (memory_full): Remove release_breathing_space.         * alloc.c (refill_memory_reserve): Remove.         * alloc.c (TYPE_ALLOC_SIZE): Remove.         * alloc.c (DECLARE_FIXED_TYPE_ALLOC): Remove.         * alloc.c (ALLOCATE_FIXED_TYPE_FROM_BLOCK): Remove.         * alloc.c (ALLOCATE_FIXED_TYPE_1): Remove.         * alloc.c (ALLOCATE_FIXED_TYPE): Remove.         * alloc.c (NOSEEUM_ALLOCATE_FIXED_TYPE): Remove.         * alloc.c (struct Lisp_Free): Remove.         * alloc.c (LRECORD_FREE_P): Remove.         * alloc.c (MARK_LRECORD_AS_FREE): Remove.         * alloc.c (MARK_LRECORD_AS_NOT_FREE): Remove.         * alloc.c (PUT_FIXED_TYPE_ON_FREE_LIST): Remove.         * alloc.c (FREE_FIXED_TYPE): Remove.         * alloc.c (FREE_FIXED_TYPE_WHEN_NOT_IN_GC): Remove.         Allocate old lrecords with new allocator:                  * alloc.c: DECLARE_FIXED_TYPE_ALLOC removed for all lrecords         defined in alloc.c.         * alloc.c (Fcons): Allocate with new allocator.         * alloc.c (noseeum_cons): Allocate with new allocator.         * alloc.c (make_float): Allocate with new allocator.         * alloc.c (make_bignum): Allocate with new allocator.         * alloc.c (make_bignum_bg): Allocate with new allocator.         * alloc.c (make_ratio): Allocate with new allocator.         * alloc.c (make_ratio_bg): Allocate with new allocator.         * alloc.c (make_ratio_rt): Allocate with new allocator.         * alloc.c (make_bigfloat): Allocate with new allocator.         * alloc.c (make_bigfloat_bf): Allocate with new allocator.         * alloc.c (make_compiled_function): Allocate with new allocator.         * alloc.c (Fmake_symbol): Allocate with new allocator.         * alloc.c (allocate_extent): Allocate with new allocator.         * alloc.c (allocate_event): Allocate with new allocator.         * alloc.c (make_key_data): Allocate with new allocator.         * alloc.c (make_button_data): Allocate with new allocator.         
* alloc.c (make_motion_data): Allocate with new allocator.         * alloc.c (make_process_data): Allocate with new allocator.         * alloc.c (make_timeout_data): Allocate with new allocator.         * alloc.c (make_magic_data): Allocate with new allocator.         * alloc.c (make_magic_eval_data): Allocate with new allocator.         * alloc.c (make_eval_data): Allocate with new allocator.         * alloc.c (make_misc_user_data): Allocate with new allocator.         * alloc.c (Fmake_marker): Allocate with new allocator.         * alloc.c (noseeum_make_marker): Allocate with new allocator.         * alloc.c (make_uninit_string): Allocate with new allocator.         * alloc.c (resize_string): Allocate with new allocator.         * alloc.c (make_string_nocopy): Allocate with new allocator.         Garbage Collection:         * alloc.c (GC_CHECK_NOT_FREE): Remove obsolete assertions.         * alloc.c (SWEEP_FIXED_TYPE_BLOCK): Remove.         * alloc.c (SWEEP_FIXED_TYPE_BLOCK_1): Remove.         * alloc.c (sweep_conses): Remove.         * alloc.c (free_cons): Use new allocator to free.         * alloc.c (sweep_compiled_functions): Remove.         * alloc.c (sweep_floats): Remove.         * alloc.c (sweep_bignums): Remove.         * alloc.c (sweep_ratios): Remove.         * alloc.c (sweep_bigfloats): Remove.         * alloc.c (sweep_symbols): Remove.         * alloc.c (sweep_extents): Remove.         * alloc.c (sweep_events): Remove.         * alloc.c (sweep_key_data): Remove.         * alloc.c (free_key_data): Use new allocator to free.         * alloc.c (sweep_button_data): Remove.         * alloc.c (free_button_data): Use new allocator to free.         * alloc.c (sweep_motion_data): Remove.         * alloc.c (free_motion_data): Use new allocator to free.         * alloc.c (sweep_process_data): Remove.         * alloc.c (free_process_data): Use new allocator to free.         * alloc.c (sweep_timeout_data): Remove.         
* alloc.c (free_timeout_data): Use new allocator to free.         * alloc.c (sweep_magic_data): Remove.         * alloc.c (free_magic_data): Use new allocator to free.         * alloc.c (sweep_magic_eval_data): Remove.         * alloc.c (free_magic_eval_data): Use new allocator to free.         * alloc.c (sweep_eval_data): Remove.         * alloc.c (free_eval_data): Use new allocator to free.         * alloc.c (sweep_misc_user_data): Remove.         * alloc.c (free_misc_user_data): Use new allocator to free.         * alloc.c (sweep_markers): Remove.         * alloc.c (free_marker): Use new allocator to free.         * alloc.c (garbage_collect_1): Remove release_breathing_space.         * alloc.c (gc_sweep): Remove all the old lcrecord and lrecord         related stuff. Sweeping now works like this: compact string         chars, finalize, sweep.         * alloc.c (common_init_alloc_early): Remove old lrecord         initializations, remove breathing_space.         * emacs.c (Fdump_emacs): Remove release_breathing_space.         * lisp.h: Remove prototype for release_breathing_space.         * lisp.h: Adjust the special cons mark makros.         Lrecord Finalizer:         * alloc.c: Add finalizer to lrecord definition.         * alloc.c (finalize_string): Add finalizer for string.         * bytecode.c: Add finalizer to lrecord definition.         * bytecode.c (finalize_compiled_function): Add finalizer for         compiled function.         * marker.c: Add finalizer to lrecord definition.         * marker.c (finalize_marker): Add finalizer for marker.         These changes build the interface to mc-alloc:         * lrecord.h (MC_ALLOC_CALL_FINALIZER): Tell mc-alloc how to         finalize lrecords.         * lrecord.h (MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE): Tell         mc-alloc how to finalize for disksave.         Unify lrecords and lcrecords:         * lisp.h (struct Lisp_String): Adjust string union hack to         new lrecord header.         
* lrecord.h: Adjust comments.         * lrecord.h (struct lrecord_header): The new lrecord header         includes type, lisp-readonly, free, and uid.         * lrecord.h (set_lheader_implementation): Adjust to new         lrecord_header.         * lrecord.h (struct lrecord_implementation): The field basic_p         for indication of an old lrecord is not needed anymore, remove.         * lrecord.h (MAKE_LRECORD_IMPLEMENTATION): Remove basic_p.         * lrecord.h (MAKE_EXTERNAL_LRECORD_IMPLEMENTATION): Remove         basic_p.         * lrecord.h (copy_sized_lrecord): Remove distinction between         old lrecords and lcrecords.         * lrecord.h (copy_lrecord): Remove distinction between old         lrecords and lcrecords.         * lrecord.h (zero_sized_lrecord): Remove distinction between         old lrecords and lcrecords.         * lrecord.h (zero_lrecord): Remove distinction between old         lrecords and lcrecords.         Remove lcrecords and lcrecord lists:         * alloc.c (basic_alloc_lcrecord): Not needed anymore, remove.         * alloc.c (very_old_free_lcrecord): Not needed anymore, remove.         * alloc.c (copy_lisp_object): No more distinction between         lrecords and lcrecords.         * alloc.c (all_lcrecords): Not needed anymore, remove.         * alloc.c (make_vector_internal): Allocate as lrecord.         * alloc.c (make_bit_vector_internal): Allocate as lrecord.         * alloc.c: Completely remove `lcrecord lists'.         * alloc.c (free_description): Remove.         * alloc.c (lcrecord_list_description): Remove.         * alloc.c (mark_lcrecord_list): Remove.         * alloc.c (make_lcrecord_list): Remove.         * alloc.c (alloc_managed_lcrecord): Remove.         * alloc.c (free_managed_lcrecord): Remove.         * alloc.c (alloc_automanaged_lcrecord): Remove.         * alloc.c (free_lcrecord): Remove.         * alloc.c (lcrecord_stats): Remove.         * alloc.c (tick_lcrecord_stats): Remove.         
* alloc.c (disksave_object_finalization_1): Add call to         mc_finalize_for_disksave. Remove the lcrecord way to visit all         objects.         * alloc.c (kkcc_marking): Remove XD_FLAG_FREE_LISP_OBJECT         * alloc.c (sweep_lcrecords_1): Remove.         * alloc.c (common_init_alloc_early): Remove everything related         to lcrecords, remove old lrecord initializations,         * alloc.c (init_lcrecord_lists): Not needed anymore, remove.         * alloc.c (reinit_alloc_early): Remove everything related to         lcrecords.         * alloc.c (init_alloc_once_early): Remove everything related to         lcrecords.         * buffer.c (allocate_buffer): Allocate as lrecord.         * buffer.c (nuke_all_buffer_slots): Use lrecord functions.         * buffer.c (common_init_complex_vars_of_buffer): Allocate as         lrecord.         * buffer.h (struct buffer): Add lrecord_header.         * casetab.c (allocate_case_table): Allocate as lrecord.         * casetab.h (struct Lisp_Case_Table): Add lrecord_header.         * charset.h (struct Lisp_Charset): Add lrecord_header.         * chartab.c (fill_char_table): Use lrecord functions.         * chartab.c (Fmake_char_table): Allocate as lrecord.         * chartab.c (make_char_table_entry): Allocate as lrecord.         * chartab.c (copy_char_table_entry): Allocate as lrecord.         * chartab.c (Fcopy_char_table): Allocate as lrecord.         * chartab.c (put_char_table): Use lrecord functions.         * chartab.h (struct Lisp_Char_Table_Entry): Add lrecord_header.         * chartab.h (struct Lisp_Char_Table): Add lrecord_header.         * console-impl.h (struct console): Add lrecord_header.         * console-msw-impl.h (struct Lisp_Devmode): Add lrecord_header.         * console-msw-impl.h (struct mswindows_dialog_id): Add         lrecord_header.         * console.c (allocate_console): Allocate as lrecord.         * console.c (nuke_all_console_slots): Use lrecord functions.         
* console.c (common_init_complex_vars_of_console): Allocate as         lrecord.         * data.c (make_weak_list): Allocate as lrecord.         * data.c (make_weak_box): Allocate as lrecord.         * data.c (make_ephemeron): Allocate as lrecord.         * database.c (struct Lisp_Database): Add lrecord_header.         * database.c (allocate_database): Allocate as lrecord.         * device-impl.h (struct device): Add lrecord_header.         * device-msw.c (allocate_devmode): Allocate as lrecord.         * device.c (nuke_all_device_slots): Use lrecord functions.         * device.c (allocate_device): Allocate as lrecord.         * dialog-msw.c (handle_question_dialog_box): Allocate as lrecord.         * elhash.c (struct Lisp_Hash_Table): Add lrecord_header.         * elhash.c (make_general_lisp_hash_table): Allocate as lrecord.         * elhash.c (Fcopy_hash_table): Allocate as lrecord.         * event-stream.c: Lcrecord lists Vcommand_builder_free_list and         Vtimeout_free_list are no longer needed. Remove.         * event-stream.c (allocate_command_builder): Allocate as lrecord.         * event-stream.c (free_command_builder): Use lrecord functions.         * event-stream.c (event_stream_generate_wakeup): Allocate as         lrecord.         * event-stream.c (event_stream_resignal_wakeup): Use lrecord         functions.         * event-stream.c (event_stream_disable_wakeup): Use lrecord         functions.         * event-stream.c (reinit_vars_of_event_stream): Lcrecord lists         remove.         * events.h (struct Lisp_Timeout): Add lrecord_header.         * events.h (struct command_builder): Add lrecord_header.         * extents-impl.h (struct extent_auxiliary): Add lrecord_header.         * extents-impl.h (struct extent_info): Add lrecord_header.         * extents.c (allocate_extent_auxiliary): Allocate as lrecord.         * extents.c (allocate_extent_info): Allocate as lrecord.         * extents.c (copy_extent): Allocate as lrecord.         
* faces.c (allocate_face): Allocate as lrecord.         * faces.h (struct Lisp_Face): Add lrecord_header.         * file-coding.c (allocate_coding_system): Allocate as lrecord.         * file-coding.c (Fcopy_coding_system): Allocate as lrecord.         * file-coding.h (struct Lisp_Coding_System): Add lrecord_header.         * fns.c (Ffillarray): Allocate as lrecord.         * frame-impl.h (struct frame): Add lrecord_header.         * frame.c (nuke_all_frame_slots): Use lrecord functions.         * frame.c (allocate_frame_core): Allocate as lrecord.         * glyphs.c (allocate_image_instance): Allocate as lrecord.         * glyphs.c (Fcolorize_image_instance): Allocate as lrecord.         * glyphs.c (allocate_glyph): Allocate as lrecord.         * glyphs.h (struct Lisp_Image_Instance): Add lrecord_header.         * glyphs.h (struct Lisp_Glyph): Add lrecord_header.         * gui.c (allocate_gui_item): Allocate as lrecord.         * gui.h (struct Lisp_Gui_Item): Add lrecord_header.         * keymap.c (struct Lisp_Keymap): Add lrecord_header.         * keymap.c (make_keymap): Allocate as lrecord.         * lisp.h (struct Lisp_Vector): Add lrecord_header.         * lisp.h (struct Lisp_Bit_Vector): Add lrecord_header.         * lisp.h (struct weak_box): Add lrecord_header.         * lisp.h (struct ephemeron): Add lrecord_header.         * lisp.h (struct weak_list): Add lrecord_header.         * lrecord.h (struct lcrecord_header): Not used, remove.         * lrecord.h (struct free_lcrecord_header): Not used, remove.         * lrecord.h (struct lcrecord_list): Not needed anymore, remove.         * lrecord.h (lcrecord_list): Not needed anymore, remove.         * lrecord.h: (enum data_description_entry_flags): Remove         XD_FLAG_FREE_LISP_OBJECT.         * lstream.c: Lrecord list Vlstream_free_list remove.         * lstream.c (Lstream_new): Allocate as lrecord.         * lstream.c (Lstream_delete): Use lrecod functions.         
* lstream.c (reinit_vars_of_lstream): Vlstream_free_list         initialization remove.           * lstream.h (struct lstream): Add lrecord_header.         * emacs.c (main_1): Remove lstream initialization.         * mule-charset.c (make_charset): Allocate as lrecord.         * objects-impl.h (struct Lisp_Color_Instance): Add         lrecord_header.         * objects-impl.h (struct Lisp_Font_Instance): Add lrecord_header.         * objects.c (Fmake_color_instance): Allocate as lrecord.         * objects.c (Fmake_font_instance): Allocate as lrecord.         * objects.c (reinit_vars_of_objects): Allocate as lrecord.         * opaque.c: Lcreord list Vopaque_ptr_list remove.         * opaque.c (make_opaque): Allocate as lrecord.         * opaque.c (make_opaque_ptr): Allocate as lrecord.         * opaque.c (free_opaque_ptr): Use lrecord functions.         * opaque.c (reinit_opaque_early):         * opaque.c (init_opaque_once_early): Vopaque_ptr_list         initialization remove.         * opaque.h (Lisp_Opaque): Add lrecord_header.         * opaque.h (Lisp_Opaque_Ptr): Add lrecord_header.         * emacs.c (main_1): Remove opaque variable initialization.         * print.c (default_object_printer): Use new lrecord_header.         * print.c (print_internal): Use new lrecord_header.         * print.c (debug_p4): Use new lrecord_header.         * process.c (make_process_internal): Allocate as lrecord.         * procimpl.h (struct Lisp_Process): Add lrecord_header.         * rangetab.c (Fmake_range_table): Allocate as lrecord.         * rangetab.c (Fcopy_range_table): Allocate as lrecord.         * rangetab.h (struct Lisp_Range_Table): Add lrecord_header.         * scrollbar.c (create_scrollbar_instance): Allocate as lrecord.         * scrollbar.h (struct scrollbar_instance): Add lrecord_header.         * specifier.c (make_specifier_internal): Allocate as lrecord.         * specifier.h (struct Lisp_Specifier): Add lrecord_header.         
* symbols.c:         * symbols.c (Fmake_variable_buffer_local): Allocate as lrecord.         * symbols.c (Fdontusethis_set_symbol_value_handler): Allocate         as lrecord.         * symbols.c (Fdefvaralias): Allocate as lrecord.         * symeval.h (struct symbol_value_magic): Add lrecord_header.         * toolbar.c (update_toolbar_button): Allocate as lrecord.         * toolbar.h (struct toolbar_button): Add lrecord_header.         * tooltalk.c (struct Lisp_Tooltalk_Message): Add lrecord_header.         * tooltalk.c (make_tooltalk_message): Allocate as lrecord.         * tooltalk.c (struct Lisp_Tooltalk_Pattern): Add lrecord_header.         * tooltalk.c (make_tooltalk_pattern): Allocate as lrecord.         * ui-gtk.c (allocate_ffi_data): Allocate as lrecord.         * ui-gtk.c (allocate_emacs_gtk_object_data): Allocate as lrecord.         * ui-gtk.c (allocate_emacs_gtk_boxed_data): Allocate as lrecord.         * ui-gtk.h (structs): Add lrecord_header.         * window-impl.h (struct window): Add lrecord_header.         * window-impl.h (struct window_mirror): Add lrecord_header.         * window.c (allocate_window): Allocate as lrecord.         * window.c (new_window_mirror): Allocate as lrecord.         * window.c (make_dummy_parent): Allocate as lrecord.         MEMORY_USAGE_STATS         * alloc.c (fixed_type_block_overhead): Not used anymore, remove.         * buffer.c (compute_buffer_usage): Get storage size from new         allocator.         * marker.c (compute_buffer_marker_usage): Get storage size from         new allocator.         * mule-charset.c (compute_charset_usage): Get storage size from         new allocator.         * scrollbar-gtk.c (gtk_compute_scrollbar_instance_usage): Get         storage size from new allocator.         * scrollbar-msw.c (mswindows_compute_scrollbar_instance_usage):         Get storage size from new allocator.         * scrollbar-x.c (x_compute_scrollbar_instance_usage): Get         storage size from new allocator.        
 * scrollbar.c (compute_scrollbar_instance_usage): Get storage         size from new allocator.         * unicode.c (compute_from_unicode_table_size_1): Get storage         size from new allocator.         * unicode.c (compute_to_unicode_table_size_1): Get storage size         from new allocator.         * window.c (compute_window_mirror_usage): Get storage size from         new allocator.         * window.c (compute_window_usage): Get storage size from new         allocator.         MC_ALLOC_TYPE_STATS:         * alloc.c (alloc_lrecord): Bump lrecord count.         * alloc.c (noseeum_alloc_lrecord): Bump lrecord count.         * alloc.c (struct lrecord_stats): Storage for counts.         * alloc.c (init_lrecord_stats): Zero statistics.         * alloc.c (inc_lrecord_stats): Increase the statistic.         * alloc.c (dec_lrecord_stats): Decrease the statistic.         * alloc.c (gc_plist_hack): Used to print the information.         * alloc.c (Fgarbage_collect): Return the collected information.         * mc-alloc.c (remove_cell): Decrease lrecord count.         * mc-alloc.h: Set flag MC_ALLOC_TYPE_STATS.         * emacs.c (main_1): Init lrecord statistics.         * lrecord.h: Add prototypes for *_lrecord_stats.         Strings:         * alloc.c (Fmake_string): Initialize ascii_begin to zero.         * alloc.c (gc_count_num_short_string_in_use): Remove.         * alloc.c (gc_count_string_total_size): Remove.         * alloc.c (gc_count_short_string_total_size): Remove.         * alloc.c (debug_string_purity): Remove.         * alloc.c (debug_string_purity_print): Remove.         * alloc.c (sweep_strings): Remove.                  Remove static C-readonly Lisp objects:         * alloc.c (c_readonly): Not needed anymore, remove.         * alloc.c (GC_CHECK_LHEADER_INVARIANTS): Remove some obsolete         lheader invariants assertions.         * buffer.c (DEFVAR_BUFFER_LOCAL_1): Allocate dynamically.         
* console.c (DEFVAR_CONSOLE_LOCAL_1): Allocate dynamically.         * gpmevent.c: Indirection via MC_ALLOC_Freceive_gpm_event.         * gpmevent.c (Fgpm_enable): Allocate dynamically.         * gpmevent.c (syms_of_gpmevent): Allocate dynamically.         * lisp.h (C_READONLY): Not needed anymore, remove.         * lisp.h (DEFUN): Allocate dynamically.         * lrecord.h (C_READONLY_RECORD_HEADER_P): Not needed anymore,         remove.         * lrecord.h (SET_C_READONLY_RECORD_HEADER): Not needed anymore,         remove.         * symbols.c (guts_of_unbound_marker):         * symeval.h (defsubr): Allocate dynamically.         * symeval.h (DEFSUBR_MACRO): Allocate dynamically.         * symeval.h (DEFVAR_ SYMVAL_FWD): Allocate dynamically.         * tests.c (TESTS_DEFSUBR): Allocate dynamically.         Definition of mcpro:         * lisp.h: Add mcpro prototypes.         * alloc.c (common_init_alloc_early): Add initialization for         mcpros.         * alloc.c (mcpro_description_1): New.         * alloc.c (mcpro_description): New.         * alloc.c (mcpros_description_1): New.         * alloc.c (mcpros_description): New.         * alloc.c (mcpro_one_name_description_1): New.         * alloc.c (mcpro_one_name_description): New.         * alloc.c (mcpro_names_description_1): New.         * alloc.c (mcpro_names_description): New.         * alloc.c (mcpros): New.         * alloc.c (mcpro_names): New.         * alloc.c (mcpro_1): New.         * alloc.c (mc_pro): New.         * alloc.c (garbage_collect_1): Add mcpros to root set.         Usage of mcpro:         * alloc.c (make_string_nocopy): Add string to root set.         * symbols.c (init_symbols_once_early): Add Qunbound to root set.         Changes to the Portable Dumper:                  * alloc.c (FREE_OR_REALLOC_BEGIN): Since dumped objects can be         freed with the new allocator, remove assertion for !DUMPEDP.         * dumper.c: Adjust comments, increase PDUMP_HASHSIZE.         
* dumper.c (pdump_make_hash): Shift address only 2 bytes, to         avoid collisions.         * dumper.c (pdump_objects_unmark): No more mark bits within         the object, remove.         * dumper.c (mc_addr_elt): New. Element data structure for mc         hash table.         * dumper.c (pdump_mc_hash): New hash table: `lookup table'.         * dumper.c (pdump_get_mc_addr): New. Lookup for hash table.         * dumper.c (pdump_get_indirect_mc_addr): New. Lookup for         convertibles.         * dumper.c (pdump_put_mc_addr): New. Putter for hash table.         * dumper.c (pdump_dump_mc_data): New. Writes the table for         relocation at load time to the dump file.         * dumper.c (pdump_scan_lisp_objects_by_alignment): New.         Visits all dumped Lisp objects.         * dumper.c (pdump_scan_non_lisp_objects_by_alignment): New.         Visits all other dumped objects.         * dumper.c (pdump_reloc_one_mc): New. Updates all pointers         of an object by using the hash table pdump_mc_hash.         * dumper.c (pdump_reloc_one): Replaced by pdump_reloc_one_mc.         * dumper.c (pdump): Change the structure of the dump file, add         the mc post dump relocation table to dump file.         * dumper.c (pdump_load_finish): Hand all dumped objects to the         new allocator and use the mc post dump relocation table for         relocating the dumped objects at dump file load time, free not         longer used data structures.         * dumper.c (pdump_load): Free the dump file.         * dumper.h: Remove pdump_objects_unmark.         * lrecord.h (DUMPEDP): Dumped objects can be freed, remove.              DUMP_IN_EXEC:         * Makefile.in.in: Linking for and with dump in executable only if         DUMP_IN_EXEC is defined.         * config.h.in: Add new flag `DUMP_IN_EXEC'         * emacs.c: Condition dump-data.h on DUMP_IN_EXEC.         * emacs.c (main_1): Flag `-si' only works if dump image is         written into executable.         
Miscellanious         * lrecord.h (enum lrecord_type): Added numbers to all types,         very handy for debugging.         * xemacs.def.in.in: Add mc-alloc functions to make them visible         to the modules.
author crestani
date Fri, 08 Apr 2005 23:11:35 +0000
parents
children c474585a3460
comparison
equal deleted inserted replaced
2719:5f6ef2b26d9f 2720:6fa9919a9a0b
1 /* New size-based allocator for XEmacs.
2 Copyright (C) 2005 Marcus Crestani.
3
4 This file is part of XEmacs.
5
6 XEmacs is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2, or (at your option) any
9 later version.
10
11 XEmacs is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with XEmacs; see the file COPYING. If not, write to
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21 /* Synched up with: Not in FSF. */
22
23 #include <config.h>
24 #include "lisp.h"
25 #include "mc-alloc.h"
26
27
28 /*--- configurable values ----------------------------------------------*/
29
30 /* Valid page sizes are powers of 2. */
31 #undef PAGE_SIZE /* for FreeBSD */
32 #define PAGE_SIZE 2048
33
34
35 /* Definition of size classes */
36
37 /* Heap used list constants: In the used heap, it is important to
38 quickly find a free spot for a new object. Therefore the size
39 classes of the used heap are defined by the size of the cells on
40 the pages. The size classes should match common object sizes, to
41 avoid wasting memory. */
42
43 /* Minimum object size in bytes. */
44 #define USED_LIST_MIN_OBJECT_SIZE 8
45
46 /* The step size by which the size classes increase (up to upper
47 threshold). This many bytes are mapped to a single used list: */
48 #define USED_LIST_LIN_STEP 4
49
50 /* The upper threshold should always be set to PAGE_SIZE/2, because if
51 an object is larger than PAGE_SIZE/2 there is no room for any other
52 object on this page. Objects this big are kept in the page list of
53 the multiple pages, since a quick search for free spots is not
54 needed for this kind of page (because there are no free spots).
55 PAGE_SIZE_DIV_2 defines the maximum size of a used space list. */
56 #define USED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2
57
58
59 /* Unmanaged memory used list constants: Like in the used heap, it is
60 important to quickly find a free spot for a new object. Therefore
61 the size classes of the unmanaged heap are defined by the size of
62 the cells on the pages. The size classes should match common object
63 sizes, to avoid wasting memory. */
64 /* Minimum object size in bytes. */
65 #define UNMANAGED_LIST_MIN_OBJECT_SIZE 8
66 /* The step size by which the size classes increase (up to upper
67 threshold). This many bytes are mapped to a single unmanaged list: */
68 #define UNMANAGED_LIST_LIN_STEP 4
69 /* The upper threshold should always be set to PAGE_SIZE/2, because if
70 an object is larger than PAGE_SIZE/2 there is no room for any other
71 object on this page. Objects this big are kept in the page list of
72 the multiple pages, since a quick search for free spots is not
73 needed for this kind of page (because there are no free spots).
74 PAGE_SIZE_DIV_2 defines the maximum size of an unmanaged space list. */
75 #define UNMANAGED_LIST_UPPER_THRESHOLD PAGE_SIZE_DIV_2
76
77
78 /* Heap free list constants: In the unused heap, the size of
79 consecutive memory tips the scales. A page is the smallest entity
80 which is asked for. Therefore, the size classes of the unused heap
81 are defined by the number of consecutive pages. */
82 /* Sizes up to this many pages each have their own free list. */
83 #define FREE_LIST_LOWER_THRESHOLD 32
84 /* The step size by which the size classes increase (up to upper
85 threshold). FREE_LIST_LIN_STEP number of sizes are mapped to a
86 single free list for sizes between FREE_LIST_LOWER_THRESHOLD and
87 FREE_LIST_UPPER_THRESHOLD. */
88 #define FREE_LIST_LIN_STEP 8
89 /* Sizes of at least this many pages are mapped to a single free
90 list. Blocks of memory larger than this number are all kept in a
91 single list, which makes searching this list slow. But objects that
92 big are really rare. */
93 #define FREE_LIST_UPPER_THRESHOLD 256
94
95
96 /* Maximum number of separately added heap sections. */
97 #if BITS_PER_EMACS_INT > 32
98 # define MAX_HEAP_SECTS 2048
99 #else
100 # define MAX_HEAP_SECTS 768
101 #endif
102
103
104 /* Heap growth constants. Heap increases by any number between the
105 boundaries (unit is PAGE_SIZE). */
106 #define MIN_HEAP_INCREASE 32
107 #define MAX_HEAP_INCREASE 256 /* not used */
108
109 /* Every heap growth is calculated like this:
110 needed_pages + ( HEAP_SIZE / ( PAGE_SIZE * HEAP_GROWTH_DIVISOR )).
111 So the growth of the heap is influenced by the current size of the
112 heap, but kept between MIN_HEAP_INCREASE and MAX_HEAP_INCREASE
113 boundaries.
114 This reduces the number of heap sectors, the larger the heap grows
115 the larger are the newly allocated chunks. */
116 #define HEAP_GROWTH_DIVISOR 3
117
118
119 /* Zero memory before putting on free lists. */
120 #define ZERO_MEM 1
121
122
123
124
/*--- calculations done by macros --------------------------------------*/

#ifndef CHAR_BIT		/* should be included by limits.h */
# define CHAR_BIT BITS_PER_CHAR
#endif

/* Compute log2 (PAGE_SIZE) at preprocessing time for the supported
   page sizes.  If PAGE_SIZE is none of these, the invalid token line
   below deliberately breaks the compile so the table gets extended. */
#if PAGE_SIZE == 512
# define CPP_LOG_PAGE_SIZE 9
#endif
#if PAGE_SIZE == 1024
# define CPP_LOG_PAGE_SIZE 10
#endif
#if PAGE_SIZE == 2048
# define CPP_LOG_PAGE_SIZE 11
#endif
#if PAGE_SIZE == 4096
# define CPP_LOG_PAGE_SIZE 12
#endif
#if PAGE_SIZE == 8192
# define CPP_LOG_PAGE_SIZE 13
#endif
#if PAGE_SIZE == 16384
# define CPP_LOG_PAGE_SIZE 14
#endif
#ifndef CPP_LOG_PAGE_SIZE
--> fix PAGE_SIZE
#endif
/* Redefine PAGE_SIZE as a typed constant derived from the log. */
#undef PAGE_SIZE
#define CPP_PAGE_SIZE (1 << CPP_LOG_PAGE_SIZE)
#define LOG_PAGE_SIZE ((EMACS_INT) CPP_LOG_PAGE_SIZE)
#define PAGE_SIZE ((EMACS_INT) CPP_PAGE_SIZE)
#define PAGE_SIZE_DIV_2 (PAGE_SIZE >> 1)
157
158
/* NOT USED ANYMORE */
#ifdef USE_EXPONENTIAL_USED_LIST_GROWTH
/* used heap list logarithms */
#if USED_LIST_LOWER_THRESHOLD == 8
# define CPP_LOG_USED_LIST_LOWER_THRESHOLD 3
#endif
#if USED_LIST_LOWER_THRESHOLD == 16
# define CPP_LOG_USED_LIST_LOWER_THRESHOLD 4
#endif
#if USED_LIST_LOWER_THRESHOLD == 32
# define CPP_LOG_USED_LIST_LOWER_THRESHOLD 5
#endif
#if USED_LIST_LOWER_THRESHOLD == 64
# define CPP_LOG_USED_LIST_LOWER_THRESHOLD 6
#endif
#if USED_LIST_LOWER_THRESHOLD == 128
# define CPP_LOG_USED_LIST_LOWER_THRESHOLD 7
#endif
#if USED_LIST_LOWER_THRESHOLD == 256
# define CPP_LOG_USED_LIST_LOWER_THRESHOLD 8
#endif
#ifndef CPP_LOG_USED_LIST_LOWER_THRESHOLD
--> fix USED_LIST_LOWER_THRESHOLD
#endif
#define LOG_USED_LIST_LOWER_THRESHOLD CPP_LOG_USED_LIST_LOWER_THRESHOLD
#endif /* USE_EXPONENTIAL_USED_LIST_GROWTH */

/* used heap list count: one list per linear size class, plus one list
   for objects bigger than USED_LIST_UPPER_THRESHOLD */
#define N_USED_PAGE_LISTS (((USED_LIST_UPPER_THRESHOLD \
			     - USED_LIST_MIN_OBJECT_SIZE) \
			    / USED_LIST_LIN_STEP) + 1 ) + 1

/* unmanaged memory list count: analogous to N_USED_PAGE_LISTS */
#define N_UNMANAGED_PAGE_LISTS (((UNMANAGED_LIST_UPPER_THRESHOLD \
				  - UNMANAGED_LIST_MIN_OBJECT_SIZE) \
				 / UNMANAGED_LIST_LIN_STEP) + 1 ) + 1

/* NOT USED ANYMORE */
#ifdef USE_EXPONENTIAL_USED_LIST_GROWTH
#define N_USED_PAGE_LISTS_LIN (((USED_LIST_LOWER_THRESHOLD \
				 - USED_LIST_MIN_OBJECT_SIZE) \
				/ USED_LIST_LIN_STEP) + 1 )
#define N_USED_PAGE_LISTS_EXP \
  (LOG_PAGE_SIZE - LOG_USED_LIST_LOWER_THRESHOLD)

#define N_USED_PAGE_LISTS \
  (N_USED_PAGE_LISTS_LIN + N_USED_PAGE_LISTS_EXP + 1)
#endif /* USE_EXPONENTIAL_USED_LIST_GROWTH */

/* free heap list count: one list per page count up to the lower
   threshold, then one list per FREE_LIST_LIN_STEP page counts */
#define N_FREE_PAGE_LISTS (((FREE_LIST_UPPER_THRESHOLD \
			     - FREE_LIST_LOWER_THRESHOLD) \
			    / FREE_LIST_LIN_STEP) \
			   + FREE_LIST_LOWER_THRESHOLD)
213
214
/* Constants for heap address to page header mapping.  A two-level
   tree maps the page-aligned part of an address to its page_header;
   on wide (>32 bit) machines the first level is a hash table instead
   of a direct-mapped array. */
#define LOG_LEVEL2_SIZE 10
#define LEVEL2_SIZE (1 << LOG_LEVEL2_SIZE)
#if BITS_PER_EMACS_INT > 32
# define USE_HASH_TABLE 1
# define LOG_LEVEL1_SIZE 11
#else
# define LOG_LEVEL1_SIZE \
  (BITS_PER_EMACS_INT - LOG_LEVEL2_SIZE - LOG_PAGE_SIZE)
#endif
#define LEVEL1_SIZE (1 << LOG_LEVEL1_SIZE)

#ifdef USE_HASH_TABLE
# define HASH(hi) ((hi) & (LEVEL1_SIZE - 1))
/* L1_INDEX: high-order address bits select the level-1 slot. */
# define L1_INDEX(p) HASH ((EMACS_INT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#else
# define L1_INDEX(p) ((EMACS_INT) p >> (LOG_LEVEL2_SIZE + LOG_PAGE_SIZE))
#endif
/* L2_INDEX: middle address bits select the slot within a level-2 tree. */
#define L2_INDEX(p) (((EMACS_INT) p >> LOG_PAGE_SIZE) & (LEVEL2_SIZE - 1))
234
235
236
237
/*--- structs and typedefs ---------------------------------------------*/

/* Links the free lists (mark_bit_free_list, page_header_free_list,
   cell free list). */
typedef struct free_link
{
  struct lrecord_header lheader;
  struct free_link *next_free;
} free_link;


/* Header for pages.  They are kept in a doubly linked list. */
typedef struct page_header
{
  struct page_header *next;	/* next page_header */
  struct page_header *prev;	/* previous page_header */
  /* Field plh holds a pointer to the according header of the page list. */
  struct page_list_header *plh;	/* page list header */
  free_link *free_list;		/* links free cells on page */
  EMACS_INT n_pages;		/* number of pages */
  EMACS_INT cell_size;		/* size of cells on page */
  EMACS_INT cells_on_page;	/* total number of cells on page */
  EMACS_INT cells_used;		/* number of used cells on page */
  /* If the number of objects on the page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory area.
     Then the field mark_bits holds the pointer to this area.  If the
     number of objects is smaller than BITS_PER_EMACS_INT, the mark
     bits are held in the mark_bit EMACS_INT directly, without an
     additional indirection. */
  char *mark_bits;		/* pointer to mark bits */
  void *heap_space;		/* pointer to heap, where objects
				   are stored */
} page_header;


/* Different list types. */
enum list_type_enum {
  USED_LIST,
  UNMANAGED_LIST,
  FREE_LIST
};


/* Header for page lists.  Field list_type holds the type of the list. */
typedef struct page_list_header
{
  enum list_type_enum list_type;	/* the type of the list */
  /* Size holds the size of one cell (in bytes) in a used heap list,
     or the size of the heap sector (in number of pages). */
  size_t size;			/* size of one cell / heap sector */
  page_header *first;		/* first of page_header list */
  page_header *last;		/* last of page_header list */
  /* If the number of objects on a page is bigger than
     BITS_PER_EMACS_INT, the mark bits are put in an extra memory
     area, which is linked in this free list if not used.  If the
     number of objects is smaller than BITS_PER_EMACS_INT, the mark
     bits are held in the mark bit EMACS_INT directly, without an
     additional indirection. */
  free_link *mark_bit_free_list;

#ifdef MEMORY_USAGE_STATS
  EMACS_INT page_count;		/* number of pages in list */
  EMACS_INT used_cells;		/* number of objects in list */
  EMACS_INT used_space;		/* used space */
  EMACS_INT total_cells;	/* number of available cells */
  EMACS_INT total_space;	/* available space */
#endif
} page_list_header;


/* The heap sectors are stored with their real start pointer and their
   real size.  Not aligned to PAGE_SIZE.  Needed for freeing heap
   sectors. */
typedef struct heap_sect {
  void *real_start;		/* real start pointer (NOT aligned) */
  size_t real_size;		/* NOT multiple of PAGE_SIZE */
  void *start;			/* aligned start pointer */
  EMACS_INT n_pages;		/* multiple of PAGE_SIZE */
} heap_sect;


/* 2nd tree level for mapping of heap addresses to page headers. */
typedef struct level_2_lookup_tree {
  page_header *index[LEVEL2_SIZE];	/* link to page header */
  EMACS_INT key;			/* high order address bits */
#ifdef USE_HASH_TABLE
  struct level_2_lookup_tree *hash_link; /* hash chain link */
#endif
} level_2_lookup_tree;
325
326
327
/*--- global variable definitions --------------------------------------*/

/* All global allocator variables are kept in this struct. */
typedef struct mc_allocator_globals_type {

  /* heap size */
  EMACS_INT heap_size;

  /* list of all separately allocated chunks of heap */
  heap_sect heap_sections[MAX_HEAP_SECTS];
  EMACS_INT n_heap_sections;

  /* Holds all allocated pages, each object size class in its separate
     list, to guarantee fast allocation on partially filled pages. */
  page_list_header used_heap_pages[N_USED_PAGE_LISTS];

  /* Holds all unmanaged pages. */
  page_list_header unmanaged_heap_pages[N_UNMANAGED_PAGE_LISTS];

  /* Holds all free pages in the heap.  N multiples of PAGE_SIZE are
     kept on the Nth free list.  Contiguous pages are coalesced. */
  page_list_header free_heap_pages[N_FREE_PAGE_LISTS];

  /* ptr lookup table */
  level_2_lookup_tree *ptr_lookup_table[LEVEL1_SIZE];

  /* page header free list */
  free_link *page_header_free_list;

#ifdef MEMORY_USAGE_STATS
  EMACS_INT malloced_bytes;
#endif
} mc_allocator_globals_type;

mc_allocator_globals_type mc_allocator_globals;
363
364
365
366
/*--- macro accessors --------------------------------------------------*/

/* All access to the allocator globals goes through these accessors so
   the underlying representation can change in one place. */

#define USED_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.used_heap_pages[i])

#define UNMANAGED_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.unmanaged_heap_pages[i])

#define FREE_HEAP_PAGES(i) \
  ((page_list_header*) &mc_allocator_globals.free_heap_pages[i])

#define PLH(plh) plh
# define PLH_LIST_TYPE(plh) PLH (plh)->list_type
# define PLH_SIZE(plh) PLH (plh)->size
# define PLH_FIRST(plh) PLH (plh)->first
# define PLH_LAST(plh) PLH (plh)->last
# define PLH_MARK_BIT_FREE_LIST(plh) PLH (plh)->mark_bit_free_list
#ifdef MEMORY_USAGE_STATS
# define PLH_PAGE_COUNT(plh) PLH (plh)->page_count
# define PLH_USED_CELLS(plh) PLH (plh)->used_cells
# define PLH_USED_SPACE(plh) PLH (plh)->used_space
# define PLH_TOTAL_CELLS(plh) PLH (plh)->total_cells
# define PLH_TOTAL_SPACE(plh) PLH (plh)->total_space
#endif

#define PH(ph) ph
# define PH_NEXT(ph) PH (ph)->next
# define PH_PREV(ph) PH (ph)->prev
# define PH_PLH(ph) PH (ph)->plh
# define PH_FREE_LIST(ph) PH (ph)->free_list
# define PH_N_PAGES(ph) PH (ph)->n_pages
# define PH_CELL_SIZE(ph) PH (ph)->cell_size
# define PH_CELLS_ON_PAGE(ph) PH (ph)->cells_on_page
# define PH_CELLS_USED(ph) PH (ph)->cells_used
# define PH_MARK_BITS(ph) PH (ph)->mark_bits
# define PH_HEAP_SPACE(ph) PH (ph)->heap_space
#define PH_LIST_TYPE(ph) PLH_LIST_TYPE (PH_PLH (ph))
#define PH_MARK_BIT_FREE_LIST(ph) PLH_MARK_BIT_FREE_LIST (PH_PLH (ph))

#define HEAP_SIZE mc_allocator_globals.heap_size

#ifdef MEMORY_USAGE_STATS
# define MC_MALLOCED_BYTES mc_allocator_globals.malloced_bytes
#endif

#define HEAP_SECTION(index) mc_allocator_globals.heap_sections[index]
#define N_HEAP_SECTIONS mc_allocator_globals.n_heap_sections

#define PAGE_HEADER_FREE_LIST mc_allocator_globals.page_header_free_list

/* Arguments are parenthesized so these macros expand safely when
   passed compound expressions (the previous definitions did not
   parenthesize free_list/x). */
#define NEXT_FREE(free_list) ((free_link*) (free_list))->next_free
#define FREE_LIST(free_list) (free_link*) (free_list)

#define PTR_LOOKUP_TABLE(i) mc_allocator_globals.ptr_lookup_table[i]
#define LEVEL2(l2, i) l2->index[i]
# define LEVEL2_KEY(l2) l2->key
#ifdef USE_HASH_TABLE
# define LEVEL2_HASH_LINK(l2) l2->hash_link
#endif

#if ZERO_MEM
# define ZERO_HEAP_SPACE(ph) \
  memset (PH_HEAP_SPACE (ph), '\0', PH_N_PAGES (ph) * PAGE_SIZE)
# define ZERO_PAGE_HEADER(ph) memset (ph, '\0', sizeof (page_header))
#endif

#define div_PAGE_SIZE(x) ((x) >> LOG_PAGE_SIZE)
#define mult_PAGE_SIZE(x) ((x) << LOG_PAGE_SIZE)

/* Number of pages needed to hold the given number of bytes. */
#define BYTES_TO_PAGES(bytes) (div_PAGE_SIZE ((bytes + (PAGE_SIZE - 1))))

/* Round an address up to the NEXT page boundary.  Note this always
   advances, even for an already aligned address; callers allocate one
   extra page to compensate. */
#define PAGE_SIZE_ALIGNMENT(address) \
  (void *) ((((EMACS_INT) (address)) + PAGE_SIZE) & ~(PAGE_SIZE - 1))

#define PH_ON_FREE_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == FREE_LIST))

#define PH_ON_USED_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST))

#define PH_ON_UNMANAGED_LIST_P(ph) \
  (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == UNMANAGED_LIST))
449
450
451
452
453 /************************************************************************/
454 /* MC Allocator */
455 /************************************************************************/
456
457
/* ###TODO### */
/* When set, mark-bit arrays are allocated from the allocator's own
   unmanaged heap instead of plain malloc; see alloc_bit_array and
   free_mark_bits. */
#if 1
# define ALLOC_MB_UNMANAGED 1
#endif
462
463
464 /*--- misc functions ---------------------------------------------------*/
465
466 /* moved here from alloc.c */
467 #ifdef ERROR_CHECK_GC
468 static void
469 deadbeef_memory (void *ptr, Bytecount size)
470 {
471 UINT_32_BIT *ptr4 = (UINT_32_BIT *) ptr;
472 Bytecount beefs = size >> 2;
473
474 /* In practice, size will always be a multiple of four. */
475 while (beefs--)
476 (*ptr4++) = 0xDEADBEEF; /* -559038737 base 10 */
477 }
478 #endif /* ERROR_CHECK_GC */
479
/* Visits all pages (page_headers) hooked into the used heap pages
   list and executes f with the current page header as
   argument.  Needed for sweep.  The next pointer is fetched before
   calling f, so f may unlink or free the current page header. */
static void
visit_all_used_page_headers (void (*f) (page_header *ph))
{
  int i;
  for (i = 0; i < N_USED_PAGE_LISTS; i++)
    if (PLH_FIRST (USED_HEAP_PAGES (i)))
      {
	page_header *ph = PLH_FIRST (USED_HEAP_PAGES (i));
	while (PH_NEXT (ph))
	  {
	    page_header *next = PH_NEXT (ph); /* in case f removes the page */
	    f (ph);
	    ph = next;
	  }
	/* last element of the list */
	f (ph);
      }
}
500
501
502
503
504 /*--- mapping of heap addresses to page headers and mark bits ----------*/
505
506 /* Sets a heap pointer and page header pair into the lookup table. */
507 static void
508 set_lookup_table (void *ptr, page_header *ph)
509 {
510 int l1_index = L1_INDEX (ptr);
511 level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
512 #ifdef USE_HASH_TABLE
513 while ((l2) && (LEVEL2_KEY (l2) != l1_index))
514 l2 = LEVEL2_HASH_LINK (l2);
515 #endif
516 if (!l2)
517 {
518 l2 = (level_2_lookup_tree*)
519 xmalloc_and_zero (sizeof (level_2_lookup_tree));
520 #ifdef MEMORY_USAGE_STATS
521 MC_MALLOCED_BYTES +=
522 malloced_storage_size (0, sizeof (level_2_lookup_tree), 0);
523 #endif
524 memset (l2, 0, sizeof (level_2_lookup_tree));
525 #ifdef USE_HASH_TABLE
526 LEVEL2_HASH_LINK (l2) = PTR_LOOKUP_TABLE (l1_index);
527 #endif
528 PTR_LOOKUP_TABLE (l1_index) = l2;
529 LEVEL2_KEY (l2) = l1_index;
530 }
531 LEVEL2 (l2, L2_INDEX (ptr)) = ph;
532 }
533
534
535 #ifdef UNSET_LOOKUP_TABLE
/* Sets the lookup table entry to 0 for the given heap address.
   Silently does nothing if the address has no level-2 tree. */
static void
unset_lookup_table (void *ptr)
{
  int l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  /* Walk the hash chain until the entry for this key is found. */
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (l2) {
    LEVEL2 (l2, L2_INDEX (ptr)) = 0;
  }
}
550 #endif
551
/* Returns the page header of a given heap address, or 0 if not in table.
   For internal use, no error checking. */
static page_header *
get_page_header_internal (void *ptr)
{
  int l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  /* Walk the hash chain until the entry for this key is found. */
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  if (!l2)
    return 0;
  return LEVEL2 (l2, L2_INDEX (ptr));
}
567
/* Returns the page header of a given heap address, or 0 if not in table.
   Unlike get_page_header_internal, asserts that the address is
   actually registered in the lookup table. */
static page_header *
get_page_header (void *ptr)
{
  int l1_index = L1_INDEX (ptr);
  level_2_lookup_tree *l2 = PTR_LOOKUP_TABLE (l1_index);
#ifdef USE_HASH_TABLE
  /* Walk the hash chain until the entry for this key is found. */
  while ((l2) && (LEVEL2_KEY (l2) != l1_index))
    l2 = LEVEL2_HASH_LINK (l2);
#endif
  assert (l2 && LEVEL2 (l2, L2_INDEX (ptr)));
  return LEVEL2 (l2, L2_INDEX (ptr));
}
581
582
/* Returns the mark bit index of a given heap address: the cell number
   of ptr within its page.  A cell_size of 0 means the page holds a
   single (multi-page) object. */
static EMACS_INT
get_mark_bit_index (void *ptr, page_header *ph)
{
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  if (cell_size)
    return (((EMACS_INT) ptr - (EMACS_INT)(PH_HEAP_SPACE (ph))) / cell_size);
  else /* only one object on page */
    return 0;
}
593
594
/* Adds the addresses of all n_pages pages of ph to the lookup table,
   one entry per page, all pointing back at ph. */
static void
add_pages_to_lookup_table (page_header *ph, EMACS_INT n_pages)
{
  char *p = (char*) PH_HEAP_SPACE (ph);
  EMACS_INT end_of_section = (EMACS_INT) p + (PAGE_SIZE * n_pages);
  for (p = (char*) PH_HEAP_SPACE (ph);
       (EMACS_INT) p < end_of_section; p += PAGE_SIZE)
    set_lookup_table (p, ph);
}
605
606
607 /* Initializes lookup table. */
608 static void
609 init_lookup_table (void)
610 {
611 int i;
612 for (i = 0; i < LEVEL1_SIZE; i++)
613 PTR_LOOKUP_TABLE (i) = 0;
614 }
615
616
617
618
619 /*--- mark bits --------------------------------------------------------*/
620
/* Number of mark bits per cell: minimum 1, maximum 8.  The bit
   helpers below have separate code paths for the 1-bit and the
   multi-bit case. */
#define N_MARK_BITS 1
623
624 /*--- bit operations --- */
625
/* Allocates a bit array of length bits.  The array is at least as big
   as a free_link so it can later be threaded onto a free list. */
static char *
alloc_bit_array(size_t bits)
{
#ifdef ALLOC_MB_UNMANAGED
  size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof(char);
  if (size < sizeof (free_link)) size = sizeof (free_link);
  return (char *) mc_alloc_unmanaged (size);
#else /* not ALLOC_MB_UNMANAGED */
  size_t size = ((bits + CHAR_BIT - 1) / CHAR_BIT) * sizeof(char);
  char *bit_array;
  if (size < sizeof (free_link)) size = sizeof (free_link);
  bit_array = (char*) xmalloc_and_zero (size);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, size, 0);
#endif
  return bit_array;
#endif /* not ALLOC_MB_UNMANAGED */
}
645
646
647 /* Returns the bit value at pos. */
648 static EMACS_INT
649 get_bit (char *bit_array, EMACS_INT pos)
650 {
651 #if N_MARK_BITS > 1
652 EMACS_INT result = 0;
653 EMACS_INT i;
654 #endif
655 bit_array += pos / CHAR_BIT;
656 #if N_MARK_BITS > 1
657 for (i = 0; i < N_MARK_BITS; i++)
658 result |= (*bit_array & (1 << ((pos + i) % CHAR_BIT)));
659 return result >> pos;
660 #else
661 return (*bit_array & (1 << (pos % CHAR_BIT))) != 0;
662 #endif
663 }
664
665
/* Sets the bit_array's bit(s) at pos to val.  With N_MARK_BITS > 1,
   the low N_MARK_BITS bits of val are stored.
   NOTE(review): the multi-bit branch assumes all bits of one cell lie
   in the same byte; (pos + i) % CHAR_BIT wraps within the byte
   otherwise -- confirm before raising N_MARK_BITS. */
static void
set_bit(char *bit_array, EMACS_INT pos, EMACS_INT val)
{
#if N_MARK_BITS > 1
  EMACS_INT result = 0;
  EMACS_INT i;
#endif
  /* Advance to the byte containing the bit. */
  bit_array += pos / CHAR_BIT;
#if N_MARK_BITS > 1
  for (i = 0; i < N_MARK_BITS; i++)
    if ((val >> i) & 1)
      *bit_array |= 1 << ((pos + i) % CHAR_BIT);
    else
      *bit_array &= ~(1 << ((pos + i) % CHAR_BIT));
#else
  if (val)
    *bit_array |= 1 << (pos % CHAR_BIT);
  else
    *bit_array &= ~(1 << (pos % CHAR_BIT));
#endif
}
688
689
/*--- mark bit functions ---*/
/* A page with more cells than BITS_PER_EMACS_INT keeps its mark bits
   in a separately allocated array (PNTR variants); otherwise the
   mark_bits field itself is used as the bit word (WORD variants). */
#define USE_PNTR_MARK_BITS(ph) (PH_CELLS_ON_PAGE (ph) > BITS_PER_EMACS_INT)
#define USE_WORD_MARK_BITS(ph) (PH_CELLS_ON_PAGE (ph) <= BITS_PER_EMACS_INT)

#define GET_BIT_WORD(b, p) get_bit ((char*) &b, p)
#define GET_BIT_PNTR(b, p) get_bit (b, p)

#define SET_BIT_WORD(b, p, v) set_bit ((char*) &b, p, v)
#define SET_BIT_PNTR(b, p, v) set_bit (b, p, v)

#define ZERO_MARK_BITS_WORD(ph) PH_MARK_BITS (ph) = 0
#define ZERO_MARK_BITS_PNTR(ph)			\
do {						\
  memset (PH_MARK_BITS (ph), '\0',		\
	  (PH_CELLS_ON_PAGE (ph) + CHAR_BIT - 1)	\
	  / CHAR_BIT * sizeof(char));		\
} while (0)

/* Dispatch to the WORD or PNTR variant depending on the page. */
#define GET_BIT(bit, ph, p)			\
do {						\
  if (USE_PNTR_MARK_BITS (ph))			\
    bit = GET_BIT_PNTR (PH_MARK_BITS (ph), p);	\
  else						\
    bit = GET_BIT_WORD (PH_MARK_BITS (ph), p);	\
} while (0)

#define SET_BIT(ph, p, v)			\
do {						\
  if (USE_PNTR_MARK_BITS (ph))			\
    SET_BIT_PNTR (PH_MARK_BITS (ph), p, v);	\
  else						\
    SET_BIT_WORD (PH_MARK_BITS (ph), p, v);	\
} while (0)

#define ZERO_MARK_BITS(ph)			\
do {						\
  if (USE_PNTR_MARK_BITS (ph))			\
    ZERO_MARK_BITS_PNTR (ph);			\
  else						\
    ZERO_MARK_BITS_WORD (ph);			\
} while (0)
731
732
/* Allocates mark-bit space either from a free list or from the OS
   for the given page header. */
static char *
alloc_mark_bits (page_header *ph)
{
  char *result;
  if (PH_MARK_BIT_FREE_LIST (ph) == 0)
    result = (char*) alloc_bit_array (PH_CELLS_ON_PAGE (ph) * N_MARK_BITS);
  else
    {
      /* Reuse a previously freed bit array of this size class. */
      result = (char*) PH_MARK_BIT_FREE_LIST (ph);
      PH_MARK_BIT_FREE_LIST (ph) = NEXT_FREE (result);
    }
  return result;
}
748
749
/* Frees the mark-bit array of ph: returned to the unmanaged heap if
   ALLOC_MB_UNMANAGED, otherwise kept on the page list's free list for
   reuse. */
static void
free_mark_bits (page_header *ph)
{
#ifdef ALLOC_MB_UNMANAGED
  if (PH_MARK_BITS (ph))
    mc_free (PH_MARK_BITS (ph));
#else /* not ALLOC_MB_UNMANAGED */
  if (PH_MARK_BITS (ph)) {
    NEXT_FREE (PH_MARK_BITS (ph)) = PH_MARK_BIT_FREE_LIST (ph);
    PH_MARK_BIT_FREE_LIST (ph) = FREE_LIST (PH_MARK_BITS (ph));
  }
#endif /* not ALLOC_MB_UNMANAGED */
}
764
765
/* Installs mark bits for ph and zeros them.  Only pages with more
   cells than BITS_PER_EMACS_INT need a separate bit array; otherwise
   the mark_bits word itself is zeroed. */
static void
install_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    {
      PH_MARK_BITS (ph) = alloc_mark_bits (ph);
      ZERO_MARK_BITS_PNTR (ph);
    }
  else
    ZERO_MARK_BITS_WORD (ph);
}


/* Cleans and frees the mark bits of the given page_header.  A no-op
   for pages whose mark bits live in the mark_bits word. */
static void
remove_mark_bits (page_header *ph)
{
  if (USE_PNTR_MARK_BITS (ph))
    free_mark_bits (ph);
}


/* Zeros all mark bits in the given header. */
static void
zero_mark_bits (page_header *ph)
{
  ZERO_MARK_BITS (ph);
}
795
796
/* Returns the mark bit for the given heap pointer.  The pointer must
   belong to an object on a used-list page. */
EMACS_INT
get_mark_bit (void *ptr)
{
  EMACS_INT bit = 0;
  page_header *ph = get_page_header (ptr);
  gc_checking_assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      GET_BIT (bit, ph, get_mark_bit_index (ptr, ph));
    }
  return bit;
}
810
811
/* Sets the mark bit for the given heap pointer to value.  The pointer
   must belong to an object on a used-list page. */
void
set_mark_bit (void *ptr, EMACS_INT value)
{
  page_header *ph = get_page_header (ptr);
  assert (ph && PH_ON_USED_LIST_P (ph));
  if (ph)
    {
      SET_BIT (ph, get_mark_bit_index (ptr, ph), value);
    }
}
823
824
825
826
827 /*--- page header functions --------------------------------------------*/
828
/* Allocates a page header either from a free list or from the OS. */
static page_header *
alloc_page_header (void)
{
  page_header *result;
  if (PAGE_HEADER_FREE_LIST == 0)
    {
      result =
	(page_header *) xmalloc_and_zero ((EMACS_INT) (sizeof (page_header)));
#ifdef MEMORY_USAGE_STATS
      MC_MALLOCED_BYTES += malloced_storage_size (0, sizeof (page_header), 0);
#endif

    }
  else
    {
      /* Pop the first recycled header off the free list. */
      result = (page_header*) PAGE_HEADER_FREE_LIST;
      PAGE_HEADER_FREE_LIST = NEXT_FREE (result);
    }
  return result;
}
850
851
/* Frees the given page header by maintaining a free list; the storage
   is never returned to the OS. */
static void
free_page_header (page_header *ph)
{
#if ZERO_MEM
  ZERO_PAGE_HEADER (ph);
#endif
  NEXT_FREE (ph) = PAGE_HEADER_FREE_LIST;
  PAGE_HEADER_FREE_LIST = FREE_LIST (ph);
}
862
863
/* Adds the given page header to the given page list header's list. */
static void
add_page_header_to_plh (page_header *ph, page_list_header *plh)
{
  /* insert at the front of the list */
  PH_PREV (ph) = 0;
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PLH (ph) = plh;
  /* if list is not empty, set prev in the first element */
  if (PLH_FIRST (plh))
    PH_PREV (PLH_FIRST (plh)) = ph;
  /* one element in list is first and last at the same time */
  PLH_FIRST (plh) = ph;
  if (!PLH_LAST (plh))
    PLH_LAST (plh) = ph;

#ifdef MEMORY_USAGE_STATS
  /* bump page count */
  PLH_PAGE_COUNT (plh)++;
#endif

}
886
887
/* Removes the given page header from the given page list header's
   doubly linked list, fixing up first/last and neighbor links. */
static void
remove_page_header_from_plh (page_header *ph, page_list_header *plh)
{
  if (PLH_FIRST (plh) == ph)
    PLH_FIRST (plh) = PH_NEXT (ph);
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);

#ifdef MEMORY_USAGE_STATS
  /* decrease page count */
  PLH_PAGE_COUNT (plh)--;
#endif
}
906
907
/* Moves a page header to the front of its page header list.
   This is used during sweep: Pages with some alive objects are moved to
   the front.  This makes allocation faster, all pages with free slots
   can be found at the front of the list. */
static void
move_page_header_to_front (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);
  /* is page already first? */
  if (ph == PLH_FIRST (plh)) return;
  /* remove from list */
  if (PLH_LAST (plh) == ph)
    PLH_LAST (plh) = PH_PREV (ph);
  if (PH_NEXT (ph))
    PH_PREV (PH_NEXT (ph)) = PH_PREV (ph);
  if (PH_PREV (ph))
    PH_NEXT (PH_PREV (ph)) = PH_NEXT (ph);
  /* insert at the front; the old first element is non-null because ph
     was on the list and was not first */
  PH_NEXT (ph) = PLH_FIRST (plh);
  PH_PREV (ph) = 0;
  PH_PREV (PH_NEXT (ph)) = ph;
  PLH_FIRST (plh) = ph;
}
931
932
933
934
935 /*--- page list functions ----------------------------------------------*/
936
937 /* Returns the index of the used heap list according to given size. */
938 static int
939 get_used_list_index (size_t size)
940 {
941 if (size <= USED_LIST_MIN_OBJECT_SIZE)
942 return 0;
943 if (size <= USED_LIST_UPPER_THRESHOLD)
944 return ((size - USED_LIST_MIN_OBJECT_SIZE - 1)
945 / USED_LIST_LIN_STEP) + 1;
946 return N_USED_PAGE_LISTS - 1;
947 }
948
949
/* Returns the cell size of the used heap list with the given index,
   or 0 for the last list (objects bigger than the upper threshold,
   which occupy whole pages). */
static size_t
get_used_list_size_value (int used_index)
{
  if (used_index < N_USED_PAGE_LISTS - 1)
    return (used_index * USED_LIST_LIN_STEP) + USED_LIST_MIN_OBJECT_SIZE;
  return 0;
}
958
959
/* Returns the index of the unmanaged heap list according to the given
   size (mirrors get_used_list_index for the unmanaged size classes). */
static int
get_unmanaged_list_index (size_t size)
{
  if (size <= UNMANAGED_LIST_MIN_OBJECT_SIZE)
    return 0;
  if (size <= UNMANAGED_LIST_UPPER_THRESHOLD)
    return ((size - UNMANAGED_LIST_MIN_OBJECT_SIZE - 1)
	    / UNMANAGED_LIST_LIN_STEP) + 1;
  return N_UNMANAGED_PAGE_LISTS - 1;
}
971
972
/* Returns the cell size of the unmanaged heap list with the given
   index, or 0 for the last list (multi-page objects). */
static size_t
get_unmanaged_list_size_value (int unmanaged_index)
{
  if (unmanaged_index < N_UNMANAGED_PAGE_LISTS - 1)
    return (unmanaged_index * UNMANAGED_LIST_LIN_STEP)
      + UNMANAGED_LIST_MIN_OBJECT_SIZE;
  return 0;
}
982
983
/* Returns the index of the free heap list according to the given
   number of pages: one list per page count up to the lower threshold,
   then one list per FREE_LIST_LIN_STEP counts, and a single last list
   for everything at or above FREE_LIST_UPPER_THRESHOLD - 1. */
static int
get_free_list_index (EMACS_INT n_pages)
{
  if (n_pages == 0)
    return 0;
  if (n_pages <= FREE_LIST_LOWER_THRESHOLD)
    return n_pages - 1;
  if (n_pages >= FREE_LIST_UPPER_THRESHOLD - 1)
    return N_FREE_PAGE_LISTS - 1;
  return ((n_pages - FREE_LIST_LOWER_THRESHOLD - 1)
	  / FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;

}
998
999
/* Returns the size in number of pages of the given free list at
   index (the upper bound of the page counts kept on that list). */
static size_t
get_free_list_size_value (int free_index)
{
  if (free_index < FREE_LIST_LOWER_THRESHOLD)
    return free_index + 1;
  if (free_index >= N_FREE_PAGE_LISTS)
    return FREE_LIST_UPPER_THRESHOLD;
  return ((free_index + 1 - FREE_LIST_LOWER_THRESHOLD)
	  * FREE_LIST_LIN_STEP) + FREE_LIST_LOWER_THRESHOLD;
}
1011
1012
#ifdef MEMORY_USAGE_STATS
/* Returns the number of bytes the allocator actually uses to satisfy
   a request of claimed_size bytes, and accumulates the request and
   the rounding overhead into *stats (if non-NULL). */
Bytecount
mc_alloced_storage_size (Bytecount claimed_size, struct overhead_stats *stats)
{
  /* Cell size of the used-list size class; 0 means the object is too
     big for the used lists and occupies whole pages. */
  size_t used_size =
    get_used_list_size_value (get_used_list_index (claimed_size));
  if (used_size == 0)
    used_size = mult_PAGE_SIZE (BYTES_TO_PAGES (claimed_size));

  if (stats)
    {
      stats->was_requested += claimed_size;
      stats->malloc_overhead += used_size - claimed_size;
    }

  return used_size;
}
#endif /* MEMORY_USAGE_STATS */
1031
1032
1033
1034 /*--- free heap functions ----------------------------------------------*/
1035
/* Frees a heap section, if the heap section is completely free.
   Returns 1 if a section was returned to the OS, 0 otherwise.  The
   remaining sections are shifted down to keep the section table
   dense.
   NOTE(review): ph's own page_header is not released here; presumably
   the caller owns it -- verify against merge_into_free_list's
   callers. */
static EMACS_INT
free_heap_section (page_header *ph)
{
  int i;
  int removed = 0;
  for (i = 0; i < N_HEAP_SECTIONS; i++)
    if (!removed)
      {
	/* A section is only freeable if ph's page run covers it
	   exactly. */
	if ((PH_HEAP_SPACE (ph) == HEAP_SECTION(i).start)
	    && (PH_N_PAGES (ph) == HEAP_SECTION(i).n_pages))
	  {
	    xfree_1 (HEAP_SECTION(i).real_start);
#ifdef MEMORY_USAGE_STATS
	    MC_MALLOCED_BYTES
	      -= malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
#endif

	    HEAP_SIZE -= PH_N_PAGES (ph) * PAGE_SIZE;

	    removed = 1;
	  }
      }
    else
      {
	/* Close the gap left by the removed section. */
	HEAP_SECTION(i-1).real_start = HEAP_SECTION(i).real_start;
	HEAP_SECTION(i-1).real_size = HEAP_SECTION(i).real_size;
	HEAP_SECTION(i-1).start = HEAP_SECTION(i).start;
	HEAP_SECTION(i-1).n_pages = HEAP_SECTION(i).n_pages;
      }

  N_HEAP_SECTIONS = N_HEAP_SECTIONS - removed;

  return removed;
}
1071
/* Removes the page from its free list and clears its back pointer. */
static void
remove_page_from_free_list (page_header *ph)
{
  remove_page_header_from_plh (ph, PH_PLH (ph));
  PH_PLH (ph) = 0;
}


/* Adds the page to the free list matching its page count. */
static void
add_page_to_free_list (page_header *ph)
{
  PH_PLH (ph) = FREE_HEAP_PAGES (get_free_list_index (PH_N_PAGES (ph)));
  add_page_header_to_plh (ph, PH_PLH (ph));
}
1088
1089
/* Merges two adjacent page runs into first_ph and frees second_ph's
   header.  Returns the merged header.  The caller guarantees the runs
   are physically adjacent (second directly follows first). */
static page_header *
merge_pages (page_header *first_ph, page_header *second_ph)
{
  /* merge */
  PH_N_PAGES (first_ph) += PH_N_PAGES (second_ph);
  /* clean up left over page header */
  free_page_header (second_ph);
  /* update lookup table: all pages now map to first_ph */
  add_pages_to_lookup_table (first_ph, PH_N_PAGES (first_ph));

  return first_ph;
}
1103
1104
/* Checks if pages adjacent to ph are free, coalesces them with ph,
   and adds the merged run to the appropriate free list -- unless the
   run now covers a whole heap section, in which case the section is
   returned to the OS instead. */
static void
merge_into_free_list (page_header *ph)
{
  /* check if you can coalesce adjacent pages */
  page_header *prev_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
				       - PAGE_SIZE));
  page_header *succ_ph =
    get_page_header_internal ((void*) (((EMACS_INT) PH_HEAP_SPACE (ph))
				       + (PH_N_PAGES (ph) * PAGE_SIZE)));
  if (PH_ON_FREE_LIST_P (prev_ph))
    {
      remove_page_from_free_list (prev_ph);
      ph = merge_pages (prev_ph, ph);
    }
  if (PH_ON_FREE_LIST_P (succ_ph))
    {
      remove_page_from_free_list (succ_ph);
      ph = merge_pages (ph, succ_ph);
    }
  /* try to free heap_section, if the section is complete */
  if (!free_heap_section (ph))
    /* else add merged page to free list */
    add_page_to_free_list (ph);
}
1132
1133
/* Cuts the given page header after n_pages, returns the first (cut)
   part, and puts the rest on the free list. */
static page_header *
split_page (page_header *ph, EMACS_INT n_pages)
{
  page_header *new_ph;
  EMACS_INT rem_pages = PH_N_PAGES (ph) - n_pages;

  /* remove the page from the free list if already hooked in */
  if (PH_PLH (ph))
    remove_page_from_free_list (ph);
  /* set new number of pages */
  PH_N_PAGES (ph) = n_pages;
  /* add new page to lookup table */
  add_pages_to_lookup_table (ph, n_pages);

  if (rem_pages)
    {
      /* build new page with the remainder */
      new_ph = alloc_page_header ();
      PH_N_PAGES (new_ph) = rem_pages;
      PH_HEAP_SPACE (new_ph) =
	(void*) ((EMACS_INT) (PH_HEAP_SPACE (ph)) + (n_pages * PAGE_SIZE));
      /* add new page to lookup table */
      add_pages_to_lookup_table (new_ph, rem_pages);
      /* hook the rest into free list */
      add_page_to_free_list (new_ph);
    }
  return ph;
}
1164
1165
/* Expands the heap by given number of pages.  Grows by at least
   needed_pages, registers the new memory as a heap section, and
   returns a page header covering exactly needed_pages; any surplus
   goes to the free list via split_page. */
static page_header *
expand_heap (EMACS_INT needed_pages)
{
  page_header *ph;
  EMACS_INT n_pages;
  size_t real_size;
  void *real_start;

  /* determine number of pages the heap should grow: the request plus
     a fraction of the current heap size, bounded below by
     MIN_HEAP_INCREASE */
  n_pages = needed_pages + (HEAP_SIZE / (PAGE_SIZE * HEAP_GROWTH_DIVISOR));
  if (n_pages < MIN_HEAP_INCREASE)
    n_pages = MIN_HEAP_INCREASE;

  /* get the real values; one extra page is allocated so the usable
     region can be aligned to a PAGE_SIZE boundary below */
  real_size = (n_pages * PAGE_SIZE) + PAGE_SIZE;
  real_start = xmalloc_and_zero (real_size);
#ifdef MEMORY_USAGE_STATS
  MC_MALLOCED_BYTES += malloced_storage_size (0, real_size, 0);
#endif

  /* maintain heap section count; the section table is statically
     sized, so overflowing it is fatal */
  if (N_HEAP_SECTIONS >= MAX_HEAP_SECTS)
    {
      stderr_out ("Increase number of MAX_HEAP_SECTS");
      ABORT ();
    }
  HEAP_SECTION(N_HEAP_SECTIONS).real_start = real_start;
  HEAP_SECTION(N_HEAP_SECTIONS).real_size = real_size;
  HEAP_SECTION(N_HEAP_SECTIONS).start = PAGE_SIZE_ALIGNMENT (real_start);
  HEAP_SECTION(N_HEAP_SECTIONS).n_pages = n_pages;
  N_HEAP_SECTIONS ++;

  /* get page header */
  ph = alloc_page_header ();

  /* setup page header */
  PH_N_PAGES (ph) = n_pages;
  PH_HEAP_SPACE (ph) = PAGE_SIZE_ALIGNMENT (real_start);
  assert (((EMACS_INT) (PH_HEAP_SPACE (ph)) % PAGE_SIZE) == 0);
  HEAP_SIZE += n_pages * PAGE_SIZE;

  /* this was also done by allocate_lisp_storage */
  if (need_to_check_c_alloca)
    xemacs_c_alloca (0);

  /* return needed size, put rest on free list */
  return split_page (ph, needed_pages);
}
1215
1216
1217
1218
1219 /*--- used heap functions ----------------------------------------------*/
/* Installs initial free list: threads every cell of the page into a
   singly linked free list in memory order and hooks the list head
   into the page header. */
static void
install_cell_free_list (page_header *ph)
{
  char *p;
  int i;
  EMACS_INT cell_size = PH_CELL_SIZE (ph);
  /* write initial free list if cell_size is < PAGE_SIZE */
  p = (char *) PH_HEAP_SPACE (ph);
  for (i = 0; i < PH_CELLS_ON_PAGE (ph) - 1; i++)
    {
#ifdef ERROR_CHECK_GC
      assert (!LRECORD_FREE_P (p));
      MARK_LRECORD_AS_FREE (p);
#endif
      /* each cell points at the next one in memory order */
      NEXT_FREE (p) = FREE_LIST (p + cell_size);
      set_lookup_table (p, ph);
      p += cell_size;
    }
#ifdef ERROR_CHECK_GC
  assert (!LRECORD_FREE_P (p));
  MARK_LRECORD_AS_FREE (p);
#endif
  /* the last cell terminates the list */
  NEXT_FREE (p) = 0;
  set_lookup_table (p, ph);

  /* hook free list into header */
  PH_FREE_LIST (ph) = FREE_LIST (PH_HEAP_SPACE (ph));
}
1249
1250
/* Cleans the object space of the given page_header: drops the cell
   free list and (optionally) scrubs the page's heap space. */
static void
remove_cell_free_list (page_header *ph)
{
#if ZERO_MEM
  /* optionally zero out the storage before it is reused */
  ZERO_HEAP_SPACE (ph);
#endif
  PH_FREE_LIST (ph) = 0;
}
1260
1261
1262 /* Installs a new page and hooks it into given page_list_header. */
1263 static page_header *
1264 install_page_in_used_list (page_header *ph, page_list_header *plh,
1265 size_t size, int managed)
1266 {
1267 /* add to list */
1268 add_page_header_to_plh (ph, plh);
1269
1270 /* determine cell size */
1271 if (PLH_SIZE (plh))
1272 PH_CELL_SIZE (ph) = PLH_SIZE (plh);
1273 else
1274 PH_CELL_SIZE (ph) = size;
1275 PH_CELLS_ON_PAGE (ph) = (PAGE_SIZE * PH_N_PAGES (ph)) / PH_CELL_SIZE (ph);
1276
1277 /* init cell count */
1278 PH_CELLS_USED (ph) = 0;
1279
1280 /* install mark bits and initialize cell free list */
1281 if (managed)
1282 install_mark_bits (ph);
1283
1284 install_cell_free_list (ph);
1285
1286 #ifdef MEMORY_USAGE_STATS
1287 PLH_TOTAL_CELLS (plh) += PH_CELLS_ON_PAGE (ph);
1288 PLH_TOTAL_SPACE (plh) += PAGE_SIZE * PH_N_PAGES (ph);
1289 #endif
1290
1291 return ph;
1292 }
1293
1294
/* Cleans and frees a page, identified by the given page_header.
   Undoes the page's statistics, tears down its free list and mark
   bits, unhooks it from its list, and returns it to the free heap. */
static void
remove_page_from_used_list (page_header *ph)
{
  page_list_header *plh = PH_PLH (ph);

#ifdef MEMORY_USAGE_STATS
  /* undo this page's contribution to its list's totals */
  PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph);
  PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph);
#endif

  /* clean up mark bits and cell free list */
  remove_cell_free_list (ph);
  /* NOTE(review): presumably only managed (used-list) pages carry
     mark bits, hence the test before remove_mark_bits -- confirm */
  if (PH_ON_USED_LIST_P (ph))
    remove_mark_bits (ph);

  /* clean up page header */
  PH_CELL_SIZE (ph) = 0;
  PH_CELLS_ON_PAGE (ph) = 0;
  PH_CELLS_USED (ph) = 0;

  /* remove from used list */
  remove_page_header_from_plh (ph, plh);

  /* move to free list */
  merge_into_free_list (ph);
}
1322
1323
1324
1325
1326 /*--- allocation -------------------------------------------------------*/
1327
/* Allocates from cell free list on already allocated pages.  Only the
   first two pages of the list are examined: if the first page is full
   but the second has free cells, the full first page is rotated to
   the end of the list and the second page promoted, so future
   allocations find a page with space up front.  Returns the page to
   allocate from, or 0 if no free cell is available. */
static page_header *
allocate_cell (page_list_header *plh)
{
  page_header *ph = PLH_FIRST (plh);
  if (ph)
    {
      if (PH_FREE_LIST (ph))
	/* elements free on first page */
	return ph;
      else if ((PH_NEXT (ph))
	       && (PH_FREE_LIST (PH_NEXT (ph))))
	/* elements free on second page */
	{
	  page_header *temp = PH_NEXT (ph);
	  /* move full page (first page) to end of list */
	  PH_NEXT (PLH_LAST (plh)) = ph;
	  PH_PREV (ph) = PLH_LAST (plh);
	  PLH_LAST (plh) = ph;
	  PH_NEXT (ph) = 0;
	  /* install second page as first page */
	  ph = temp;
	  PH_PREV (ph) = 0;
	  PLH_FIRST (plh) = ph;
	  return ph;
	}
    }
  return 0;
}
1357
1358
1359 /* Finds a page which has at least the needed number of pages.
1360 Algorithm: FIRST FIT. */
1361 static page_header *
1362 find_free_page_first_fit (EMACS_INT needed_pages, page_header *ph)
1363 {
1364 while (ph)
1365 {
1366 if (PH_N_PAGES (ph) >= needed_pages)
1367 return ph;
1368 ph = PH_NEXT (ph);
1369 }
1370 return 0;
1371 }
1372
1373
1374 /* Allocates a page from the free list. */
1375 static page_header *
1376 allocate_page_from_free_list (EMACS_INT needed_pages)
1377 {
1378 page_header *ph = 0;
1379 int i;
1380 for (i = get_free_list_index (needed_pages); i < N_FREE_PAGE_LISTS; i++)
1381 if ((ph = find_free_page_first_fit (needed_pages,
1382 PLH_FIRST (FREE_HEAP_PAGES (i)))) != 0)
1383 {
1384 if (PH_N_PAGES (ph) > needed_pages)
1385 return split_page (ph, needed_pages);
1386 else
1387 {
1388 remove_page_from_free_list (ph);
1389 return ph;
1390 }
1391 }
1392 return 0;
1393 }
1394
1395
1396 /* Allocates a new page, either from free list or by expanding the heap. */
1397 static page_header *
1398 allocate_new_page (page_list_header *plh, size_t size, int managed)
1399 {
1400 EMACS_INT needed_pages = BYTES_TO_PAGES (size);
1401 /* first check free list */
1402 page_header *result = allocate_page_from_free_list (needed_pages);
1403 if (!result)
1404 /* expand heap */
1405 result = expand_heap (needed_pages);
1406 install_page_in_used_list (result, plh, size, managed);
1407 return result;
1408 }
1409
1410
1411 /* Selects the correct size class, tries to allocate a cell of this size
1412 from the free list, if this fails, a new page is allocated. */
1413 static void *
1414 mc_alloc_1 (size_t size, int managed)
1415 {
1416 page_list_header *plh = 0;
1417 if (managed)
1418 plh = USED_HEAP_PAGES (get_used_list_index (size));
1419 else
1420 plh = UNMANAGED_HEAP_PAGES (get_unmanaged_list_index (size));
1421
1422 page_header *ph = 0;
1423 void *result = 0;
1424 if (size == 0)
1425 return 0;
1426 if (size < PAGE_SIZE_DIV_2)
1427 /* first check any free cells */
1428 ph = allocate_cell (plh);
1429 if (!ph)
1430 /* allocate a new page */
1431 ph = allocate_new_page (plh, size, managed);
1432
1433 /* return first element of free list and remove it from the list */
1434 result = (void*) PH_FREE_LIST (ph);
1435 PH_FREE_LIST (ph) =
1436 NEXT_FREE (PH_FREE_LIST (ph));
1437
1438 memset (result, '\0', size);
1439 if (managed)
1440 MARK_LRECORD_AS_FREE (result);
1441
1442 /* bump used cells counter */
1443 PH_CELLS_USED (ph)++;
1444
1445 #ifdef MEMORY_USAGE_STATS
1446 PLH_USED_CELLS (plh)++;
1447 if (managed)
1448 PLH_USED_SPACE (plh) += size;
1449 else
1450 PLH_USED_SPACE (plh) += PLH_SIZE (plh);
1451 #endif
1452
1453 return result;
1454 }
1455
/* Allocates a managed (garbage-collected) cell of at least SIZE bytes. */
void *
mc_alloc (size_t size)
{
  return mc_alloc_1 (size, 1);
}
1461
/* Allocates an unmanaged cell (not garbage-collected; must be released
   explicitly with mc_free) of at least SIZE bytes. */
void *
mc_alloc_unmanaged (size_t size)
{
  return mc_alloc_1 (size, 0);
}
1467
1468
1469
1470
1471 /*--- sweep & free & finalize-------------------------------------------*/
1472
/* Frees a heap pointer: hooks the cell at ptr back into its page's
   cell free list and updates the bookkeeping counters. */
static void
remove_cell (void *ptr, page_header *ph)
{
#ifdef MEMORY_USAGE_STATS
  PLH_USED_CELLS (PH_PLH (ph))--;
  if (PH_ON_USED_LIST_P (ph))
    /* NOTE(review): mc_alloc_1 added the requested size for managed
       cells; here the detagged object size is subtracted -- presumed
       to match, verify */
    PLH_USED_SPACE (PH_PLH (ph)) -=
      detagged_lisp_object_size ((const struct lrecord_header *) ptr);
  else
    PLH_USED_SPACE (PH_PLH (ph)) -= PH_CELL_SIZE (ph);
#endif
#ifdef ERROR_CHECK_GC
  if (PH_ON_USED_LIST_P (ph)) {
#ifdef MC_ALLOC_TYPE_STATS
    dec_lrecord_stats (PH_CELL_SIZE (ph),
		       (const struct lrecord_header *) ptr);
#endif /* MC_ALLOC_TYPE_STATS */
    /* freeing an already-free cell is a bug */
    assert (!LRECORD_FREE_P (ptr));
    /* poison the storage to catch stale references early */
    deadbeef_memory (ptr, PH_CELL_SIZE (ph));
    MARK_LRECORD_AS_FREE (ptr);
  }
#endif

  /* hooks cell into free list */
  NEXT_FREE (ptr) = PH_FREE_LIST (ph);
  PH_FREE_LIST (ph) = FREE_LIST (ptr);
  /* decrease cells used */
  PH_CELLS_USED (ph)--;
}
1503
1504
1505 /* Mark free list marks all free list entries. */
1506 static void
1507 mark_free_list (page_header *ph)
1508 {
1509 free_link *fl = PH_FREE_LIST (ph);
1510 while (fl)
1511 {
1512 SET_BIT (ph, get_mark_bit_index (fl, ph), 1);
1513 fl = NEXT_FREE (fl);
1514 }
1515 }
1516
1517 /* Finalize a page. You have to tell mc-alloc how to call your
1518 object's finalizer. Therefore, you have to define the macro
1519 MC_ALLOC_CALL_FINALIZER(ptr). This macro should do nothing else
1520 then test if there is a finalizer and call it on the given
1521 argument, which is the heap address of the object. */
1522 static void
1523 finalize_page (page_header *ph)
1524 {
1525 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
1526 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
1527 EMACS_INT mark_bit = 0;
1528 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
1529 int bit = 0;
1530
1531 mark_free_list (ph);
1532
1533 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
1534 {
1535 GET_BIT (bit, ph, mark_bit);
1536 if (!bit)
1537 {
1538 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
1539 MC_ALLOC_CALL_FINALIZER ((void *) ptr);
1540 }
1541 }
1542 }
1543
1544
1545 /* Finalize a page for disksave. XEmacs calls this routine before it
1546 dumps the heap image. You have to tell mc-alloc how to call your
1547 object's finalizer for disksave. Therefore, you have to define the
1548 macro MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE(ptr). This macro should
1549 do nothing else then test if there is a finalizer and call it on
1550 the given argument, which is the heap address of the object. */
1551 static void
1552 finalize_page_for_disksave (page_header *ph)
1553 {
1554 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph);
1555 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
1556 EMACS_INT mark_bit = 0;
1557 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
1558
1559 mark_free_list (ph);
1560
1561 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
1562 {
1563 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit));
1564 MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr);
1565 }
1566 }
1567
1568
/* Finalizes the heap: runs finalize_page over every used page. */
void
mc_finalize (void)
{
  visit_all_used_page_headers (finalize_page);
}
1575
1576
/* Finalizes the heap for disksave: runs finalize_page_for_disksave
   over every used page before the heap image is dumped. */
void
mc_finalize_for_disksave (void)
{
  visit_all_used_page_headers (finalize_page_for_disksave);
}
1583
1584
1585 /* Sweeps a page: all the non-marked cells are freed. If the page is empty
1586 in the end, it is removed. If some cells are free, it is moved to the
1587 front of its page header list. Full pages stay where they are. */
1588 static void
1589 sweep_page (page_header *ph)
1590 {
1591 char *heap_space = (char *) PH_HEAP_SPACE (ph);
1592 EMACS_INT heap_space_step = PH_CELL_SIZE (ph);
1593 EMACS_INT mark_bit = 0;
1594 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph);
1595 int bit = 0;
1596
1597 mark_free_list (ph);
1598
1599 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++)
1600 {
1601 GET_BIT (bit, ph, mark_bit);
1602 if (!bit)
1603 {
1604 remove_cell (heap_space + (heap_space_step * mark_bit), ph);
1605 }
1606 }
1607 zero_mark_bits (ph);
1608 if (PH_CELLS_USED (ph) == 0)
1609 remove_page_from_used_list (ph);
1610 else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
1611 move_page_header_to_front (ph);
1612 }
1613
1614
/* Sweeps the heap: runs sweep_page over every used page. */
void
mc_sweep (void)
{
  visit_all_used_page_headers (sweep_page);
}
1621
1622
/* Frees the cell pointed to by ptr.  The cell must live on a used
   page (freeing an already-free cell trips the assertion).  A page
   that becomes empty is returned to the free heap; a partially used
   page moves to the front of its list, mirroring sweep_page. */
void
mc_free (void *ptr)
{
  page_header *ph = get_page_header (ptr);
  assert (!PH_ON_FREE_LIST_P (ph));

  remove_cell (ptr, ph);

  if (PH_CELLS_USED (ph) == 0)
    remove_page_from_used_list (ph);
  else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph))
    move_page_header_to_front (ph);
}
1637
1638
/* Changes the size of the cell pointed to by ptr, following the usual
   realloc contract: a null ptr degenerates to an allocation, a zero
   size to a free.  Otherwise a new cell is allocated, the old
   contents copied (up to the smaller of the old cell size and the new
   size), and the old cell released.  Returns the new cell's address. */
void *
mc_realloc_1 (void *ptr, size_t size, int managed)
{
  if (!ptr)
    return mc_alloc_1 (size, managed);

  if (!size)
    {
      mc_free (ptr);
      return 0;
    }

  {
    void *new_cell = mc_alloc_1 (size, managed);
    size_t old_size = PH_CELL_SIZE (get_page_header (ptr));
    /* copy no more than the old cell actually holds */
    memcpy (new_cell, ptr, size < old_size ? size : old_size);
    mc_free (ptr);
    return new_cell;
  }
}
1666
/* Reallocates a managed cell; see mc_realloc_1 for the contract. */
void *
mc_realloc (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 1);
}
1672
/* Reallocates an unmanaged cell; see mc_realloc_1 for the contract. */
void *
mc_realloc_unmanaged (void *ptr, size_t size)
{
  return mc_realloc_1 (ptr, size, 0);
}
1678
1679
1680
1681
1682 /*--- initialization ---------------------------------------------------*/
1683
1684 /* Call once at the very beginning. */
1685 void
1686 init_mc_allocator (void)
1687 {
1688 int i;
1689
1690 for (i = 0; i < N_USED_PAGE_LISTS; i++)
1691 {
1692 page_list_header *plh = USED_HEAP_PAGES (i);
1693 PLH_LIST_TYPE (plh) = USED_LIST;
1694 PLH_SIZE (plh) = get_used_list_size_value (i);
1695 PLH_FIRST (plh) = 0;
1696 PLH_LAST (plh) = 0;
1697 PLH_MARK_BIT_FREE_LIST (plh) = 0;
1698 #ifdef MEMORY_USAGE_STATS
1699 PLH_PAGE_COUNT (plh) = 0;
1700 PLH_USED_CELLS (plh) = 0;
1701 PLH_USED_SPACE (plh) = 0;
1702 PLH_TOTAL_CELLS (plh) = 0;
1703 PLH_TOTAL_SPACE (plh) = 0;
1704 #endif
1705 }
1706
1707 for (i = 0; i < N_UNMANAGED_PAGE_LISTS; i++)
1708 {
1709 page_list_header *plh = UNMANAGED_HEAP_PAGES (i);
1710 PLH_LIST_TYPE (plh) = UNMANAGED_LIST;
1711 PLH_SIZE (plh) = get_unmanaged_list_size_value (i);
1712 PLH_FIRST (plh) = 0;
1713 PLH_LAST (plh) = 0;
1714 PLH_MARK_BIT_FREE_LIST (plh) = 0;
1715 #ifdef MEMORY_USAGE_STATS
1716 PLH_PAGE_COUNT (plh) = 0;
1717 PLH_USED_CELLS (plh) = 0;
1718 PLH_USED_SPACE (plh) = 0;
1719 PLH_TOTAL_CELLS (plh) = 0;
1720 PLH_TOTAL_SPACE (plh) = 0;
1721 #endif
1722 }
1723
1724 for (i = 0; i < N_FREE_PAGE_LISTS; i++)
1725 {
1726 page_list_header *plh = FREE_HEAP_PAGES (i);
1727 PLH_LIST_TYPE (plh) = FREE_LIST;
1728 PLH_SIZE (plh) = get_free_list_size_value (i);
1729 PLH_FIRST (plh) = 0;
1730 PLH_LAST (plh) = 0;
1731 PLH_MARK_BIT_FREE_LIST (plh) = 0;
1732 #ifdef MEMORY_USAGE_STATS
1733 PLH_PAGE_COUNT (plh) = 0;
1734 PLH_USED_CELLS (plh) = 0;
1735 PLH_USED_SPACE (plh) = 0;
1736 PLH_TOTAL_CELLS (plh) = 0;
1737 PLH_TOTAL_SPACE (plh) = 0;
1738 #endif
1739 }
1740
1741 PAGE_HEADER_FREE_LIST = 0;
1742
1743 #ifdef MEMORY_USAGE_STATS
1744 MC_MALLOCED_BYTES = sizeof (mc_allocator_globals);
1745 #endif
1746
1747 init_lookup_table ();
1748 }
1749
1750
1751
1752
1753 /*--- lisp function for statistics -------------------------------------*/
1754
1755 #ifdef MEMORY_USAGE_STATS
1756 DEFUN ("mc-alloc-memory-usage", Fmc_alloc_memory_usage, 0, 0, 0, /*
1757 Returns stats about the mc-alloc memory usage. See diagnose.el.
1758 */
1759 ())
1760 {
1761 Lisp_Object free_plhs = Qnil;
1762 Lisp_Object used_plhs = Qnil;
1763 Lisp_Object unmanaged_plhs = Qnil;
1764 Lisp_Object heap_sects = Qnil;
1765 int used_size = 0;
1766 int real_size = 0;
1767
1768 int i;
1769
1770 for (i = 0; i < N_FREE_PAGE_LISTS; i++)
1771 if (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)) > 0)
1772 free_plhs =
1773 acons (make_int (PLH_SIZE (FREE_HEAP_PAGES(i))),
1774 list1 (make_int (PLH_PAGE_COUNT (FREE_HEAP_PAGES(i)))),
1775 free_plhs);
1776
1777 for (i = 0; i < N_UNMANAGED_PAGE_LISTS; i++)
1778 if (PLH_PAGE_COUNT (UNMANAGED_HEAP_PAGES(i)) > 0)
1779 unmanaged_plhs =
1780 acons (make_int (PLH_SIZE (UNMANAGED_HEAP_PAGES(i))),
1781 list5 (make_int (PLH_PAGE_COUNT (UNMANAGED_HEAP_PAGES(i))),
1782 make_int (PLH_USED_CELLS (UNMANAGED_HEAP_PAGES(i))),
1783 make_int (PLH_USED_SPACE (UNMANAGED_HEAP_PAGES(i))),
1784 make_int (PLH_TOTAL_CELLS (UNMANAGED_HEAP_PAGES(i))),
1785 make_int (PLH_TOTAL_SPACE (UNMANAGED_HEAP_PAGES(i)))),
1786 unmanaged_plhs);
1787
1788 for (i = 0; i < N_USED_PAGE_LISTS; i++)
1789 if (PLH_PAGE_COUNT (USED_HEAP_PAGES(i)) > 0)
1790 used_plhs =
1791 acons (make_int (PLH_SIZE (USED_HEAP_PAGES(i))),
1792 list5 (make_int (PLH_PAGE_COUNT (USED_HEAP_PAGES(i))),
1793 make_int (PLH_USED_CELLS (USED_HEAP_PAGES(i))),
1794 make_int (PLH_USED_SPACE (USED_HEAP_PAGES(i))),
1795 make_int (PLH_TOTAL_CELLS (USED_HEAP_PAGES(i))),
1796 make_int (PLH_TOTAL_SPACE (USED_HEAP_PAGES(i)))),
1797 used_plhs);
1798
1799 for (i = 0; i < N_HEAP_SECTIONS; i++) {
1800 used_size += HEAP_SECTION(i).n_pages * PAGE_SIZE;
1801 real_size +=
1802 malloced_storage_size (0, HEAP_SECTION(i).real_size, 0);
1803 }
1804
1805 heap_sects =
1806 list3 (make_int (N_HEAP_SECTIONS),
1807 make_int (used_size),
1808 make_int (real_size));
1809
1810 return Fcons (make_int (PAGE_SIZE),
1811 list6 (heap_sects,
1812 Fnreverse (used_plhs),
1813 Fnreverse (unmanaged_plhs),
1814 Fnreverse (free_plhs),
1815 make_int (sizeof (mc_allocator_globals)),
1816 make_int (MC_MALLOCED_BYTES)));
1817 }
1818 #endif /* MEMORY_USAGE_STATS */
1819
/* Registers this module's lisp-visible functions. */
void
syms_of_mc_alloc (void)
{
#ifdef MEMORY_USAGE_STATS
  DEFSUBR (Fmc_alloc_memory_usage);
#endif /* MEMORY_USAGE_STATS */
}