comparison src/mc-alloc.c @ 3263:d674024a8674
[xemacs-hg @ 2006-02-27 16:29:00 by crestani]
- Introduce a fancy asynchronous finalization strategy on C level.
- Merge the code conditioned on MC_ALLOC into the code conditioned on
NEW_GC.
- Remove the possibility to free objects manually outside garbage
collections when the new collector is enabled.
author   | crestani
date     | Mon, 27 Feb 2006 16:29:29 +0000
parents  | 26a547441418
children | 619edf713d55
3262:79d41cfd8e6b (old) | 3263:d674024a8674 (new)
------------------------|--------------------------
401 #define PH_ON_USED_LIST_P(ph) \ | 401 #define PH_ON_USED_LIST_P(ph) \ |
402 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST)) | 402 (ph && PH_PLH (ph) && (PLH_LIST_TYPE (PH_PLH (ph)) == USED_LIST)) |
403 | 403 |
404 | 404 |
405 /* Number of mark bits: minimum 1, maximum 8. */ | 405 /* Number of mark bits: minimum 1, maximum 8. */ |
406 #ifdef NEW_GC | |
407 #define N_MARK_BITS 2 | 406 #define N_MARK_BITS 2 |
408 #else /* not NEW_GC */ | |
409 #define N_MARK_BITS 1 | |
410 #endif /* not NEW_GC */ | |
411 | 407 |
412 | 408 |
413 | 409 |
414 /************************************************************************/ | 410 /************************************************************************/ |
415 /* MC Allocator */ | 411 /* MC Allocator */ |
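The hunk above is where the two collectors diverge on mark-bit layout: MC_ALLOC used a single mark bit per cell, while NEW_GC always reserves two, which is enough to encode more than the two colors (WHITE and BLACK) that appear in the hunks below. The following is only an illustration of how 2-bit mark fields can be packed into a byte array; the real GET_BIT/SET_BIT macros in mc-alloc.c are not reproduced here and may be implemented differently.

#include <limits.h>   /* CHAR_BIT */
#include <stddef.h>   /* size_t */

#define N_MARK_BITS 2
#define MARK_MASK   ((1u << N_MARK_BITS) - 1)   /* 0x3 */

/* Read the 2-bit mark value of cell `cell_index'. */
static unsigned int
get_mark_value (const unsigned char *bits, size_t cell_index)
{
  size_t bit_index = cell_index * N_MARK_BITS;
  return (bits[bit_index / CHAR_BIT] >> (bit_index % CHAR_BIT)) & MARK_MASK;
}

/* Overwrite the 2-bit mark value of cell `cell_index' with `value'. */
static void
set_mark_value (unsigned char *bits, size_t cell_index, unsigned int value)
{
  size_t bit_index = cell_index * N_MARK_BITS;
  unsigned int shift = (unsigned int) (bit_index % CHAR_BIT);
  bits[bit_index / CHAR_BIT] &= (unsigned char) ~(MARK_MASK << shift);
  bits[bit_index / CHAR_BIT] |= (unsigned char) ((value & MARK_MASK) << shift);
}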
771 { | 767 { |
772 page_header *ph = get_page_header (ptr); | 768 page_header *ph = get_page_header (ptr); |
773 assert (ph && PH_ON_USED_LIST_P (ph)); | 769 assert (ph && PH_ON_USED_LIST_P (ph)); |
774 if (ph) | 770 if (ph) |
775 { | 771 { |
776 #ifdef NEW_GC | |
777 if (value == BLACK) | 772 if (value == BLACK) |
778 if (!PH_BLACK_BIT (ph)) | 773 if (!PH_BLACK_BIT (ph)) |
779 PH_BLACK_BIT (ph) = 1; | 774 PH_BLACK_BIT (ph) = 1; |
780 #endif /* NEW_GC */ | |
781 SET_BIT (ph, get_mark_bit_index (ptr, ph), value); | 775 SET_BIT (ph, get_mark_bit_index (ptr, ph), value); |
782 } | 776 } |
783 } | 777 } |
784 | 778 |
785 | 779 |
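The function above (NEW_GC column) records one extra piece of information: the first time a cell on a page is colored BLACK, the page header's PH_BLACK_BIT is raised, presumably so later phases can tell whether a page holds any black object without rescanning its mark bits. A minimal, self-contained sketch of that pattern, using one byte per cell instead of packed N_MARK_BITS fields and stand-in color values:

enum cell_color { CELL_WHITE = 0, CELL_BLACK = 3 };  /* stand-ins; the real
                                                        WHITE/BLACK values are
                                                        not shown in this hunk */

struct page_marks
{
  unsigned char mark[4096];   /* one mark per cell, simplified layout */
  int black_bit;              /* analogue of PH_BLACK_BIT */
};

/* Color one cell and keep the per-page summary flag in sync, mirroring
   the BLACK check in the hunk above. */
static void
color_cell (struct page_marks *p, unsigned int cell, enum cell_color value)
{
  if (value == CELL_BLACK && !p->black_bit)
    p->black_bit = 1;
  p->mark[cell] = (unsigned char) value;
}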
1269 static void | 1263 static void |
1270 remove_page_from_used_list (page_header *ph) | 1264 remove_page_from_used_list (page_header *ph) |
1271 { | 1265 { |
1272 page_list_header *plh = PH_PLH (ph); | 1266 page_list_header *plh = PH_PLH (ph); |
1273 | 1267 |
1274 #ifdef NEW_GC | |
1275 if (gc_in_progress && PH_PROTECTION_BIT (ph)) ABORT(); | 1268 if (gc_in_progress && PH_PROTECTION_BIT (ph)) ABORT(); |
1276 /* cleanup: remove memory protection, zero page_header bits. */ | 1269 /* cleanup: remove memory protection, zero page_header bits. */ |
1277 #endif /* not NEW_GC */ | |
1278 | 1270 |
1279 #ifdef MEMORY_USAGE_STATS | 1271 #ifdef MEMORY_USAGE_STATS |
1280 PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph); | 1272 PLH_TOTAL_CELLS (plh) -= PH_CELLS_ON_PAGE (ph); |
1281 PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph); | 1273 PLH_TOTAL_SPACE (plh) -= PAGE_SIZE * PH_N_PAGES (ph); |
1282 #endif | 1274 #endif |
1480 mark_free_list (page_header *ph) | 1472 mark_free_list (page_header *ph) |
1481 { | 1473 { |
1482 free_link *fl = PH_FREE_LIST (ph); | 1474 free_link *fl = PH_FREE_LIST (ph); |
1483 while (fl) | 1475 while (fl) |
1484 { | 1476 { |
1485 #ifdef NEW_GC | |
1486 SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK); | 1477 SET_BIT (ph, get_mark_bit_index (fl, ph), BLACK); |
1487 #else /* not NEW_GC */ | |
1488 SET_BIT (ph, get_mark_bit_index (fl, ph), 1); | |
1489 #endif /* not NEW_GC */ | |
1490 fl = NEXT_FREE (fl); | 1478 fl = NEXT_FREE (fl); |
1491 } | |
1492 } | |
1493 | |
1494 /* Finalize a page. You have to tell mc-alloc how to call your | |
1495 object's finalizer. Therefore, you have to define the macro | |
1496 MC_ALLOC_CALL_FINALIZER(ptr). This macro should do nothing else | |
1497 then test if there is a finalizer and call it on the given | |
1498 argument, which is the heap address of the object. */ | |
1499 static void | |
1500 finalize_page (page_header *ph) | |
1501 { | |
1502 EMACS_INT heap_space = (EMACS_INT) PH_HEAP_SPACE (ph); | |
1503 EMACS_INT heap_space_step = PH_CELL_SIZE (ph); | |
1504 EMACS_INT mark_bit = 0; | |
1505 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph); | |
1506 unsigned int bit = 0; | |
1507 | |
1508 mark_free_list (ph); | |
1509 | |
1510 #ifdef NEW_GC | |
1511 /* ARRAY_BIT_HACK */ | |
1512 if (PH_ARRAY_BIT (ph)) | |
1513 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++) | |
1514 { | |
1515 GET_BIT (bit, ph, mark_bit * N_MARK_BITS); | |
1516 if (bit) | |
1517 { | |
1518 return; | |
1519 } | |
1520 } | |
1521 #endif /* NEW_GC */ | |
1522 | |
1523 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++) | |
1524 { | |
1525 GET_BIT (bit, ph, mark_bit * N_MARK_BITS); | |
1526 #ifdef NEW_GC | |
1527 if (bit == WHITE) | |
1528 #else /* not NEW_GC */ | |
1529 if (bit == 0) | |
1530 #endif /* not NEW_GC */ | |
1531 { | |
1532 EMACS_INT ptr = (heap_space + (heap_space_step * mark_bit)); | |
1533 MC_ALLOC_CALL_FINALIZER ((void *) ptr); | |
1534 } | |
1535 } | 1479 } |
1536 } | 1480 } |
1537 | 1481 |
1538 | 1482 |
1539 /* Finalize a page for disksave. XEmacs calls this routine before it | 1483 /* Finalize a page for disksave. XEmacs calls this routine before it |
1556 MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr); | 1500 MC_ALLOC_CALL_FINALIZER_FOR_DISKSAVE ((void *) ptr); |
1557 } | 1501 } |
1558 } | 1502 } |
1559 | 1503 |
1560 | 1504 |
1561 /* Finalizes the heap. */ | |
1562 void | |
1563 mc_finalize (void) | |
1564 { | |
1565 visit_all_used_page_headers (finalize_page); | |
1566 } | |
1567 | |
1568 | |
1569 /* Finalizes the heap for disksave. */ | 1505 /* Finalizes the heap for disksave. */ |
1570 void | 1506 void |
1571 mc_finalize_for_disksave (void) | 1507 mc_finalize_for_disksave (void) |
1572 { | 1508 { |
1573 visit_all_used_page_headers (finalize_page_for_disksave); | 1509 visit_all_used_page_headers (finalize_page_for_disksave); |
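The removed finalize_page()/mc_finalize() pair implemented synchronous finalization: after marking the free list so that already-free cells are skipped, every remaining unmarked (WHITE) cell of every used page was handed to MC_ALLOC_CALL_FINALIZER, a macro the embedder had to define so that it does nothing more than check whether the object has a finalizer and call it on the cell's heap address. With finalization now asynchronous on the C level, that pass disappears; only the disksave variant kept above survives. A hypothetical definition matching the removed comment's contract might look like this (my_header and its finalizer field are invented for the illustration):

typedef struct my_header         /* hypothetical object header */
{
  void (*finalizer) (void *);    /* NULL if the object needs no cleanup */
} my_header;

/* Call the object's finalizer, if any, on its heap address. */
#define MC_ALLOC_CALL_FINALIZER(ptr) do {       \
  my_header *mchf_h = (my_header *) (ptr);      \
  if (mchf_h->finalizer)                        \
    mchf_h->finalizer (ptr);                    \
} while (0)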
1586 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph); | 1522 EMACS_INT mark_bit_max_index = PH_CELLS_ON_PAGE (ph); |
1587 unsigned int bit = 0; | 1523 unsigned int bit = 0; |
1588 | 1524 |
1589 mark_free_list (ph); | 1525 mark_free_list (ph); |
1590 | 1526 |
1591 #ifdef NEW_GC | |
1592 /* ARRAY_BIT_HACK */ | 1527 /* ARRAY_BIT_HACK */ |
1593 if (PH_ARRAY_BIT (ph)) | 1528 if (PH_ARRAY_BIT (ph)) |
1594 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++) | 1529 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++) |
1595 { | 1530 { |
1596 GET_BIT (bit, ph, mark_bit * N_MARK_BITS); | 1531 GET_BIT (bit, ph, mark_bit * N_MARK_BITS); |
1599 zero_mark_bits (ph); | 1534 zero_mark_bits (ph); |
1600 PH_BLACK_BIT (ph) = 0; | 1535 PH_BLACK_BIT (ph) = 0; |
1601 return; | 1536 return; |
1602 } | 1537 } |
1603 } | 1538 } |
1604 #endif /* NEW_GC */ | |
1605 | 1539 |
1606 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++) | 1540 for (mark_bit = 0; mark_bit < mark_bit_max_index; mark_bit++) |
1607 { | 1541 { |
1608 GET_BIT (bit, ph, mark_bit * N_MARK_BITS); | 1542 GET_BIT (bit, ph, mark_bit * N_MARK_BITS); |
1609 #ifdef NEW_GC | |
1610 if (bit == WHITE) | 1543 if (bit == WHITE) |
1611 #else /* not NEW_GC */ | |
1612 if (bit == 0) | |
1613 #endif /* not NEW_GC */ | |
1614 { | 1544 { |
1615 #ifdef NEW_GC | |
1616 GC_STAT_FREED; | 1545 GC_STAT_FREED; |
1617 #endif /* NEW_GC */ | |
1618 remove_cell (heap_space + (heap_space_step * mark_bit), ph); | 1546 remove_cell (heap_space + (heap_space_step * mark_bit), ph); |
1619 } | 1547 } |
1620 } | 1548 } |
1621 zero_mark_bits (ph); | 1549 zero_mark_bits (ph); |
1622 PH_BLACK_BIT (ph) = 0; | 1550 PH_BLACK_BIT (ph) = 0; |
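Two details of the sweep fragment above are easy to miss. First, the "ARRAY_BIT_HACK": on a page whose PH_ARRAY_BIT is set, finding any set mark bit means the page's contents survive as a whole, so the mark bits and black bit are simply reset and the page is skipped instead of being swept cell by cell (presumably because an array spans several cells but is marked only once). Second, GC_STAT_FREED, formerly counted only under NEW_GC, is now counted unconditionally. A condensed sketch of the array-page test, using one mark byte per cell for clarity rather than the packed N_MARK_BITS fields of the real code:

/* Return nonzero if an "array page" must be kept as a whole: any set
   mark implies the object occupying the page is still reachable. */
static int
array_page_is_live (const unsigned char *mark, unsigned long n_cells)
{
  unsigned long i;
  for (i = 0; i < n_cells; i++)
    if (mark[i] != 0)            /* anything other than WHITE (assumed 0) */
      return 1;                  /* keep the entire page, reset marks only */
  return 0;
}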
1635 } | 1563 } |
1636 | 1564 |
1637 | 1565 |
1638 /* Frees the cell pointed to by ptr. */ | 1566 /* Frees the cell pointed to by ptr. */ |
1639 void | 1567 void |
1640 mc_free (void *ptr) | 1568 mc_free (void *UNUSED (ptr)) |
1641 { | 1569 { |
1642 page_header *ph; | 1570 /* Manual frees are not allowed with asynchronous finalization */ |
1643 | 1571 return; |
1644 #ifdef NEW_GC | |
1645 /* Do not allow manual freeing while a gc is running. Data is going | |
1646 to be freed next gc cycle. */ | |
1647 if (write_barrier_enabled || gc_in_progress) | |
1648 return; | |
1649 #endif /* NEW_GC */ | |
1650 | |
1651 ph = get_page_header (ptr); | |
1652 assert (ph); | |
1653 assert (PH_PLH (ph)); | |
1654 assert (PLH_LIST_TYPE (PH_PLH (ph)) != FREE_LIST); | |
1655 | |
1656 #ifdef NEW_GC | |
1657 if (PH_ON_USED_LIST_P (ph)) | |
1658 SET_BIT (ph, get_mark_bit_index (ptr, ph), WHITE); | |
1659 #endif /* NEW_GC */ | |
1660 remove_cell (ptr, ph); | |
1661 | |
1662 if (PH_CELLS_USED (ph) == 0) | |
1663 remove_page_from_used_list (ph); | |
1664 else if (PH_CELLS_USED (ph) < PH_CELLS_ON_PAGE (ph)) | |
1665 move_page_header_to_front (ph); | |
1666 } | 1572 } |
1667 | 1573 |
1668 | 1574 |
1669 /* Changes the size of the cell pointed to by ptr. | 1575 /* Changes the size of the cell pointed to by ptr. |
1670 Returns the new address of the new cell with new size. */ | 1576 Returns the new address of the new cell with new size. */ |
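mc_free() is reduced to a stub: with asynchronous finalization, cells are reclaimed only by the collector's sweep, never by an explicit call. The old body, which looked up the page header, recolored the cell WHITE, removed it and possibly retired the page, is gone entirely, not merely guarded while a collection is running as the old #ifdef NEW_GC check did. For callers this means that dropping the last reference is now the only way to release an mc-allocated cell, roughly as in this hypothetical sketch:

struct my_record { int payload; };        /* hypothetical client type */

static struct my_record *cached_record;   /* hypothetical reference known
                                             to the garbage collector */

/* Release the cached record under the new collector: forget it and let a
   later collection cycle sweep the now unreachable cell.  Calling
   mc_free() here would have no effect. */
static void
release_cached_record (void)
{
  cached_record = 0;
}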
1869 DEFSUBR (Fmc_alloc_memory_usage); | 1775 DEFSUBR (Fmc_alloc_memory_usage); |
1870 #endif /* MEMORY_USAGE_STATS */ | 1776 #endif /* MEMORY_USAGE_STATS */ |
1871 } | 1777 } |
1872 | 1778 |
1873 | 1779 |
1874 #ifdef NEW_GC | |
1875 /*--- incremental garbage collector ----------------------------------*/ | 1780 /*--- incremental garbage collector ----------------------------------*/ |
1876 | 1781 |
1877 /* access dirty bit of page header */ | 1782 /* access dirty bit of page header */ |
1878 void | 1783 void |
1879 set_dirty_bit (page_header *ph, unsigned int value) | 1784 set_dirty_bit (page_header *ph, unsigned int value) |
2060 object_on_heap_p (void *ptr) | 1965 object_on_heap_p (void *ptr) |
2061 { | 1966 { |
2062 page_header *ph = get_page_header_internal (ptr); | 1967 page_header *ph = get_page_header_internal (ptr); |
2063 return (ph && PH_ON_USED_LIST_P (ph)); | 1968 return (ph && PH_ON_USED_LIST_P (ph)); |
2064 } | 1969 } |
2065 | |
2066 #endif /* NEW_GC */ |
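With the MC_ALLOC/NEW_GC merge, the incremental-collector helpers at the end of the file, from set_dirty_bit() through object_on_heap_p(), are no longer wrapped in #ifdef NEW_GC. object_on_heap_p() answers whether a pointer lies on a used mc-allocated page; together with get_page_header() and set_dirty_bit() it is the kind of primitive a write barrier can be built from. The following is only a sketch of that idea, assuming the declarations from this file are in scope; the actual barrier code lives elsewhere in XEmacs and may differ.

/* Hypothetical write-barrier helper: when an object on the mc heap is
   mutated, flag its page as dirty so the incremental marker revisits it. */
void
note_mutation (void *obj)
{
  if (object_on_heap_p (obj))                  /* obj sits on a used page  */
    set_dirty_bit (get_page_header (obj), 1);  /* remember page for rescan */
}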