Back to index

plt-scheme  4.2.1
Defines | Enumerations | Functions | Variables
newgc.c File Reference
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "platforms.h"
#include "gc2.h"
#include "gc2_dump.h"
#include "newgc.h"
#include "msgprint.c"
#include "my_qsort.c"
#include "page_range.c"
#include "vm.c"
#include "protect_range.c"
#include "stack_comp.c"
#include "var_stack.c"
#include "roots.c"
#include "immobile_boxes.c"
#include "fnls.c"
#include "weak.c"
#include "sighand.c"
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Defines

#define MZ_PRECISE_GC   1 /* required for mz includes to work right */
#define NUMBER_OF_TAGS   512
#define PAGEMAP32_SIZE   (1 << (32 - LOG_APAGE_SIZE))
#define PAGEMAP32_BITS(x)   (NUM(x) >> LOG_APAGE_SIZE)
#define GC_ASSERT(x)   /* empty */
#define GCTYPE   NewGC
#define GC_get_GC()   (GC)
#define GC_set_GC(gc)   (GC = gc)
#define GEN0_INITIAL_SIZE   (1 * 1024 * 1024)
#define GEN0_SIZE_FACTOR   0.5
#define GEN0_SIZE_ADDITION   (512 * 1024)
#define GEN0_MAX_SIZE   (32 * 1024 * 1024)
#define GEN0_PAGE_SIZE   (1 * 1024 * 1024)
#define LOG_WORD_SIZE   2
#define STACK_PART_SIZE   (1 * 1024 * 1024)
#define PTR(x)   ((void*)(x))
#define PPTR(x)   ((void**)(x))
#define NUM(x)   ((unsigned long)(x))
#define WORD_SIZE   (1 << LOG_WORD_SIZE)
#define WORD_BITS   (8 * WORD_SIZE)
#define APAGE_SIZE   (1 << LOG_APAGE_SIZE)
#define GENERATIONS   1
#define PREFIX_WSIZE   1
#define PREFIX_SIZE   (PREFIX_WSIZE * WORD_SIZE)
#define MED_OBJHEAD(p, bytesize)
#define MAX_OBJECT_SIZEW   (gcBYTES_TO_WORDS(APAGE_SIZE) - PREFIX_WSIZE - 3)
#define MAX_OBJECT_SIZE   (gcWORDS_TO_BYTES(MAX_OBJECT_SIZEW))
#define ASSERT_TAG(tag)   GC_ASSERT((tag) >= 0 && (tag) <= NUMBER_OF_TAGS)
#define ASSERT_VALID_OBJPTR(objptr)   GC_ASSERT(!((long)(objptr) & (0x3)))
#define ALIGN_SIZE(sizew)   (((sizew) & 0x1) ? ((sizew) + 1) : (sizew))
#define ALIGN_BYTES_SIZE(sizeb)   (((sizeb) & ((2 * WORD_SIZE) -1)) ? ((sizeb) + ((2 * WORD_SIZE) - ((sizeb) & ((2 * WORD_SIZE) - 1)))) : (sizeb))
#define INSET_WORDS   1
#define COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(s)   (ALIGN_BYTES_SIZE((s) + OBJHEAD_SIZE))
#define COMPUTE_ALLOC_SIZE_FOR_BIG_PAGE_SIZE(s)   (ALIGN_BYTES_SIZE((s) + OBJHEAD_SIZE + PREFIX_SIZE))
#define BIG_PAGE_TO_OBJECT(big_page)   ((void *) (((char *)((big_page)->addr)) + OBJHEAD_SIZE + PREFIX_SIZE))
#define BIG_PAGE_TO_OBJHEAD(big_page)   ((objhead*) (((char *)((big_page)->addr)) + PREFIX_SIZE))
#define PAGE_TO_OBJHEAD(page)   ((objhead*) (((char *)((page)->addr)) + PREFIX_SIZE))
#define PAGE_START_VSS(page)   ((void**) (((char *)((page)->addr)) + PREFIX_SIZE))
#define PAGE_END_VSS(page)   ((void**) (((char *)((page)->addr)) + ((page)->size)))
#define MED_OBJHEAD_TO_OBJECT(ptr, page_size)   ((void*) (((char *)MED_OBJHEAD((ptr), (page_size))) + OBJHEAD_SIZE));
#define OVERFLOWS_GEN0(ptr)   ((ptr) > GC_gen0_alloc_page_end)
#define BYTES_MULTIPLE_OF_WORD_TO_WORDS(sizeb)   ((sizeb) >> gcLOG_WORD_SIZE)
#define PAIR_SIZE_IN_BYTES   ALIGN_BYTES_SIZE(sizeof(Scheme_Simple_Object) + OBJHEAD_SIZE)
#define INIT_DEBUG_FILE()   /* */
#define CLOSE_DEBUG_FILE()   /* */
#define DUMP_HEAP()   /* */
#define GCDEBUG(args)   /* */
#define GCWARN(args)   { GCPRINT args; GCFLUSHOUT(); }
#define GCERR(args)   { GCPRINT args; GCFLUSHOUT(); abort(); }
#define backtrace_new_page(gc, page)   /* */
#define backtrace_new_page_if_needed(gc, page)   /* */
#define free_backtrace(page)   /* */
#define set_backtrace_source(ptr, type)   /* */
#define record_backtrace(page, ptr)   /* */
#define copy_backtrace_source(to_page, to_ptr, from_page, from_ptr)   /* */
#define two_arg_no_op(a, b)   /* */
#define GC_X_variable_stack   GC_mark_variable_stack
#define gcX(a)   gcMARK(*a)
#define X_source(stk, p)   set_backtrace_source((stk ? stk : p), BT_STACK)
#define GC_X_variable_stack   GC_fixup_variable_stack
#define gcX(a)   gcFIXUP(*a)
#define X_source(stk, p)   /* */
#define traverse_roots(gcMUCK, set_bt_src)
#define is_marked(gc, p)   marked(gc, p)
#define weak_box_resolve(p)   GC_resolve(p)
#define MARK_STACK_START(ms)   ((void **)(void *)&ms[1])
#define MARK_STACK_END(ms)   ((void **)((char *)ms + STACK_PART_SIZE))
#define clean_up_thread_list()   /* */
#define reset_object_traces()   /* */
#define register_traced_object(p)   /* */
#define print_traced_objects(x, y, q, z)   /* */
#define MAX_DUMP_TAG   256
#define should_compact_page(lsize, tsize)   (lsize < (tsize - PREFIX_SIZE - (APAGE_SIZE >> 2)))
#define TIME_DECLS()
#define TIME_INIT()
#define TIME_STEP(task)
#define TIME_DONE()

Enumerations

enum  {
  PAGE_TAGGED = 0, PAGE_ATOMIC = 1, PAGE_ARRAY = 2, PAGE_TARRAY = 3,
  PAGE_XTAGGED = 4, PAGE_BIG = 5, PAGE_TYPES = 6
}

Functions

GC_collect_start_callback_Proc GC_set_collect_start_callback (GC_collect_start_callback_Proc func)
GC_collect_end_callback_Proc GC_set_collect_end_callback (GC_collect_end_callback_Proc func)
void GC_set_collect_inform_callback (void(*func)(int major_gc, long pre_used, long post_used))
static void garbage_collect (NewGC *, int)
static void out_of_memory ()
static void * ofm_malloc (size_t size)
static void * ofm_malloc_zero (size_t size)
static void check_used_against_max (NewGC *gc, size_t len)
static void * malloc_pages (NewGC *gc, size_t len, size_t alignment)
static void * malloc_dirty_pages (NewGC *gc, size_t len, size_t alignment)
static void free_pages (NewGC *gc, void *p, size_t len)
int GC_mtrace_new_id (void *f)
int GC_mtrace_union_current_with (int newval)
static void free_page_maps (PageMap page_maps1)
static void pagemap_set (PageMap page_maps1, void *p, mpage *value)
static mpage * pagemap_find_page (PageMap page_maps1, void *p)
static void pagemap_modify_with_size (PageMap pagemap, mpage *page, long size, mpage *val)
static void pagemap_modify (PageMap pagemap, mpage *page, mpage *val)
static void pagemap_add (PageMap pagemap, mpage *page)
static void pagemap_add_with_size (PageMap pagemap, mpage *page, long size)
static void pagemap_remove (PageMap pagemap, mpage *page)
static void pagemap_remove_with_size (PageMap pagemap, mpage *page, long size)
int GC_is_allocated (void *p)
static size_t round_to_apage_size (size_t sizeb)
static mpage * malloc_mpage ()
static void free_mpage (mpage *page)
static void * allocate_big (const size_t request_size_bytes, int type)
static void * allocate_medium (size_t sizeb, int type)
static mpage * gen0_create_new_mpage (NewGC *gc)
static void gen0_free_mpage (NewGC *gc, mpage *page)
static size_t gen0_size_in_use (NewGC *gc)
static void * allocate (const size_t request_size, const int type)
static void * fast_malloc_one_small_tagged (size_t request_size, int dirty)
void * GC_malloc_pair (void *car, void *cdr)
void * GC_malloc (size_t s)
void * GC_malloc_one_tagged (size_t s)
void * GC_malloc_one_xtagged (size_t s)
void * GC_malloc_array_tagged (size_t s)
void * GC_malloc_atomic (size_t s)
void * GC_malloc_atomic_uncollectable (size_t s)
void * GC_malloc_allow_interior (size_t s)
void * GC_malloc_atomic_allow_interior (size_t s)
void * GC_malloc_tagged_allow_interior (size_t s)
void * GC_malloc_one_small_dirty_tagged (size_t s)
void * GC_malloc_one_small_tagged (size_t s)
void GC_free (void *p)
long GC_compute_alloc_size (long sizeb)
long GC_initial_word (int request_size)
void GC_initial_words (char *buffer, long sizeb)
long GC_alloc_alignment ()
long GC_malloc_stays_put_threshold ()
static void resize_gen0 (NewGC *gc, unsigned long new_size)
static void reset_nursery (NewGC *gc)
static int marked (NewGC *gc, void *p)
void ** GC_get_variable_stack ()
void GC_set_variable_stack (void **p)
void GC_set_stack_base (void *base)
unsigned long GC_get_stack_base ()
void GC_set_get_thread_stack_base (unsigned long(*func)(void))
static void * get_stack_base (NewGC *gc)
static void mark_roots (NewGC *gc)
static void repair_roots (NewGC *gc)
static int is_finalizable_page (NewGC *gc, void *p)
static void mark_finalizer_structs (NewGC *gc)
static void repair_finalizer_structs (NewGC *gc)
static void check_finalizers (NewGC *gc, int level)
static void do_ordered_level3 (NewGC *gc)
void GC_finalization_weak_ptr (void **p, int offset)
static void mark_weak_finalizer_structs (NewGC *gc)
static void repair_weak_finalizer_structs (NewGC *gc)
static void zero_weak_finalizers (NewGC *gc)
static void reset_weak_finalizers (NewGC *gc)
static MarkSegment * mark_stack_create_frame ()
static void mark_stack_initialize ()
static void push_ptr (void *ptr)
static int pop_ptr (void **ptr)
static void clear_stack_pages (void)
static void reset_pointer_stack (void)
void GC_register_root_custodian (void *c)
int GC_set_account_hook (int type, void *c1, unsigned long b, void *c2)
void GC_register_thread (void *t, void *c)
void GC_register_new_thread (void *t, void *c)
int GC_merely_accounting ()
static int designate_modified_gc (NewGC *gc, void *p)
static int designate_modified (void *p)
void GC_write_barrier (void *p)
static void NewGC_initialize (NewGC *newgc, NewGC *parentgc)
static NewGC * init_type_tags_worker (NewGC *parentgc, int count, int pair, int mutable_pair, int weakbox, int ephemeron, int weakarray, int custbox)
void GC_init_type_tags (int count, int pair, int mutable_pair, int weakbox, int ephemeron, int weakarray, int custbox)
void GC_gcollect (void)
void GC_register_traversers (short tag, Size_Proc size, Mark_Proc mark, Fixup_Proc fixup, int constant_Size, int atomic)
long GC_get_memory_use (void *o)
void GC_mark (const void *const_p)
static void propagate_marks (NewGC *gc)
void * GC_resolve (void *p)
void * GC_fixup_self (void *p)
void GC_fixup (void *pp)
void GC_dump_with_traces (int flags, GC_get_type_name_proc get_type_name, GC_get_xtagged_name_proc get_xtagged_name, GC_for_each_found_proc for_each_found, short trace_for_tag, GC_print_tagged_value_proc print_tagged_value, int path_length_limit)
void GC_dump (void)
static void reset_gen1_page (NewGC *gc, mpage *work)
static void reset_gen1_pages_live_and_previous_sizes (NewGC *gc)
static void remove_gen1_page_from_pagemap (NewGC *gc, mpage *work)
static void remove_all_gen1_pages_from_pagemap (NewGC *gc)
static void mark_backpointers (NewGC *gc)
mpage * allocate_compact_target (NewGC *gc, mpage *work)
static void do_heap_compact (NewGC *gc)
static void repair_heap (NewGC *gc)
static void gen1_free_mpage (PageMap pagemap, mpage *page)
static void cleanup_vacated_pages (NewGC *gc)
static void gen0_free_big_pages (NewGC *gc)
static void clean_up_heap (NewGC *gc)
static void protect_old_pages (NewGC *gc)
void GC_free_all (void)

Variables

static const char * type_name [PAGE_TYPES]
static THREAD_LOCAL NewGC * GC
void(* GC_out_of_memory )(void)
void(* GC_report_out_of_memory )(void)
void(* GC_mark_xtagged )(void *obj)
void(* GC_fixup_xtagged )(void *obj)
THREAD_LOCAL unsigned long GC_gen0_alloc_page_ptr = 0
THREAD_LOCAL unsigned long GC_gen0_alloc_page_end = 0
static const char * zero_sized [4]
THREAD_LOCAL void ** GC_variable_stack
static THREAD_LOCAL MarkSegment * mark_stack = NULL

Define Documentation

#define ALIGN_BYTES_SIZE (   sizeb)    (((sizeb) & ((2 * WORD_SIZE) -1)) ? ((sizeb) + ((2 * WORD_SIZE) - ((sizeb) & ((2 * WORD_SIZE) - 1)))) : (sizeb))

Definition at line 501 of file newgc.c.

#define ALIGN_SIZE (   sizew)    (((sizew) & 0x1) ? ((sizew) + 1) : (sizew))

Definition at line 500 of file newgc.c.

#define APAGE_SIZE   (1 << LOG_APAGE_SIZE)

Definition at line 149 of file newgc.c.

#define ASSERT_TAG (   tag)    GC_ASSERT((tag) >= 0 && (tag) <= NUMBER_OF_TAGS)

Definition at line 437 of file newgc.c.

#define ASSERT_VALID_OBJPTR (   objptr)    GC_ASSERT(!((long)(objptr) & (0x3)))

Definition at line 438 of file newgc.c.

#define backtrace_new_page (   gc,
  page 
)    /* */

Definition at line 1170 of file newgc.c.

#define backtrace_new_page_if_needed (   gc,
  page 
)    /* */

Definition at line 1171 of file newgc.c.

#define BIG_PAGE_TO_OBJECT (   big_page)    ((void *) (((char *)((big_page)->addr)) + OBJHEAD_SIZE + PREFIX_SIZE))

Definition at line 513 of file newgc.c.

#define BIG_PAGE_TO_OBJHEAD (   big_page)    ((objhead*) (((char *)((big_page)->addr)) + PREFIX_SIZE))

Definition at line 514 of file newgc.c.

#define BYTES_MULTIPLE_OF_WORD_TO_WORDS (   sizeb)    ((sizeb) >> gcLOG_WORD_SIZE)

Definition at line 693 of file newgc.c.

#define clean_up_thread_list ( )    /* */

Definition at line 1553 of file newgc.c.

#define CLOSE_DEBUG_FILE ( )    /* */

Definition at line 1085 of file newgc.c.

#define COMPUTE_ALLOC_SIZE_FOR_BIG_PAGE_SIZE (   s)    (ALIGN_BYTES_SIZE((s) + OBJHEAD_SIZE + PREFIX_SIZE))

Definition at line 512 of file newgc.c.

#define COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE (   s)    (ALIGN_BYTES_SIZE((s) + OBJHEAD_SIZE))

Definition at line 511 of file newgc.c.

#define copy_backtrace_source (   to_page,
  to_ptr,
  from_page,
  from_ptr 
)    /* */

Definition at line 1175 of file newgc.c.

#define DUMP_HEAP ( )    /* */

Definition at line 1086 of file newgc.c.

#define free_backtrace (   page)    /* */

Definition at line 1172 of file newgc.c.

#define GC_ASSERT (   x)    /* empty */

Definition at line 58 of file newgc.c.

#define GC_get_GC ( )    (GC)

Definition at line 91 of file newgc.c.

#define GC_set_GC (   gc)    (GC = gc)

Definition at line 92 of file newgc.c.

#define GC_X_variable_stack   GC_mark_variable_stack

Definition at line 1230 of file newgc.c.

#define GC_X_variable_stack   GC_fixup_variable_stack

Definition at line 1230 of file newgc.c.

#define GCDEBUG (   args)    /* */

Definition at line 1087 of file newgc.c.

#define GCERR (   args)    { GCPRINT args; GCFLUSHOUT(); abort(); }

Definition at line 1091 of file newgc.c.

#define GCTYPE   NewGC

Definition at line 90 of file newgc.c.

#define GCWARN (   args)    { GCPRINT args; GCFLUSHOUT(); }

Definition at line 1090 of file newgc.c.

#define gcX (   a)    gcMARK(*a)

Definition at line 1231 of file newgc.c.

#define gcX (   a)    gcFIXUP(*a)

Definition at line 1231 of file newgc.c.

#define GEN0_INITIAL_SIZE   (1 * 1024 * 1024)

Definition at line 123 of file newgc.c.

#define GEN0_MAX_SIZE   (32 * 1024 * 1024)

Definition at line 126 of file newgc.c.

#define GEN0_PAGE_SIZE   (1 * 1024 * 1024)

Definition at line 127 of file newgc.c.

#define GEN0_SIZE_ADDITION   (512 * 1024)

Definition at line 125 of file newgc.c.

#define GEN0_SIZE_FACTOR   0.5

Definition at line 124 of file newgc.c.

#define GENERATIONS   1

Definition at line 150 of file newgc.c.

#define INIT_DEBUG_FILE ( )    /* */

Definition at line 1084 of file newgc.c.

#define INSET_WORDS   1

Definition at line 502 of file newgc.c.

#define is_marked (   gc,
  p 
)    marked(gc, p)

Definition at line 1441 of file newgc.c.

#define LOG_WORD_SIZE   2

Definition at line 133 of file newgc.c.

#define MARK_STACK_END (   ms)    ((void **)((char *)ms + STACK_PART_SIZE))

Definition at line 1456 of file newgc.c.

#define MARK_STACK_START (   ms)    ((void **)(void *)&ms[1])

Definition at line 1455 of file newgc.c.

#define MAX_DUMP_TAG   256

Definition at line 2253 of file newgc.c.

#define MAX_OBJECT_SIZE   (gcWORDS_TO_BYTES(MAX_OBJECT_SIZEW))

Definition at line 435 of file newgc.c.

#define MAX_OBJECT_SIZEW   (gcBYTES_TO_WORDS(APAGE_SIZE) - PREFIX_WSIZE - 3)

Definition at line 434 of file newgc.c.

#define MED_OBJHEAD (   p,
  bytesize 
)
Value:
((objhead *)(PTR(((((NUM(p) & (APAGE_SIZE - 1)) - PREFIX_SIZE) / bytesize) * bytesize) \
                                                         + (NUM(p) & (~(APAGE_SIZE - 1))) + PREFIX_SIZE)))

Definition at line 428 of file newgc.c.

#define MED_OBJHEAD_TO_OBJECT (   ptr,
  page_size 
)    ((void*) (((char *)MED_OBJHEAD((ptr), (page_size))) + OBJHEAD_SIZE));

Definition at line 518 of file newgc.c.

#define MZ_PRECISE_GC   1 /* required for mz includes to work right */

Definition at line 30 of file newgc.c.

#define NUM (   x)    ((unsigned long)(x))

Definition at line 146 of file newgc.c.

#define NUMBER_OF_TAGS   512

Definition at line 41 of file newgc.c.

#define OVERFLOWS_GEN0 (   ptr)    ((ptr) > GC_gen0_alloc_page_end)

Definition at line 687 of file newgc.c.

#define PAGE_END_VSS (   page)    ((void**) (((char *)((page)->addr)) + ((page)->size)))

Definition at line 517 of file newgc.c.

#define PAGE_START_VSS (   page)    ((void**) (((char *)((page)->addr)) + PREFIX_SIZE))

Definition at line 516 of file newgc.c.

#define PAGE_TO_OBJHEAD (   page)    ((objhead*) (((char *)((page)->addr)) + PREFIX_SIZE))

Definition at line 515 of file newgc.c.

#define PAGEMAP32_BITS (   x)    (NUM(x) >> LOG_APAGE_SIZE)

Definition at line 52 of file newgc.c.

#define PAGEMAP32_SIZE   (1 << (32 - LOG_APAGE_SIZE))

Definition at line 51 of file newgc.c.

#define PAIR_SIZE_IN_BYTES   ALIGN_BYTES_SIZE(sizeof(Scheme_Simple_Object) + OBJHEAD_SIZE)

Definition at line 803 of file newgc.c.

#define PPTR (   x)    ((void**)(x))

Definition at line 145 of file newgc.c.

#define PREFIX_SIZE   (PREFIX_WSIZE * WORD_SIZE)

Definition at line 426 of file newgc.c.

#define PREFIX_WSIZE   1

Definition at line 421 of file newgc.c.

#define print_traced_objects (   x,
  y,
  q,
  z 
)    /* */

Definition at line 2250 of file newgc.c.

#define PTR (   x)    ((void*)(x))

Definition at line 144 of file newgc.c.

#define record_backtrace (   page,
  ptr 
)    /* */

Definition at line 1174 of file newgc.c.

#define register_traced_object (   p)    /* */

Definition at line 2249 of file newgc.c.

#define reset_object_traces ( )    /* */

Definition at line 2248 of file newgc.c.

#define set_backtrace_source (   ptr,
  type 
)    /* */

Definition at line 1173 of file newgc.c.

#define should_compact_page (   lsize,
  tsize 
)    (lsize < (tsize - PREFIX_SIZE - (APAGE_SIZE >> 2)))

Definition at line 2623 of file newgc.c.

#define STACK_PART_SIZE   (1 * 1024 * 1024)

Definition at line 138 of file newgc.c.

#define TIME_DECLS ( )

Definition at line 3040 of file newgc.c.

#define TIME_DONE ( )

Definition at line 3043 of file newgc.c.

#define TIME_INIT ( )

Definition at line 3041 of file newgc.c.

#define TIME_STEP (   task)

Definition at line 3042 of file newgc.c.

#define traverse_roots (   gcMUCK,
  set_bt_src 
)
Value:
{    \
    unsigned long j;                            \
    Roots *roots = &gc->roots;                  \
    if(roots->roots) {                          \
      sort_and_merge_roots(roots);              \
      for(j = 0; j < roots->count; j += 2) {    \
        void **start = (void**)roots->roots[j]; \
        void **end = (void**)roots->roots[j+1]; \
        while(start < end) {                    \
          set_bt_src(start, BT_ROOT);           \
          gcMUCK(*start++);                     \
        }                                       \
      }                                         \
    }                                           \
  }

Definition at line 1244 of file newgc.c.

#define two_arg_no_op (   a,
  b 
)    /* */

Definition at line 1178 of file newgc.c.

#define weak_box_resolve (   p)    GC_resolve(p)

Definition at line 1442 of file newgc.c.

#define WORD_BITS   (8 * WORD_SIZE)

Definition at line 148 of file newgc.c.

#define WORD_SIZE   (1 << LOG_WORD_SIZE)

Definition at line 147 of file newgc.c.

#define X_source (   stk,
  p 
)    set_backtrace_source((stk ? stk : p), BT_STACK)

Definition at line 1232 of file newgc.c.

#define X_source (   stk,
  p 
)    /* */

Definition at line 1232 of file newgc.c.


Enumeration Type Documentation

anonymous enum
Enumerator:
PAGE_TAGGED 
PAGE_ATOMIC 
PAGE_ARRAY 
PAGE_TARRAY 
PAGE_XTAGGED 
PAGE_BIG 
PAGE_TYPES 

Definition at line 62 of file newgc.c.

     {
  PAGE_TAGGED   = 0,
  PAGE_ATOMIC   = 1,
  PAGE_ARRAY    = 2,
  PAGE_TARRAY   = 3,
  PAGE_XTAGGED  = 4,
  PAGE_BIG      = 5,
  /* the number of page types. */
  PAGE_TYPES    = 6,
};

Function Documentation

static void* allocate ( const size_t  request_size,
const int  type 
) [inline, static]

Definition at line 695 of file newgc.c.

/* Fast-path nursery (gen0) bump allocator.  Returns an object of at least
   request_size bytes; the object header is always zeroed, and for non-atomic
   pages the whole object is zeroed.  Oversized requests are redirected to
   allocate_big().  May trigger a minor collection when the nursery is full
   (unless the GC is in dumping_avoid_collection mode). */
{
  size_t allocate_size;
  unsigned long newptr;

  /* all zero-sized requests share one dummy object */
  if(request_size == 0) return zero_sized;
  
  allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size);
  if(allocate_size > MAX_OBJECT_SIZE)  return allocate_big(request_size, type);

  /* ensure that allocation will fit in a gen0 page */
  newptr = GC_gen0_alloc_page_ptr + allocate_size;
  ASSERT_VALID_OBJPTR(newptr);

  while (OVERFLOWS_GEN0(newptr)) {
    NewGC *gc = GC_get_GC();
    /* bring page size used up to date */
    gc->gen0.curr_alloc_page->size = GC_gen0_alloc_page_ptr - NUM(gc->gen0.curr_alloc_page->addr);
    gc->gen0.current_size += gc->gen0.curr_alloc_page->size;

    /* try next nursery page if present */
    if(gc->gen0.curr_alloc_page->next) { 
      gc->gen0.curr_alloc_page  = gc->gen0.curr_alloc_page->next;
      GC_gen0_alloc_page_ptr    = NUM(gc->gen0.curr_alloc_page->addr) + gc->gen0.curr_alloc_page->size;
      ASSERT_VALID_OBJPTR(GC_gen0_alloc_page_ptr);
      GC_gen0_alloc_page_end    = NUM(gc->gen0.curr_alloc_page->addr) + GEN0_PAGE_SIZE;
    }
    /* WARNING: tries to avoid a collection but
     * malloc_pages can cause a collection due to check_used_against_max */
    else if (gc->dumping_avoid_collection) {
      mpage *new_mpage = gen0_create_new_mpage(gc);

      /* push page */
      new_mpage->next = gc->gen0.curr_alloc_page;
      new_mpage->next->prev = new_mpage;

      gc->gen0.curr_alloc_page  = new_mpage;
      GC_gen0_alloc_page_ptr    = NUM(new_mpage->addr);
      ASSERT_VALID_OBJPTR(GC_gen0_alloc_page_ptr);
      GC_gen0_alloc_page_end    = NUM(new_mpage->addr) + GEN0_PAGE_SIZE;
    }
    else {
      garbage_collect(gc, 0);
    }
    /* collection (or a fresh page) moved the bump pointer; retry the fit test */
    newptr = GC_gen0_alloc_page_ptr + allocate_size;
    ASSERT_VALID_OBJPTR(newptr);
  } 

  /* actual Allocation */
  {
    objhead *info = (objhead *)PTR(GC_gen0_alloc_page_ptr);

    GC_gen0_alloc_page_ptr = newptr;

    if (type == PAGE_ATOMIC)
      memset(info, 0, sizeof(objhead)); /* init objhead; atomic payload may stay dirty */
    else
      memset(info, 0, allocate_size);   /* was bzero(); memset is the standard, portable equivalent */

#ifdef MZ_USE_PLACES
    memcpy(info, &GC_objhead_template, sizeof(objhead));
#endif

    info->type = type;
    info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumped us up to the next word boundary */
    {
      void * objptr = OBJHEAD_TO_OBJPTR(info);
      ASSERT_VALID_OBJPTR(objptr);
      return objptr;
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* allocate_big ( const size_t  request_size_bytes,
int  type 
) [static]

Definition at line 521 of file newgc.c.

/* Allocate a "big" object on its own dedicated page (size_class 2), pushed
   onto gc->gen0.big_pages.  May trigger a minor collection first if the
   nursery budget would be exceeded. */
{
  NewGC *gc = GC_get_GC();
  mpage *bpage;
  size_t allocate_size;

#ifdef NEWGC_BTC_ACCOUNT
  if(GC_out_of_memory) {
    if (BTC_single_allocation_limit(gc, request_size_bytes)) {
      /* We're allowed to fail. Check for allocations that exceed a single-time
         limit. Otherwise, the limit doesn't work as intended, because
         a program can allocate a large block that nearly exhausts memory,
         and then a subsequent allocation can fail. As long as the limit
         is much smaller than the actual available memory, and as long as
         GC_out_of_memory protects any user-requested allocation whose size
         is independent of any existing object, then we can enforce the limit. */
      GC_out_of_memory();
    }
  }
#endif

  /* the actual size of this is the size, ceilinged to the next largest word,
     plus one word for the object header.
     This last serves many purposes, including making sure the object is 
     aligned for Sparcs. */
  allocate_size = COMPUTE_ALLOC_SIZE_FOR_BIG_PAGE_SIZE(request_size_bytes);

  /* collect first if this allocation would blow the gen0 budget
     (skipped while dumping_avoid_collection is set) */
  if((gc->gen0.current_size + allocate_size) >= gc->gen0.max_size) {
    if (!gc->dumping_avoid_collection)
      garbage_collect(gc, 0);
  }
  gc->gen0.current_size += allocate_size;

  /* We not only need APAGE_SIZE alignment, we 
     need everything consistently mapped within an APAGE_SIZE
     segment. So round up. */
  bpage = malloc_mpage();
  /* atomic pages need not be zeroed, so dirty pages are acceptable */
  if (type == PAGE_ATOMIC)
    bpage->addr = malloc_dirty_pages(gc, round_to_apage_size(allocate_size), APAGE_SIZE);
  else
    bpage->addr = malloc_pages(gc, round_to_apage_size(allocate_size), APAGE_SIZE);
  bpage->size = allocate_size;
  bpage->size_class = 2; /* 2 marks a big page */
  bpage->page_type = type;

#ifdef MZ_USE_PLACES
    memcpy(BIG_PAGE_TO_OBJHEAD(bpage), &GC_objhead_template, sizeof(objhead));
#endif

  /* push new bpage onto GC->gen0.big_pages */
  bpage->next = gc->gen0.big_pages;
  if(bpage->next) bpage->next->prev = bpage;
  gc->gen0.big_pages = bpage;
  pagemap_add(gc->page_maps, bpage);

  {
    void * objptr = BIG_PAGE_TO_OBJECT(bpage);
    ASSERT_VALID_OBJPTR(objptr);
    return objptr;
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

mpage* allocate_compact_target ( NewGC gc,
mpage work 
)

Definition at line 2599 of file newgc.c.

{
  /* Create a fresh, empty gen-1 page for compaction to copy live objects
     into, and splice it into the page list immediately after `work`. */
  mpage *target = malloc_mpage();

  target->addr = malloc_dirty_pages(gc, APAGE_SIZE, APAGE_SIZE);
  target->previous_size = target->size = PREFIX_SIZE; /* starts empty */
  target->generation = 1;
  target->back_pointers = 0;
  target->size_class = 0;
  target->page_type = work->page_type; /* same type as the page being compacted */
  target->marked_on = 1;
  backtrace_new_page(gc, target);

  /* doubly-linked insertion right after `work` */
  target->next = work->next;
  target->prev = work;
  if (target->next)
    target->next->prev = target;
  work->next = target;

  return target;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* allocate_medium ( size_t  sizeb,
int  type 
) [static]

Definition at line 583 of file newgc.c.

/* Allocate a "medium" object from size-segregated pages (size_class 1).
   Requests larger than half an allocation page are handed to allocate_big().
   Each size class `pos` holds objects of a fixed slot size `sz`; slots are
   reused from a per-class freelist of pages, or a new page is carved up. */
{
  NewGC *gc;
  int sz = 8, pos = 0, n;
  void *addr, *p;
  mpage *page;
  objhead *info;

  /* too large for a medium page: use a dedicated big page */
  if (sizeb > (1 << (LOG_APAGE_SIZE - 1)))
    return allocate_big(sizeb, type);
 
  /* round the request up to a power of two (>= 8);
     pos becomes the size-class index */
  while (sz < sizeb) {
    sz <<= 1;
    pos++;
  }

  sz += WORD_SIZE; /* add trailing word, in case pointer is to end */
  sz += OBJHEAD_SIZE; /* room for objhead */
  sz = ALIGN_BYTES_SIZE(sz);

  gc = GC_get_GC();
  /* scan the freelist pages of this size class for a dead slot to reuse */
  while (1) {
    page = gc->med_freelist_pages[pos];
    if (page) {
      /* previous_size caches where the last search left off on this page */
      n = page->previous_size;
      while (n <= (APAGE_SIZE - sz)) {
        info = (objhead *)PTR(NUM(page->addr) + n);
        if (info->dead) {
#ifdef MZ_USE_PLACES
          info->owner = GC_objhead_template.owner;
          //memcpy(info, &GC_objhead_template, sizeof(objhead));
#endif
          info->dead = 0;
          info->type = type;
          page->previous_size = (n + sz);
          page->live_size += sz;
          p = OBJHEAD_TO_OBJPTR(info);
          memset(p, 0, sz - OBJHEAD_SIZE); /* zero the payload, not the header */
          return p;
        }
        n += sz;
      }
      /* page exhausted: drop it from the freelist and try the previous one */
      gc->med_freelist_pages[pos] = page->prev;
    } else
      break;
  }

  /* no free slot anywhere: allocate a new page and carve it into sz slots */
  page = malloc_mpage();
  addr = malloc_pages(gc, APAGE_SIZE, APAGE_SIZE);
  page->addr = addr;
  page->size = sz;          /* for medium pages, `size` records the slot size */
  page->size_class = 1;     /* 1 marks a medium page */
  page->page_type = PAGE_BIG;
  page->previous_size = PREFIX_SIZE;
  page->live_size = sz;
  
  /* mark every slot dead so later searches can claim them */
  for (n = page->previous_size; (n + sz) <= APAGE_SIZE; n += sz) {
    info = (objhead *)PTR(NUM(page->addr) + n);
#ifdef MZ_USE_PLACES
    memcpy(info, &GC_objhead_template, sizeof(objhead));
#endif
    info->dead = 1;
    info->size = gcBYTES_TO_WORDS(sz);
  }

  /* push the page onto both the page list and the freelist for this class */
  page->next = gc->med_pages[pos];
  if (page->next)
    page->next->prev = page;
  gc->med_pages[pos] = page;
  gc->med_freelist_pages[pos] = page;

  pagemap_add(gc->page_maps, page);

  /* claim the first slot of the new page for this request */
  n = page->previous_size;
  info = (objhead *)PTR(NUM(page->addr) + n);
  info->dead = 0;
  info->type = type;

  {
    void * objptr = OBJHEAD_TO_OBJPTR(info);
    ASSERT_VALID_OBJPTR(objptr);
    return objptr;
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void check_finalizers ( NewGC gc,
int  level 
) [inline, static]

Definition at line 1321 of file newgc.c.

/* Walk the (resolved) finalizer list; every level-`level` finalizer whose
   object is unmarked gets its object revived (marked, so the finalizer can
   run against it) and is moved from the pending list to the run queue. */
{
  Fnl *work = GC_resolve(gc->finalizers);
  Fnl *prev = NULL;

  GCDEBUG((DEBUGOUTF, "CFNL: Checking level %i finalizers\n", level));
  while(work) {
    if((work->eager_level == level) && !marked(gc, work->p)) {
      struct finalizer *next = GC_resolve(work->next);

      GCDEBUG((DEBUGOUTF, 
               "CFNL: Level %i finalizer %p on %p queued for finalization.\n",
               work->eager_level, work, work->p));
      set_backtrace_source(work, BT_FINALIZER);
      gcMARK(work->p); /* revive the object so its finalizer can see it */
      /* unlink `work` from the pending list */
      if(prev) prev->next = next;
      if(!prev) gc->finalizers = next;
      /* append `work` to the run queue */
      if(gc->last_in_queue) gc->last_in_queue = gc->last_in_queue->next = work;
      if(!gc->last_in_queue) gc->run_queue = gc->last_in_queue = work;
      work->next = NULL;
      --gc->num_fnls;

      work = next;
    } else { 
      /* BUG FIX: marked() takes the gc instance as its first argument
         (the call previously read marked(work->p)) */
      GCDEBUG((DEBUGOUTF, "CFNL: Not finalizing %p (level %i on %p): %p / %i\n",
               work, work->eager_level, work->p, pagemap_find_page(gc->page_maps, work->p),
               marked(gc, work->p)));
      prev = work; 
      work = GC_resolve(work->next); 
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void check_used_against_max ( NewGC gc,
size_t  len 
) [inline, static]

Definition at line 206 of file newgc.c.

{
  /* Charge `len` bytes against the page budget (rounded up to whole pages),
     then enforce the heap limits, collecting garbage if necessary. */
  size_t full_pages = len / APAGE_SIZE;
  if (len % APAGE_SIZE)
    full_pages++;
  gc->used_pages += full_pages;

  if (gc->in_unsafe_allocation_mode) {
    /* no collection allowed here: abort via the registered hook if over */
    if (gc->used_pages > gc->max_pages_in_heap)
      gc->unsafe_allocation_abort(gc);
    return;
  }

  if (gc->used_pages <= gc->max_pages_for_use)
    return;

  garbage_collect(gc, 0); /* hopefully this will free enough space */
  if (gc->used_pages <= gc->max_pages_for_use)
    return;

  garbage_collect(gc, 1); /* hopefully *this* will free enough space */
  if (gc->used_pages <= gc->max_pages_for_use)
    return;

  /* too much memory allocated.
   * Inform the thunk and then die semi-gracefully */
  if (GC_out_of_memory)
    GC_out_of_memory();
  out_of_memory();
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void clean_up_heap ( NewGC gc) [static]

Definition at line 2909 of file newgc.c.

/* Post-collection sweep: free pages that hold no marked objects, re-register
   surviving pages in the page map, reset per-page mark/back-pointer state,
   recompute gc->memory_in_use, and rebuild the medium-page freelists. */
{
  int i;
  size_t memory_in_use = 0;
  PageMap pagemap = gc->page_maps;

  gen0_free_big_pages(gc);

  /* gen1 pages, one list per page type */
  for(i = 0; i < PAGE_TYPES; i++) {
    if(gc->gc_full) {
      /* full GC: a page with nothing marked on it is entirely garbage */
      mpage *work = gc->gen1_pages[i];
      mpage *prev = NULL;
      while(work) {
        mpage *next = work->next;
        if(!work->marked_on) {
          /* remove work from list */
          if(prev) prev->next = next; else gc->gen1_pages[i] = next;
          if(next) work->next->prev = prev;
          gen1_free_mpage(pagemap, work);
        } else {
          pagemap_add(pagemap, work);
          work->back_pointers = work->marked_on = 0;
          memory_in_use += work->size;
          prev = work; 
        }
        work = next;
      }
    } else {
      /* minor GC: all gen1 pages survive; just reset flags and re-map */
      mpage *work;
      for(work = gc->gen1_pages[i]; work; work = work->next) {
        pagemap_add(pagemap, work);
        work->back_pointers = work->marked_on = 0;
        memory_in_use += work->size;
      }
    }
  }

  /* medium pages: sweep each size class and rebuild its freelist */
  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
    mpage *work;
    mpage *prev = NULL, *next;

    for (work = gc->med_pages[i]; work; work = next) {
      if (work->marked_on) {
        /* count live (non-dead) slots on this page */
        void **start = PPTR(NUM(work->addr) + PREFIX_SIZE);
        void **end = PPTR(NUM(work->addr) + APAGE_SIZE - work->size);
        int non_dead = 0;

        while(start <= end) {
          objhead *info = (objhead *)start;
          if (!info->dead) {
            non_dead++;
          }
          start += info->size;
        }

        next = work->next;
        if (non_dead) {
          /* keep: page holds live objects (work->size is the slot size) */
          work->live_size = (work->size * non_dead);
          memory_in_use += work->live_size;
          work->previous_size = PREFIX_SIZE; /* restart freelist scans at the top */
          work->back_pointers = work->marked_on = 0;
          work->generation = 1;
          pagemap_add(pagemap, work);
          prev = work;
        } else {
          /* free the page */
          if(prev) prev->next = next; else gc->med_pages[i] = next;
          if(next) work->next->prev = prev;
          gen1_free_mpage(pagemap, work);
        }
      } else if (gc->gc_full || !work->generation) {
        /* Page wasn't touched in full GC, or gen-0 not touched,
           so we can free it. */
        next = work->next;
        if(prev) prev->next = next; else gc->med_pages[i] = next;
        if(next) work->next->prev = prev;
        gen1_free_mpage(pagemap, work);
      } else {
        /* not touched during minor gc */
        memory_in_use += work->live_size;
        work->previous_size = PREFIX_SIZE;
        next = work->next;
        prev = work;
        work->back_pointers = 0;
        pagemap_add(pagemap, work);
      }
    }
    /* `prev` is the last surviving page: seed the freelist with it */
    gc->med_freelist_pages[i] = prev;
  }

  gc->memory_in_use = memory_in_use;
  cleanup_vacated_pages(gc);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void cleanup_vacated_pages ( NewGC gc) [inline, static]

Definition at line 2883 of file newgc.c.

                                                     {
  /* Free every page vacated by heap compaction.  do_heap_compact
     pushed such pages onto gc->release_pages (and left them in the
     page map so fixups could find forwarding pointers); now that
     repair is done they can be released for real. */
  mpage *pages = gc->release_pages;
  PageMap pagemap = gc->page_maps;

  /* Free pages vacated by compaction: */
  while (pages) {
    mpage *next = pages->next;  /* save the link before the node is freed */
    gen1_free_mpage(pagemap, pages);
    pages = next;
  }
  gc->release_pages = NULL;  /* list fully consumed */
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void clear_stack_pages ( void  ) [inline, static]

Definition at line 1513 of file newgc.c.

{
  /* Shrink the mark stack after collection: keep the first two
     segments for reuse and free any further segments that were
     allocated while marks were propagating. */
  if(mark_stack) {
    MarkSegment *temp;
    MarkSegment *base;
    int keep = 2;  /* number of segments to retain */

    /* go to the head of the list */
    for(; mark_stack->prev; mark_stack = mark_stack->prev) {}
    /* then go through and clear them out */
    base = mark_stack;
    for(; mark_stack; mark_stack = temp) {
      temp = mark_stack->next;
      if(keep) {
        keep--;
        if (!keep)
          mark_stack->next = NULL;  /* detach the tail that will be freed */
      } else
        free(mark_stack);
    }
    mark_stack = base;
    mark_stack->top = MARK_STACK_START(mark_stack);  /* reset to empty */
  }
}

Here is the caller graph for this function:

static int designate_modified ( void p) [static]

Definition at line 1621 of file newgc.c.

                                       {
  /* Write-fault entry point for the current thread's GC instance;
     defers all the work to designate_modified_gc. */
  NewGC *gc = GC_get_GC();
  return designate_modified_gc(gc, p);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static int designate_modified_gc ( NewGC gc,
void p 
) [static]

Definition at line 1596 of file newgc.c.

{
  /* Handle a write fault at address `p`.  If the fault hit one of
     this GC's protected pages, unprotect the page and record that it
     may now hold old-to-new back pointers (so minor GCs rescan it).
     Returns 1 when the fault was handled, 0 for a genuine seg fault. */
  mpage *page = pagemap_find_page(gc->page_maps, p);

  if (gc->no_further_modifications) {
    /* mutation during collection itself is an internal error */
    GCPRINT(GCOUTF, "Seg fault (internal error during gc) at %p\n", p);
    return 0;
  }

  if(page) {
    if (!page->back_pointers) {
      page->mprotected = 0;
      /* big pages (size_class > 1) may span multiple OS pages */
      vm_protect_pages(page->addr, (page->size_class > 1) ? round_to_apage_size(page->size) : APAGE_SIZE, 1);
      page->back_pointers = 1;
      return 1;
    }
  } else {
    /* not one of our pages; try the primordial GC if there is one
       (NOTE: "primoridal" is the field's actual spelling in the struct) */
    if (gc->primoridal_gc) {
      return designate_modified_gc(gc->primoridal_gc, p);
    }
    GCPRINT(GCOUTF, "Seg fault (internal error) at %p\n", p);
  }
  return 0;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void do_heap_compact ( NewGC gc) [inline, static]

Definition at line 2625 of file newgc.c.

{
  /* Compact generation-1 pages (all non-big page types).  Walking
     each page list from the end, live objects on sparsely-populated
     pages are copied toward earlier target pages (`npage`); vacated
     pages are moved to gc->release_pages but stay in the page map so
     that the repair pass can follow the forwarding pointers left in
     the old object slots. */
  int i;
  PageMap pagemap = gc->page_maps;

  for(i = 0; i < PAGE_BIG; i++) {
    mpage *work = gc->gen1_pages[i], *prev, *npage;

    /* Start from the end: */
    if (work) {
      while (work->next)
        work = work->next;
    }
    npage = work;

    while(work) {
      if(work->marked_on && !work->has_new) {
        /* then determine if we actually want to do compaction */
        if(should_compact_page(gcWORDS_TO_BYTES(work->live_size),work->size)) {
          void **start = PAGE_START_VSS(work);
          void **end = PAGE_END_VSS(work);
          void **newplace;
          unsigned long avail;

          GCDEBUG((DEBUGOUTF, "Compacting page %p: new version at %p\n",
                   work, npage));

          if (npage == work) {
            /* Need to insert a page: */
            npage = allocate_compact_target(gc, work);
          }
          avail = gcBYTES_TO_WORDS(APAGE_SIZE - npage->size);
          newplace = PPTR(NUM(npage->addr) + npage->size);

          while(start < end) {
            objhead *info = (objhead *)start;

            if(info->mark) {
              /* target page full? walk back to an earlier page with
                 room, allocating a fresh one if we reach `work` */
              while (avail <= info->size) {
                npage->size = NUM(newplace) - NUM(npage->addr);
                do {
                  npage = npage->prev;
                } while (!npage->marked_on || npage->has_new);
                if (npage == work)
                  npage = allocate_compact_target(gc, work);
                avail = gcBYTES_TO_WORDS(APAGE_SIZE - npage->size);
                newplace = PPTR(NUM(npage->addr) + npage->size);
              }

              /* target must be writable before we copy into it */
              if (npage->mprotected) {
                npage->mprotected = 0;
                vm_protect_pages(npage->addr, APAGE_SIZE, 1);
              }

              GCDEBUG((DEBUGOUTF,"Moving size %i object from %p to %p\n",
                       gcWORDS_TO_BYTES(info->size), start+1, newplace+1));
              memcpy(newplace, start, gcWORDS_TO_BYTES(info->size));
              info->moved = 1;
              /* leave a forwarding pointer in the old payload slot */
              *(PPTR(OBJHEAD_TO_OBJPTR(start))) = OBJHEAD_TO_OBJPTR(newplace);
              copy_backtrace_source(npage, newplace, work, start);
              newplace += info->size;
              avail -= info->size;
            }
            start += info->size;
          }
          npage->size = NUM(newplace) - NUM(npage->addr);

          prev = work->prev;

          if(prev) prev->next = work->next; else gc->gen1_pages[i] = work->next;
          if(work->next) work->next->prev = prev;

          /* push work onto gc->release_pages */
          work->next = gc->release_pages;
          gc->release_pages = work;

          /* add the old page to the page map so fixups can find forwards */
          pagemap_add(pagemap, work);

          work = prev;
        } else {
          work = work->prev;
        }
      } else {
        /* page is skipped as a compaction source; it also cannot be a
           target if it wasn't marked or has new objects */
        if (npage == work)
          npage = npage->prev;
        work = work->prev;
      }
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void do_ordered_level3 ( NewGC gc) [inline, static]

Definition at line 1354 of file newgc.c.

{
  /* Level-3 (late) finalizer pass: for each registered finalizer
     whose payload is not already marked, mark the payload so it
     survives this collection long enough for the finalizer to run. */
  struct finalizer *temp;
  Mark_Proc *mark_table = gc->mark_table;

  for(temp = GC_resolve(gc->finalizers); temp; temp = GC_resolve(temp->next))
    if(!marked(gc, temp->p)) {
      GCDEBUG((DEBUGOUTF,
               "LVL3: %p is not marked. Marking payload (%p)\n", 
               temp, temp->p));
      set_backtrace_source(temp, BT_FINALIZER);
      /* tagged payloads dispatch through the mark table; everything
         else is xtagged and uses the generic hook */
      if(temp->tagged) mark_table[*(unsigned short*)temp->p](temp->p);
      if(!temp->tagged) GC_mark_xtagged(temp->p);
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* fast_malloc_one_small_tagged ( size_t  request_size,
int  dirty 
) [inline, static]

Definition at line 769 of file newgc.c.

{
  /* Inline fast path for allocating one small tagged object:
     bump-allocate from the current gen-0 nursery page, falling back
     to the general allocator when the page would overflow.  `dirty`
     means the caller promises to initialize the whole object, so
     only the header needs zeroing. */
  unsigned long newptr;
  const size_t allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size);

  newptr = GC_gen0_alloc_page_ptr + allocate_size;
  ASSERT_VALID_OBJPTR(newptr);

  if(OVERFLOWS_GEN0(newptr)) {
    /* slow path; may trigger a collection */
    return GC_malloc_one_tagged(request_size);
  } else {
    objhead *info = (objhead *)PTR(GC_gen0_alloc_page_ptr);

    GC_gen0_alloc_page_ptr = newptr;

    if (dirty)
      memset(info, 0, sizeof(objhead)); /* init objhead */
    else
      bzero(info, allocate_size);  /* zero header and payload */

#ifdef MZ_USE_PLACES
    memcpy(info, &GC_objhead_template, sizeof(objhead));
#endif

    info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumped us up to the next word boundary */

    {
      void * objptr = OBJHEAD_TO_OBJPTR(info);
      ASSERT_VALID_OBJPTR(objptr);
      return objptr;
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void free_mpage ( mpage page) [static]

Definition at line 471 of file newgc.c.

{
  /* Release the mpage bookkeeping record itself; the page's backing
     memory is freed separately (see free_pages/gen1_free_mpage). */
  free(page);
}

Here is the caller graph for this function:

static void free_page_maps ( PageMap  page_maps1) [inline, static]

Definition at line 283 of file newgc.c.

                                                      {
  /* Free the page-map lookup structure.  On 64-bit builds it is a
     sparse three-level table, so the populated inner levels must be
     freed before the root; on 32-bit builds it is one flat array. */
#ifdef SIXTY_FOUR_BIT_INTEGERS
  unsigned long i;
  unsigned long j;
  mpage ***page_maps2;
  mpage **page_maps3;

  for (i=0; i<PAGEMAP64_LEVEL1_SIZE; i++) {
    page_maps2 = page_maps1[i];
    if (page_maps2) {
      for (j=0; j<PAGEMAP64_LEVEL2_SIZE; j++) {
        page_maps3 = page_maps2[j];
        if (page_maps3) {
          free(page_maps3);
        }
      }
      free(page_maps2);
    }
  }
  free(page_maps1);
#else
  free(page_maps1);
#endif
}

Here is the caller graph for this function:

static void free_pages ( NewGC gc,
void p,
size_t  len 
) [static]

Definition at line 254 of file newgc.c.

{
  /* Return `len` bytes starting at `p` to the low-level VM layer and
     decrement the GC's count of OS pages in use (len rounded up to a
     whole number of allocator pages). */
  size_t whole_pages = len / APAGE_SIZE;
  size_t partial_page = (len % APAGE_SIZE) ? 1 : 0;

  gc->used_pages -= (whole_pages + partial_page);
  vm_free_pages(gc->vm, p, len);
}

Here is the call graph for this function:

static void garbage_collect ( NewGC gc,
int  force_full 
) [static]

Definition at line 3051 of file newgc.c.

{
  /* Top-level collection driver: decides between a minor and a full
     (major) collection, runs the mark/finalize/compact/repair phases
     in order, updates heap statistics, and finally runs any queued
     finalizers (unless a finalizer triggered this collection). */
  unsigned long old_mem_use = gc->memory_in_use;
  unsigned long old_gen0    = gc->gen0.current_size;
  int next_gc_full;
  TIME_DECLS();

  /* determine if this should be a full collection or not */
  gc->gc_full = force_full || !gc->generations_available 
    || (gc->since_last_full > 100) || (gc->memory_in_use > (2 * gc->last_full_mem_use));
#if 0
  printf("Collection %li (full = %i): %i / %i / %i / %i  %ld\n", gc->number_of_gc_runs, 
      gc->gc_full, force_full, !gc->generations_available,
      (gc->since_last_full > 100), (gc->memory_in_use > (2 * gc->last_full_mem_use)),
      gc->last_full_mem_use);
#endif

  next_gc_full = gc->gc_full;

  if (gc->full_needed_for_finalization) {
    gc->full_needed_for_finalization = 0;
    gc->gc_full = 1;
  }

  gc->number_of_gc_runs++; 
  INIT_DEBUG_FILE(); DUMP_HEAP();

  /* we don't want the low-level allocator freaking because we've gone past
     half the available memory */
  gc->in_unsafe_allocation_mode = 1;
  gc->unsafe_allocation_abort = out_of_memory;

  TIME_INIT();

  /* inform the system (if it wants us to) that we're starting collection */
  if(gc->GC_collect_start_callback)
    gc->GC_collect_start_callback();

  TIME_STEP("started");

  gc->no_further_modifications = 1;

  if (gc->gc_full)
    reset_gen1_pages_live_and_previous_sizes(gc);
  else /* minor collection */
    remove_all_gen1_pages_from_pagemap(gc);

  init_weak_boxes(gc);
  init_weak_arrays(gc);
  init_ephemerons(gc);

  /* at this point, the page map should only include pages that contain
     collectable objects */

  TIME_STEP("prepared");

  /* mark and repair the roots for collection */
  mark_backpointers(gc);
  TIME_STEP("backpointered");
  mark_finalizer_structs(gc);
  mark_weak_finalizer_structs(gc);
  TIME_STEP("pre-rooted");
  mark_roots(gc);
  mark_immobiles(gc);
  TIME_STEP("rooted");
#ifdef MZ_USE_PLACES
  if (!is_master_gc(gc))
#endif
    GC_mark_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL);

  TIME_STEP("stacked");

  /* now propagate/repair the marks we got from these roots, and do the
     finalizer passes */
  propagate_marks(gc);
  mark_ready_ephemerons(gc); 
  propagate_marks(gc); 

  check_finalizers(gc, 1);
  mark_ready_ephemerons(gc);
  propagate_marks(gc);

  check_finalizers(gc, 2);
  mark_ready_ephemerons(gc);
  propagate_marks(gc);

  if(gc->gc_full) zero_weak_finalizers(gc);
  do_ordered_level3(gc); propagate_marks(gc);
  check_finalizers(gc, 3); propagate_marks(gc);
  if(gc->gc_full) {
    reset_weak_finalizers(gc); 
    propagate_marks(gc);
  }
#ifndef NEWGC_BTC_ACCOUNT
  /* we need to clear out the stack pages. If we're doing memory accounting,
     though, we might as well leave them up for now and let the accounting
     system clear them later. Better then freeing them, at least. If we're
     not doing accounting, though, there is no "later" where they'll get
     removed */
  clear_stack_pages();  
#endif

  TIME_STEP("marked");

  zero_weak_boxes(gc); 
  zero_weak_arrays(gc);
  zero_remaining_ephemerons(gc);

  TIME_STEP("zeroed");

  if(gc->gc_full) do_heap_compact(gc);

  TIME_STEP("compacted");

  /* do some cleanup structures that either change state based on the
     heap state after collection or that become useless based on changes
     in state after collection */
#ifdef NEWGC_BTC_ACCOUNT
  BTC_clean_up(gc);
#endif
  TIME_STEP("cleaned");
  repair_finalizer_structs(gc);
  repair_weak_finalizer_structs(gc);
  repair_roots(gc);
  repair_immobiles(gc);
#ifdef MZ_USE_PLACES
  if (!is_master_gc(gc))
#endif
    GC_fixup_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL);
  TIME_STEP("repaired roots");
  repair_heap(gc);
  TIME_STEP("repaired");
  clean_up_heap(gc);
  TIME_STEP("cleaned heap");
  reset_nursery(gc);
  TIME_STEP("reset nursery");
#ifdef NEWGC_BTC_ACCOUNT
  if (gc->gc_full)
    BTC_do_accounting(gc);
#endif
  TIME_STEP("accounted");
  if (gc->generations_available)
    protect_old_pages(gc);
  TIME_STEP("protect");
  if (gc->gc_full)
    vm_flush_freed_pages(gc->vm);
  reset_finalizer_tree(gc);

  TIME_STEP("reset");

  /* now we do want the allocator freaking if we go over half */
  gc->in_unsafe_allocation_mode = 0;

  gc->no_further_modifications = 0;

  /* If we have too many idle pages, flush: */
  if (vm_memory_allocated(gc->vm) > ((gc->used_pages << (LOG_APAGE_SIZE + 1)))) {
    vm_flush_freed_pages(gc->vm);
  }

  /* update some statistics */
  if(gc->gc_full) gc->num_major_collects++; else gc->num_minor_collects++;
  if(gc->peak_memory_use < gc->memory_in_use) gc->peak_memory_use = gc->memory_in_use;
  if(gc->gc_full)
    gc->since_last_full = 0;
  else if((float)(gc->memory_in_use - old_mem_use) < (0.1 * (float)old_mem_use))
    gc->since_last_full += 1;
  else if((float)(gc->memory_in_use - old_mem_use) < (0.4 * (float)old_mem_use))
    gc->since_last_full += 5;
  else 
    gc->since_last_full += 10;
  if(gc->gc_full)
    gc->last_full_mem_use = gc->memory_in_use;

  /* inform the system (if it wants us to) that we're done with collection */
  if (gc->GC_collect_end_callback)
    gc->GC_collect_end_callback();
  if (gc->GC_collect_inform_callback)
    gc->GC_collect_inform_callback(gc->gc_full, old_mem_use + old_gen0, gc->memory_in_use);

  TIME_STEP("ended");

  TIME_DONE();

  if (!gc->run_queue)
    next_gc_full = 0;

  /* run any queued finalizers, EXCEPT in the case where this collection was
     triggered by the execution of a finalizer. The outside world needs this
     invariant in some corner case I don't have a reference for. In any case,
     if we run a finalizer after collection, and it triggers a collection,
     we should not run the next finalizer in the queue until the "current"
     finalizer completes its execution */
  if(!gc->running_finalizers) {
    gc->running_finalizers = 1;

    /* Finalization might allocate, which might need park: */
    gc->park_save[0] = gc->park[0];
    gc->park_save[1] = gc->park[1];
    gc->park[0] = NULL;
    gc->park[1] = NULL;

    while(gc->run_queue) {
      struct finalizer *f;
      void **saved_gc_variable_stack;

      f = gc->run_queue; gc->run_queue = gc->run_queue->next;
      if(!gc->run_queue) gc->last_in_queue = NULL;

      GCDEBUG((DEBUGOUTF, "Running finalizers %p for pointer %p (lvl %i)\n", f, f->p, f->eager_level));
      saved_gc_variable_stack = GC_variable_stack;
      f->f(f->p, f->data);
      GC_variable_stack = saved_gc_variable_stack;
    }
#ifdef NEWGC_BTC_ACCOUNT
    BTC_run_account_hooks(gc);
#endif
    gc->running_finalizers = 0;

    gc->park[0] = gc->park_save[0];
    gc->park[1] = gc->park_save[1];
    gc->park_save[0] = NULL;
    gc->park_save[1] = NULL;
  }

  DUMP_HEAP(); CLOSE_DEBUG_FILE();

  if (next_gc_full)
    gc->full_needed_for_finalization = 1;
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 908 of file newgc.c.

{
  /* Report the allocator's page size/alignment to clients.  The
     signature was lost in this dump — presumably GC_alloc_alignment
     (defined at newgc.c line 908); confirm against the source. */
  return APAGE_SIZE;
}
long GC_compute_alloc_size ( long  sizeb)

Definition at line 869 of file newgc.c.

void GC_dump ( void  )

Definition at line 2418 of file newgc.c.

Here is the call graph for this function:

void GC_dump_with_traces ( int  flags,
GC_get_type_name_proc  get_type_name,
GC_get_xtagged_name_proc  get_xtagged_name,
GC_for_each_found_proc  for_each_found,
short  trace_for_tag,
GC_print_tagged_value_proc  print_tagged_value,
int  path_length_limit 
)

Definition at line 2255 of file newgc.c.

{
  /* Dump heap statistics: per-tag object counts/sizes for tagged,
     big, and medium pages, per-page-type generation-1 usage, and
     overall memory numbers.  When `for_each_found` is given it is
     invoked on every object whose tag matches `trace_for_tag`
     (collection is suppressed while doing so). */
  NewGC *gc = GC_get_GC();
  mpage *page;
  int i;
  static unsigned long counts[MAX_DUMP_TAG], sizes[MAX_DUMP_TAG];

  reset_object_traces();
  if (for_each_found)
    gc->dumping_avoid_collection++;  /* callbacks must not trigger a GC */

  /* Traverse tagged pages to count objects: */
  for (i = 0; i < MAX_DUMP_TAG; i++) {
    counts[i] = sizes[i] = 0;
  }
  for (page = gc->gen1_pages[PAGE_TAGGED]; page; page = page->next) {
    void **start = PAGE_START_VSS(page);
    void **end = PAGE_END_VSS(page);

    while(start < end) {
      objhead *info = (objhead *)start;
      if(!info->dead) {
        void *obj_start = OBJHEAD_TO_OBJPTR(start);
        unsigned short tag = *(unsigned short *)obj_start;
        ASSERT_TAG(tag);
        if (tag < MAX_DUMP_TAG) {
          counts[tag]++;
          sizes[tag] += info->size;
        }
        if (tag == trace_for_tag) {
          register_traced_object(obj_start);
          if (for_each_found)
            for_each_found(obj_start);
        }
      }
      start += info->size;
    }
  }
  for (page = gc->gen1_pages[PAGE_BIG]; page; page = page->next) {
    if (page->page_type == PAGE_TAGGED) {
      void **start = PAGE_START_VSS(page);
      void *obj_start = OBJHEAD_TO_OBJPTR(start);
      unsigned short tag = *(unsigned short *)obj_start;
      ASSERT_TAG(tag);
      if (tag < MAX_DUMP_TAG) {
        counts[tag]++;
        sizes[tag] += gcBYTES_TO_WORDS(page->size);
      }
      /* a negated trace_for_tag apparently selects big pages only —
         confirm against callers before relying on this */
      if ((tag == trace_for_tag)
          || (tag == -trace_for_tag)) {
        register_traced_object(obj_start);
        if (for_each_found)
          for_each_found(obj_start);
      }
    }
  }
  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
    for (page = gc->med_pages[i]; page; page = page->next) {
      void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
      void **end = PPTR(NUM(page->addr) + APAGE_SIZE - page->size);

      while(start <= end) {
        objhead *info = (objhead *)start;
        if (!info->dead) {
          if (info->type == PAGE_TAGGED) {
            void *obj_start = OBJHEAD_TO_OBJPTR(start);
            unsigned short tag = *(unsigned short *)obj_start;
            ASSERT_TAG(tag);
            if (tag < MAX_DUMP_TAG) {
              counts[tag]++;
              sizes[tag] += info->size;
            }
            if (tag == trace_for_tag) {
              /* fixed: was the undeclared identifier `obj_staart` */
              register_traced_object(obj_start);
              if (for_each_found)
                for_each_found(obj_start);
            }
          }
        }
        start += info->size;
      }
    }
  }

  GCPRINT(GCOUTF, "Begin MzScheme3m\n");
  for (i = 0; i < MAX_DUMP_TAG; i++) {
    if (counts[i]) {
      char *tn, buf[256];
      if (get_type_name)
        tn = get_type_name((Type_Tag)i);
      else
        tn = NULL;
      if (!tn) {
        sprintf(buf, "unknown,%d", i);
        tn = buf;
      }
      GCPRINT(GCOUTF, "  %20.20s: %10ld %10ld\n", tn, counts[i], gcWORDS_TO_BYTES(sizes[i]));
    }
  }
  GCPRINT(GCOUTF, "End MzScheme3m\n");

  GCWARN((GCOUTF, "Generation 0: %lu of %li bytes used\n", (unsigned long) gen0_size_in_use(gc), gc->gen0.max_size));

  for(i = 0; i < PAGE_TYPES; i++) {
    unsigned long total_use = 0, count = 0;

    for(page = gc->gen1_pages[i]; page; page = page->next) {
      total_use += page->size;
      count++;
    }
    GCWARN((GCOUTF, "Generation 1 [%s]: %li bytes used in %li pages\n", 
            type_name[i], total_use, count));
  }

  GCWARN((GCOUTF, "Generation 1 [medium]:"));
  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
    if (gc->med_pages[i]) {
      long count = 0, page_count = 0;
      for (page = gc->med_pages[i]; page; page = page->next) {
        void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
        void **end = PPTR(NUM(page->addr) + APAGE_SIZE - page->size);

        page_count++;

        while(start <= end) {
          objhead *info = (objhead *)start;
          if (!info->dead) {
            count += info->size;
          }
          start += info->size;
        }
      }
      GCWARN((GCOUTF, " %li [%li/%li]", count, page_count, gc->med_pages[i]->size));
    }
  }
  GCWARN((GCOUTF, "\n"));


  GCWARN((GCOUTF,"\n"));
  GCWARN((GCOUTF,"Current memory use: %li\n", GC_get_memory_use(NULL)));
  GCWARN((GCOUTF,"Peak memory use after a collection: %li\n", gc->peak_memory_use));
  GCWARN((GCOUTF,"Allocated (+reserved) page sizes: %li (+%li)\n", 
          gc->used_pages * APAGE_SIZE, 
          vm_memory_allocated(gc->vm) - (gc->used_pages * APAGE_SIZE)));
  GCWARN((GCOUTF,"# of major collections: %li\n", gc->num_major_collects));
  GCWARN((GCOUTF,"# of minor collections: %li\n", gc->num_minor_collects));
  GCWARN((GCOUTF,"# of installed finalizers: %i\n", gc->num_fnls));
  GCWARN((GCOUTF,"# of traced ephemerons: %i\n", gc->num_last_seen_ephemerons));

  if (flags & GC_DUMP_SHOW_TRACE) {
    print_traced_objects(path_length_limit, get_type_name, get_xtagged_name, print_tagged_value);
  }

  if (for_each_found)
    --gc->dumping_avoid_collection;
}

Here is the call graph for this function:

void GC_finalization_weak_ptr ( void **  p,
int  offset 
)

Definition at line 1370 of file newgc.c.

{
  /* Register a weak-finalizer record for pointer `p` at word offset
     `offset`.  `p` is "parked" in gc->park[0] across the allocation
     so that a collection triggered by GC_malloc_atomic cannot move
     it without this code seeing the updated address. */
  NewGC *gc = GC_get_GC();
  Weak_Finalizer *wfnl;

  gc->park[0] = p; wfnl = GC_malloc_atomic(sizeof(Weak_Finalizer));
  p = gc->park[0]; gc->park[0] = NULL;  /* re-read possibly-moved p */
  wfnl->p = p; wfnl->offset = offset * sizeof(void*); wfnl->saved = NULL;
  wfnl->next = gc->weak_finalizers; gc->weak_finalizers = wfnl;  /* push */
}
void GC_fixup ( void pp)

Definition at line 2200 of file newgc.c.

{
  /* Repair one reference after copying/compaction: if *pp points at
     an object that was moved, replace *pp with the forwarding
     pointer stored at the object's old location. */
  NewGC *gc;
  mpage *page;
  void *p = *(void**)pp;

  /* skip NULL and odd (fixnum-tagged) values */
  if(!p || (NUM(p) & 0x1))
    return;

  gc = GC_get_GC();
  if((page = pagemap_find_page(gc->page_maps, p))) {
    objhead *info;

    if(page->size_class) return;  /* big/medium objects are not moved */
    info = OBJPTR_TO_OBJHEAD(p);
    if(info->mark && info->moved) 
      *(void**)pp = *(void**)p;  /* old payload slot holds the new address */
    else GCDEBUG((DEBUGOUTF, "Not repairing %p from %p (not moved)\n",p,pp));
  } else GCDEBUG((DEBUGOUTF, "Not repairing %p from %p (no page)\n", p, pp));
}

Here is the call graph for this function:

void* GC_fixup_self ( void p)

Definition at line 2195 of file newgc.c.

{
  /* Identity fixup: this collector never needs to relocate the
     object a fixup is running on, so the pointer is returned as-is. */
  return p;
}
void GC_free ( void p)

Definition at line 866 of file newgc.c.

{} /* intentional no-op: memory is reclaimed only by collection */

Definition at line 3322 of file newgc.c.

{
  /* Tear down the current GC instance: release the signal handler,
     gen-0 big pages, and every gen-1 page (unprotecting each first so
     freeing cannot fault), then the mark/fixup tables, the page map,
     the protect-range structure, and the GC record itself.
     (Signature lost in this dump — presumably GC_free_all, defined at
     newgc.c line 3322; confirm against the source.) */
  NewGC *gc = GC_get_GC();
  int i;
  mpage *work;
  mpage *next;
  PageMap pagemap = gc->page_maps;

  remove_signal_handler(gc);

  gen0_free_big_pages(gc);

  for(i = 0; i < PAGE_TYPES; i++) {
    for (work = gc->gen1_pages[i]; work; work = next) {
      next = work->next;

      if (work->mprotected)
        vm_protect_pages(work->addr, (work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE, 1);
      gen1_free_mpage(pagemap, work);
    }
  }

  free(gc->mark_table);
  free(gc->fixup_table);

  free_page_maps(gc->page_maps);

  free(gc->protect_range);

  vm_flush_freed_pages(gc->vm);
  vm_free(gc->vm);
  free(gc);
}

Here is the call graph for this function:

Definition at line 1828 of file newgc.c.

{
  /* Force a full (major) collection on the current GC instance. */
  NewGC *gc = GC_get_GC();
  garbage_collect(gc, 1);
}

Here is the call graph for this function:

long GC_get_memory_use ( void o)

Definition at line 1854 of file newgc.c.

{
  /* Report memory use.  With accounting enabled and a non-NULL
     custodian `o`, report that custodian's charged total; otherwise
     report live gen-0 bytes plus tracked gen-1 memory. */
  NewGC *gc = GC_get_GC();
#ifdef NEWGC_BTC_ACCOUNT
  if(o) {
    return BTC_get_memory_use(gc, o);
  }
#endif
  return gen0_size_in_use(gc) + gc->memory_in_use;
}

Here is the call graph for this function:

unsigned long GC_get_stack_base ( void  )

Definition at line 1204 of file newgc.c.

{
  /* Return the recorded C-stack base for the current GC instance. */
  NewGC *gc = GC_get_GC();
  return gc->stack_base;
}

Definition at line 1188 of file newgc.c.

{ 
  /* Return the current head of the variable-stack chain (signature
     lost in this dump — defined at newgc.c line 1188). */
  return GC_variable_stack;
}
void GC_init_type_tags ( int  count,
int  pair,
int  mutable_pair,
int  weakbox,
int  ephemeron,
int  weakarray,
int  custbox 
)

Definition at line 1734 of file newgc.c.

{
  /* One-time registration of the core type tags; a second call is a
     programming error and aborts the process. */
  static int initialized = 0;

  if (initialized) {
    GCPRINT(GCOUTF, "GC_init_type_tags should only be called once!\n");
    abort();
  }

  initialized = 1;
  init_type_tags_worker(NULL, count, pair, mutable_pair, weakbox, ephemeron, weakarray, custbox);
}

Here is the call graph for this function:

long GC_initial_word ( int  request_size)

Definition at line 874 of file newgc.c.

{
  /* Build the objhead word that would prefix an object of
     `request_size` bytes, returned as a plain long so callers laying
     out objects manually can store it directly. */
  long w = 0;
  objhead info;

  const size_t allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size);

#ifdef MZ_USE_PLACES
  memcpy(&info, &GC_objhead_template, sizeof(objhead));
#else
  memset(&info, 0, sizeof(objhead));
#endif

  info.size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumped us up to the next word boundary */
  memcpy(&w, &info, sizeof(objhead));

  return w;
}
void GC_initial_words ( char *  buffer,
long  sizeb 
)

Definition at line 893 of file newgc.c.

{
  /* Write the objhead for an object of `sizeb` bytes directly into
     the start of `buffer` (used by code that lays out objects in a
     caller-provided buffer). */
  objhead *info = (objhead *)buffer;

  const size_t allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(sizeb);

#ifdef MZ_USE_PLACES
  memcpy(info, &GC_objhead_template, sizeof(objhead));
#else
  memset(info, 0, sizeof(objhead));
#endif

  info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumped us up to the next word boundary */
}

Definition at line 396 of file newgc.c.

{
  /* True iff `p` falls on a page managed by this GC (signature lost
     in this dump — defined at newgc.c line 396; presumably
     GC_is_allocated). */
  NewGC *gc = GC_get_GC();
  return !!pagemap_find_page(gc->page_maps, p);
}

Here is the call graph for this function:

void* GC_malloc ( size_t  s)

Definition at line 855 of file newgc.c.

{ return allocate(s, PAGE_ARRAY); } /* array of collectable pointers */

Here is the call graph for this function:

Definition at line 861 of file newgc.c.

{ return allocate_medium(s, PAGE_ARRAY); } /* medium-page array; presumably GC_malloc_allow_interior (line 861) */

Here is the call graph for this function:

void* GC_malloc_array_tagged ( size_t  s)

Definition at line 858 of file newgc.c.

{ return allocate(s, PAGE_TARRAY); } /* array of tagged objects */

Here is the call graph for this function:

void* GC_malloc_atomic ( size_t  s)

Definition at line 859 of file newgc.c.

{ return allocate(s, PAGE_ATOMIC); } /* pointer-free (atomic) memory */

Here is the call graph for this function:

Definition at line 862 of file newgc.c.

{ return allocate_big(s, PAGE_ATOMIC); } /* big-page atomic allocation; presumably GC_malloc_atomic_allow_interior (line 862) */

Here is the call graph for this function:

Definition at line 860 of file newgc.c.

{ void *p = ofm_malloc_zero(s); return p; } /* zeroed, never collected; presumably GC_malloc_atomic_uncollectable (line 860) */

Here is the call graph for this function:

Definition at line 864 of file newgc.c.

{ return fast_malloc_one_small_tagged(s, 1); } /* dirty=1: caller initializes payload; presumably GC_malloc_one_small_dirty_tagged (line 864) */

Here is the call graph for this function:

Definition at line 865 of file newgc.c.

{ return fast_malloc_one_small_tagged(s, 0); } /* dirty=0: payload zeroed; presumably GC_malloc_one_small_tagged (line 865) */

Here is the call graph for this function:

void* GC_malloc_one_tagged ( size_t  s)

Definition at line 856 of file newgc.c.

{ return allocate(s, PAGE_TAGGED); } /* single tagged object */

Here is the call graph for this function:

void* GC_malloc_one_xtagged ( size_t  s)

Definition at line 857 of file newgc.c.

{ return allocate(s, PAGE_XTAGGED); } /* single xtagged object (marked via GC_mark_xtagged) */

Here is the call graph for this function:

void* GC_malloc_pair ( void car,
void cdr 
)

Definition at line 805 of file newgc.c.

{
  /* Specialized fast-path allocator for cons pairs: bump-allocate
     from the gen-0 page when possible.  On overflow, `car` and `cdr`
     are parked in gc->park so a triggered collection can update them
     before the pair is initialized. */
  unsigned long newptr;
  void *pair;
  const size_t allocate_size = PAIR_SIZE_IN_BYTES;

  newptr = GC_gen0_alloc_page_ptr + allocate_size;
  ASSERT_VALID_OBJPTR(newptr);

  if(OVERFLOWS_GEN0(newptr)) {
    /* slow path: allocation may collect, so protect car/cdr */
    NewGC *gc = GC_get_GC();
    gc->park[0] = car;
    gc->park[1] = cdr;
    pair = GC_malloc_one_tagged(sizeof(Scheme_Simple_Object));
    car = gc->park[0];
    cdr = gc->park[1];
    gc->park[0] = NULL;
    gc->park[1] = NULL;
  }
  else {
    objhead *info = (objhead *) PTR(GC_gen0_alloc_page_ptr);
    GC_gen0_alloc_page_ptr = newptr;

#ifdef MZ_USE_PLACES
    memcpy(info, &GC_objhead_template, sizeof(objhead));
#else
    memset(info, 0, sizeof(objhead)); /* init objhead */
#endif


    /* info->type = type; */ /* We know that the type field is already 0 */
    info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumped us up to the next word boundary */

    pair = OBJHEAD_TO_OBJPTR(info);
    ASSERT_VALID_OBJPTR(pair);
  }
  
  /* initialize pair */
  {
    Scheme_Simple_Object *obj = (Scheme_Simple_Object *) pair;
    obj->iso.so.type = scheme_pair_type;
    obj->iso.so.keyex = 0; /* init first word of SchemeObject to 0 */
    obj->u.pair_val.car = car;
    obj->u.pair_val.cdr = cdr;
  }

  return pair;
}

Here is the call graph for this function:

Definition at line 913 of file newgc.c.

Definition at line 863 of file newgc.c.

{ return allocate_medium(s, PAGE_TAGGED); } /* medium-page tagged object; presumably GC_malloc_tagged_allow_interior (line 863) */

Here is the call graph for this function:

void GC_mark ( const void const_p)

Definition at line 1874 of file newgc.c.

{
  /* Mark one object reachable.  Big pages are promoted in place and
     queued; medium objects get their header marked; small gen-1
     objects are marked in place; small gen-0 objects are COPIED into
     a gen-1 page, with a forwarding pointer left in the old payload. */
  mpage *page;
  void *p = (void*)const_p;
  NewGC *gc;

  /* NULL and odd (fixnum-tagged) values are not heap pointers */
  if(!p || (NUM(p) & 0x1)) {
    GCDEBUG((DEBUGOUTF, "Not marking %p (bad ptr)\n", p));
    return;
  }

  gc = GC_get_GC();
  if(!(page = pagemap_find_page(gc->page_maps, p))) {
    GCDEBUG((DEBUGOUTF,"Not marking %p (no page)\n",p));
    return;
  }

  /* toss this over to the BTC mark routine if we're doing accounting */
  if(gc->doing_memory_accounting) { 
#ifdef NEWGC_BTC_ACCOUNT
    BTC_memory_account_mark(gc, page, p); return; 
#endif
  }

  if(page->size_class) {
    if(page->size_class > 1) {
      /* This is a bigpage. The first thing we do is see if its been marked
         previously */
      if(page->size_class != 2) {
        GCDEBUG((DEBUGOUTF, "Not marking %p on big %p (already marked)\n", p, page));
        return;
      }
      /* in this case, it has not. So we want to mark it, first off. */
      page->size_class = 3;  /* 2 = unmarked big page, 3 = marked */

      /* if this is in the nursery, we want to move it out of the nursery */
      if(!page->generation) {
        page->generation = 1;

        /* remove page */
        if(page->prev) page->prev->next = page->next; else
          gc->gen0.big_pages = page->next;
        if(page->next) page->next->prev = page->prev;

        backtrace_new_page(gc, page);

        /* add to gen1 */
        page->next = gc->gen1_pages[PAGE_BIG]; 
        page->prev = NULL;
        if(page->next) page->next->prev = page;
        gc->gen1_pages[PAGE_BIG] = page;

        /* if we're doing memory accounting, then we need to make sure the
           btc_mark is right */
#ifdef NEWGC_BTC_ACCOUNT
        BTC_set_btc_mark(gc, BIG_PAGE_TO_OBJHEAD(page));
#endif
      }

      page->marked_on = 1;
      record_backtrace(page, BIG_PAGE_TO_OBJECT(page));
      GCDEBUG((DEBUGOUTF, "Marking %p on big page %p\n", p, page));
      /* Finally, we want to add this to our mark queue, so we can 
         propagate its pointers */
      push_ptr(p);
    } else {
      /* A medium page. */
      objhead *info = MED_OBJHEAD(p, page->size);
      if (info->mark) {
        GCDEBUG((DEBUGOUTF,"Not marking %p (already marked)\n", p));
        return;
      }
      info->mark = 1;
      page->marked_on = 1;
      p = OBJHEAD_TO_OBJPTR(info);
      backtrace_new_page_if_needed(gc, page);
      record_backtrace(page, p);
      push_ptr(p);
    }
  } else {
    objhead *ohead = OBJPTR_TO_OBJHEAD(p);

    if(ohead->mark) {
      GCDEBUG((DEBUGOUTF,"Not marking %p (already marked)\n", p));
      return;
    }

    /* what we do next depends on whether this is a gen0 or gen1 
       object */
    if(page->generation) {
      /* this is a generation 1 object. This means we are not going
         to move it, we don't have to check to see if it's an atomic
         object masquerading as a tagged object, etc. So all we do
         is add the pointer to the mark queue and note on the page
         that we marked something on it*/
      if((NUM(page->addr) + page->previous_size) <= NUM(p)) {
        GCDEBUG((DEBUGOUTF, "Marking %p (leaving alone)\n", p));
        ohead->mark = 1;
        page->marked_on = 1;
        page->previous_size = PREFIX_SIZE;
        page->live_size += ohead->size;
        record_backtrace(page, p);
        push_ptr(p);
      } else GCDEBUG((DEBUGOUTF, "Not marking %p (it's old; %p / %i)\n",
                      p, page, page->previous_size));
    } else {
      /* this is a generation 0 object. This means that we do have
         to do all of the above. Fun, fun, fun. */
      unsigned short type = ohead->type;
      mpage *work;
      size_t size;
      objhead *newplace;

      /* first check to see if this is an atomic object masquerading
         as a tagged object; if it is, then convert it */
      if(type == PAGE_TAGGED) {
        /* mark-table entries below PAGE_TYPES encode a page type
           rather than a real mark procedure */
        if((unsigned long)gc->mark_table[*(unsigned short*)p] < PAGE_TYPES)
          type = ohead->type = (int)(unsigned long)gc->mark_table[*(unsigned short*)p];
      }

      /* now set us up for the search for where to put this thing */
      work = gc->gen1_pages[type];
      size = gcWORDS_TO_BYTES(ohead->size);

      /* search for a page with the space to spare */
      if (work && ((work->size + size) >= APAGE_SIZE))
        work = NULL;

      /* now either fetch where we're going to put this object or make
         a new page if we couldn't find a page with space to spare */
      if(work) {
        if (!work->added) {
          pagemap_add(gc->page_maps, work);
          work->added = 1;
        }
        work->marked_on = 1;
        if (work->mprotected) {
          work->mprotected = 0;
          vm_protect_pages(work->addr, APAGE_SIZE, 1);
        }
        newplace = PTR(NUM(work->addr) + work->size);
      } else {
        /* Allocate and prep the page */
        work = malloc_mpage();
        work->addr = malloc_dirty_pages(gc, APAGE_SIZE, APAGE_SIZE);
        work->generation = 1;
        work->page_type = type;
        work->size = work->previous_size = PREFIX_SIZE;
        work->marked_on = 1;
        backtrace_new_page(gc, work);
        work->next = gc->gen1_pages[type];
        work->prev = NULL;
        if(work->next)
          work->next->prev = work;
        pagemap_add(gc->page_maps, work);
        work->added = 1;
        gc->gen1_pages[type] = work;
        newplace = PAGE_TO_OBJHEAD(work);
      }

      /* update the size */
      work->size += size;
      work->has_new = 1;

      /* transfer the object */
      ohead->mark = 1; /* mark is copied to newplace, too */
      if (size == PAIR_SIZE_IN_BYTES) 
        /* pairs are common, and compiler tends to inline constant-size memcpys */
        memcpy(newplace, ohead, PAIR_SIZE_IN_BYTES);
      else
        memcpy(newplace, ohead, size);
      /* mark the old location as marked and moved, and the new location
         as marked */
      ohead->moved = 1;
      /* if we're doing memory accounting, then we need the btc_mark
         to be set properly */
#ifdef NEWGC_BTC_ACCOUNT
      BTC_set_btc_mark(gc, newplace);
#endif
      
      {
        /* drop the new location of the object into the forwarding space
           and into the mark queue */
        void *newp = OBJHEAD_TO_OBJPTR(newplace);
        /* record why we marked this one (if enabled) */
        record_backtrace(work, newp);
        /* set forwarding pointer */
        GCDEBUG((DEBUGOUTF,"Marking %p (moved to %p on page %p)\n", p, newp, work));
        *(void**)p = newp;
        push_ptr(newp);
      }
    }
  }
}

Here is the call graph for this function:

Definition at line 1586 of file newgc.c.

{
  NewGC *gc = GC_get_GC();
  return gc->doing_memory_accounting;
}

Definition at line 270 of file newgc.c.

{
  return 0;
}

Definition at line 275 of file newgc.c.

{
  return 0;
}
void GC_register_new_thread ( void *t,
void *c 
)

Definition at line 1579 of file newgc.c.

/* Associates a newly created thread t with custodian c for BTC-based
   memory accounting; compiles to a no-op when accounting is disabled. */
{
#ifdef NEWGC_BTC_ACCOUNT
  BTC_register_new_thread(t, c);
#endif
}

Definition at line 1556 of file newgc.c.

/* Records custodian c as the root of the custodian hierarchy for
   BTC memory accounting; no-op without NEWGC_BTC_ACCOUNT. */
{
#ifdef NEWGC_BTC_ACCOUNT
  BTC_register_root_custodian(c);
#endif
}
void GC_register_thread ( void *t,
void *c 
)

Definition at line 1573 of file newgc.c.

/* (Re-)associates existing thread t with custodian c for BTC-based
   memory accounting; no-op when accounting is disabled. */
{
#ifdef NEWGC_BTC_ACCOUNT
  BTC_register_thread(t, c);
#endif
}
void GC_register_traversers ( short  tag,
Size_Proc  size,
Mark_Proc  mark,
Fixup_Proc  fixup,
int  constant_Size,
int  atomic 
)

Definition at line 1834 of file newgc.c.

/* Installs the mark and fixup procedures for a tagged-object type.
   If the type is atomic (contains no pointers) the mark-table slot is
   set to the sentinel (Mark_Proc)PAGE_ATOMIC instead of a real proc.
   NOTE(review): the size and constant_Size parameters are not used in
   this body. */
{
  NewGC *gc = GC_get_GC();

  int mark_tag = tag;

#ifdef NEWGC_BTC_ACCOUNT
  /* accounting may redirect marking for this tag to a wrapper tag */
  mark_tag = BTC_get_redirect_tag(gc, mark_tag);
#endif

#if MZ_GC_BACKTRACE
  /* Keep tagged objects in tagged space: */
  atomic = 0;
#endif

  gc->mark_table[mark_tag]  = atomic ? (Mark_Proc)PAGE_ATOMIC : mark;
  gc->fixup_table[tag]      = fixup;
}
void* GC_resolve ( void *p)

Definition at line 2179 of file newgc.c.

/* Returns the current address of object p: if p lives on a movable
   (small, gen-0/gen-1) page and has already been relocated by the
   collector, follow its forwarding pointer; otherwise return p. */
{
  NewGC *gc = GC_get_GC();
  mpage *page = pagemap_find_page(gc->page_maps, p);
  objhead *info;

  /* pointers outside GC pages, and big/medium objects (nonzero
     size_class), are never moved */
  if(!page || page->size_class)
    return p;

  info = OBJPTR_TO_OBJHEAD(p);
  if(info->mark && info->moved)
    return *(void**)p;  /* forwarding pointer written by the mover */
  else 
    return p;
}

Here is the call graph for this function:

int GC_set_account_hook ( int  type,
void *c1,
unsigned long  b,
void *c2 
)

Definition at line 1563 of file newgc.c.

/* Adds an accounting hook (e.g. a memory-limit trigger) when BTC
   accounting is compiled in; returns 1 on success, 0 when accounting
   is unavailable so the caller can tell the hook was not installed. */
{
#ifdef NEWGC_BTC_ACCOUNT
  BTC_add_account_hook(type, c1, c2, b); 
  return 1;
#else
  return 0;
#endif
}

Definition at line 165 of file newgc.c.

void GC_set_collect_inform_callback ( void(*)(int major_gc, long pre_used, long post_used)  func)

Definition at line 172 of file newgc.c.

Here is the caller graph for this function:

Definition at line 158 of file newgc.c.

void GC_set_get_thread_stack_base ( unsigned long(*)(void)  func)

Definition at line 1210 of file newgc.c.

                                                               {
  NewGC *gc = GC_get_GC();
  /* callback later used by get_stack_base() to locate the current
     thread's stack base */
  gc->GC_get_thread_stack_base = func;
}

Definition at line 1198 of file newgc.c.

{
  NewGC *gc = GC_get_GC();
  gc->stack_base = (unsigned long)base;
}

Definition at line 1193 of file newgc.c.

Definition at line 1627 of file newgc.c.

Here is the call graph for this function:

static mpage* gen0_create_new_mpage ( NewGC gc) [inline, static]

Definition at line 668 of file newgc.c.

                                                      {
  mpage *newmpage;

  /* a nursery page is GEN0_PAGE_SIZE bytes but only APAGE_SIZE aligned;
     its payload starts after the PREFIX_SIZE header area */
  newmpage = malloc_mpage(gc);
  newmpage->addr = malloc_dirty_pages(gc, GEN0_PAGE_SIZE, APAGE_SIZE);
  newmpage->size_class = 0;
  newmpage->size = PREFIX_SIZE;
  /* register every APAGE_SIZE slice of the large page in the page map */
  pagemap_add_with_size(gc->page_maps, newmpage, GEN0_PAGE_SIZE);

  return newmpage;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void gen0_free_big_pages ( NewGC gc) [inline, static]

Definition at line 2896 of file newgc.c.

                                                  {
  mpage *work;
  mpage *next;
  PageMap pagemap = gc->page_maps;

  /* release every big page allocated out of the nursery, unmapping
     each from the page map and returning its memory to the VM layer */
  for(work = gc->gen0.big_pages; work; work = next) {
    next = work->next;   /* grab the link before freeing the node */
    pagemap_remove(pagemap, work);
    free_pages(gc, work->addr, round_to_apage_size(work->size));
    free_mpage(work);
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void gen0_free_mpage ( NewGC gc,
mpage page 
) [inline, static]

Definition at line 680 of file newgc.c.

Here is the call graph for this function:

Here is the caller graph for this function:

static size_t gen0_size_in_use ( NewGC gc) [inline, static]

Definition at line 689 of file newgc.c.

Here is the caller graph for this function:

static void gen1_free_mpage ( PageMap  pagemap,
mpage page 
) [inline, static]

Definition at line 2875 of file newgc.c.

                                                                 {
  /* big pages (size_class > 1) may span multiple OS pages; small and
     medium pages occupy exactly one APAGE_SIZE region */
  size_t real_page_size = (page->size_class > 1) ? round_to_apage_size(page->size) : APAGE_SIZE;
  pagemap_remove(pagemap, page);
  free_backtrace(page);
  /* NOTE(review): frees through the thread-local GC (the GC macro,
     i.e. GC_get_GC()) rather than a gc parameter -- confirm this is
     intended when multiple GC instances (places) exist */
  free_pages(GC, page->addr, real_page_size);
  free_mpage(page);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* get_stack_base ( NewGC *gc) [inline, static]

Definition at line 1215 of file newgc.c.

                                              {
  /* prefer the per-thread callback when one has been registered via
     GC_set_get_thread_stack_base; otherwise use the recorded base */
  if (gc->GC_get_thread_stack_base) return (void*) gc->GC_get_thread_stack_base();
  return (void*) gc->stack_base;
}

Here is the caller graph for this function:

static NewGC* init_type_tags_worker ( NewGC parentgc,
int  count,
int  pair,
int  mutable_pair,
int  weakbox,
int  ephemeron,
int  weakarray,
int  custbox 
) [static]

Definition at line 1692 of file newgc.c.

/* Allocates and fully initializes a NewGC instance, installing it as
   the thread-specific GC.  parentgc, when non-NULL, supplies shared
   mark/fixup tables (so traversers are registered only for the first
   GC).  The weak/ephemeron/custodian tags are recorded for special
   handling during marking.
   NOTE(review): the count, pair, and mutable_pair parameters are not
   used in this body. */
{
  NewGC *gc;

  gc = ofm_malloc_zero(sizeof(NewGC));
  /* NOTE sets the constructed GC as the new Thread Specific GC. */
  GC_set_GC(gc);

  gc->weak_box_tag    = weakbox;
  gc->ephemeron_tag   = ephemeron;
  gc->weak_array_tag  = weakarray;
# ifdef NEWGC_BTC_ACCOUNT
  gc->cust_box_tag    = custbox;
# endif

  NewGC_initialize(gc, parentgc);


  /* Our best guess at what the OS will let us allocate: */
  gc->max_pages_in_heap = determine_max_heap_size() / APAGE_SIZE;
  /* Not all of that memory is available for allocating GCable
     objects.  There's the memory used by the stack, code,
     malloc()/free()ed memory, etc., and there's also the
     administrative structures for the GC itself. */
  gc->max_pages_for_use = gc->max_pages_in_heap / 2;

  resize_gen0(gc, GEN0_INITIAL_SIZE);

  /* a child GC shares the parent's tables, so traversers for the
     weak types are registered only once, by the first GC */
  if (!parentgc) {
    GC_register_traversers(gc->weak_box_tag, size_weak_box, mark_weak_box, fixup_weak_box, 0, 0);
    GC_register_traversers(gc->ephemeron_tag, size_ephemeron, mark_ephemeron, fixup_ephemeron, 0, 0);
    GC_register_traversers(gc->weak_array_tag, size_weak_array, mark_weak_array, fixup_weak_array, 0, 0);
  }
  initialize_signal_handler(gc);
  /* park/park_save are permanent roots used to protect values across
     GC-triggering operations */
  GC_add_roots(&gc->park, (char *)&gc->park + sizeof(gc->park) + 1);
  GC_add_roots(&gc->park_save, (char *)&gc->park_save + sizeof(gc->park_save) + 1);

  initialize_protect_page_ranges(gc->protect_range, malloc_dirty_pages(gc, APAGE_SIZE, APAGE_SIZE), APAGE_SIZE);

  return gc;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static int is_finalizable_page ( NewGC *gc,
void *p 
) [static]

Definition at line 1276 of file newgc.c.

/* An object may carry a finalizer only if it lives on a GC-owned page
   (i.e. the page map knows about its address). */
{
  return (pagemap_find_page(gc->page_maps, p) ? 1 : 0);
}

Here is the call graph for this function:

static void* malloc_dirty_pages ( NewGC gc,
size_t  len,
size_t  alignment 
) [static]

Definition at line 245 of file newgc.c.

/* Allocates len bytes of alignment-aligned page memory whose contents
   may be garbage (final vm_malloc_pages argument 1 = "dirty ok").
   Never returns NULL: aborts via out_of_memory() on failure. */
{
  void *ptr;
  check_used_against_max(gc, len);
  ptr = vm_malloc_pages(gc->vm, len, alignment, 1);
  if (!ptr) out_of_memory();
  return ptr;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static mpage* malloc_mpage ( ) [static]

Definition at line 464 of file newgc.c.

/* Allocates one zero-initialized mpage descriptor; aborts on OOM
   (via ofm_malloc_zero), so the result is never NULL. */
{
  return (mpage *)ofm_malloc_zero(sizeof(mpage));
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* malloc_pages ( NewGC gc,
size_t  len,
size_t  alignment 
) [static]

Definition at line 236 of file newgc.c.

/* Allocates len bytes of alignment-aligned, zero-filled page memory
   (final vm_malloc_pages argument 0 = "must be zeroed").
   Never returns NULL: aborts via out_of_memory() on failure. */
{
  void *ptr;
  check_used_against_max(gc, len);
  ptr = vm_malloc_pages(gc->vm, len, alignment, 0);
  if (!ptr) out_of_memory();
  return ptr;
}

Here is the call graph for this function:

static void mark_backpointers ( NewGC gc) [static]

Definition at line 2526 of file newgc.c.

/* For a minor collection, treats every old->young pointer as a root:
   each gen-1 page whose back_pointers flag is set gets all of its live
   objects pre-marked and queued for propagation.  A full collection
   needs none of this, hence the early guard. */
{
  if(!gc->gc_full) {
    mpage *work;
    int i;
    PageMap pagemap = gc->page_maps;

    /* if this is not a full collection, then we need to mark any pointers
       that point backwards into generation 0, since they're roots. */
    for(i = 0; i < PAGE_TYPES; i++) {
      for(work = gc->gen1_pages[i]; work; work = work->next) {
        if(work->back_pointers) {
          /* these pages are guaranteed not to be write protected, because
             if they were, they wouldn't have this bit set */
          work->marked_on = 1;
          work->previous_size = PREFIX_SIZE;
          pagemap_add(pagemap, work);
          if(work->size_class) {
            /* must be a big page */
            work->size_class = 3;  /* 3 = "big page, marked" */
            push_ptr(BIG_PAGE_TO_OBJECT(work));
          } else {
            if(work->page_type != PAGE_ATOMIC) {
              void **start = PAGE_START_VSS(work);
              void **end = PAGE_END_VSS(work);

              /* walk the page header-to-header, queueing live objects */
              while(start < end) {
                objhead *info = (objhead *)start;
                if(!info->dead) {
                  info->mark = 1;
                  /* This must be a push_ptr, and not a direct call to
                     internal_mark. This is because we need every object
                     in the older heap to be marked out of and noted as
                     marked before we do anything else */
                  push_ptr(OBJHEAD_TO_OBJPTR(start));
                }
                start += info->size;
              }
            }
          }
          work->previous_size = PREFIX_SIZE;
        } else {
          GCDEBUG((DEBUGOUTF,"Setting previous_size on %p to %i\n", work,
                   work->size));
          work->previous_size = work->size;
        }
      }
    }

    /* same treatment for generation-1 medium-object pages */
    for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
      for (work = gc->med_pages[i]; work; work = work->next) {
        if(work->back_pointers) {
          void **start = PPTR(NUM(work->addr) + PREFIX_SIZE);
          void **end = PPTR(NUM(work->addr) + APAGE_SIZE - work->size);
          
          work->marked_on = 1;
          pagemap_add(pagemap, work);

          while(start <= end) {
            objhead *info = (objhead *)start;
            if(!info->dead) {
              info->mark = 1;
              /* This must be a push_ptr (see above) */
              push_ptr(OBJHEAD_TO_OBJPTR(info));
            }
            start += info->size;
          }
        }
      }
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void mark_finalizer_structs ( NewGC gc) [inline, static]

Definition at line 1283 of file newgc.c.

/* Marks the finalizer records themselves (and their closure data) so
   they survive collection.  Note: for pending finalizers only fnl->data
   is marked here -- not fnl->p -- since whether the object is reachable
   determines whether the finalizer should be queued.  Run-queue entries
   mark fnl->p too, because those finalizers will definitely run. */
{
  Fnl *fnl;

  for(fnl = GC_resolve(gc->finalizers); fnl; fnl = GC_resolve(fnl->next)) { 
    set_backtrace_source(fnl, BT_FINALIZER);
    gcMARK(fnl->data); 
    set_backtrace_source(&gc->finalizers, BT_ROOT);
    gcMARK(fnl);
  }
  for(fnl = gc->run_queue; fnl; fnl = fnl->next) {
    set_backtrace_source(fnl, BT_FINALIZER);
    gcMARK(fnl->data);
    gcMARK(fnl->p);
    set_backtrace_source(&gc->run_queue, BT_ROOT);
    gcMARK(fnl);
  }
}  

Here is the call graph for this function:

Here is the caller graph for this function:

static void mark_roots ( NewGC gc) [inline, static]

Definition at line 1260 of file newgc.c.

Here is the caller graph for this function:

static MarkSegment* mark_stack_create_frame ( ) [inline, static]

Definition at line 1460 of file newgc.c.

                                                     {
  /* one mark-stack segment is STACK_PART_SIZE bytes; the MarkSegment
     header sits at the front and pointer slots fill the rest */
  MarkSegment *mark_frame = (MarkSegment*)ofm_malloc(STACK_PART_SIZE);
  mark_frame->next = NULL;
  mark_frame->top  = MARK_STACK_START(mark_frame);
  return mark_frame;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void mark_stack_initialize ( ) [inline, static]

Definition at line 1467 of file newgc.c.

                                           {
  /* This happens at the very beginning */
  /* lazily create the first segment of the global mark stack */
  if(!mark_stack) {
    mark_stack = mark_stack_create_frame();
    mark_stack->prev = NULL;
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void mark_weak_finalizer_structs ( NewGC gc) [inline, static]

Definition at line 1381 of file newgc.c.

/* Marks each weak-finalizer record itself so the list survives; the
   weakly-held object (work->p) is deliberately NOT marked here --
   repair_weak_finalizer_structs drops entries whose p died. */
{
  Weak_Finalizer *work;

  GCDEBUG((DEBUGOUTF, "MARKING WEAK FINALIZERS.\n"));
  for(work = gc->weak_finalizers; work; work = work->next) {
    set_backtrace_source(&gc->weak_finalizers, BT_ROOT);
    gcMARK(work);
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static int marked ( NewGC *gc,
void *p 
) [inline, static]

Definition at line 984 of file newgc.c.

/* Reports whether p is considered live in the current collection.
   Pointers outside GC pages count as marked (they are not collectable). */
{
  mpage *page;

  if(!p) return 0;
  if(!(page = pagemap_find_page(gc->page_maps, p))) return 1;
  if (page->size_class) {
    if (page->size_class > 1) {
      /* big page: size_class 3 means marked, 2 means unmarked
         (see mark_backpointers / repair_heap) */
      return (page->size_class > 2);
    }
  } else {
    /* small page: anything below previous_size survived the previous
       collection and is implicitly live for this one */
    if((NUM(page->addr) + page->previous_size) > NUM(p)) 
      return 1;
  }
  /* medium objects and new small objects: consult the object header */
  return OBJPTR_TO_OBJHEAD(p)->mark;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void NewGC_initialize ( NewGC newgc,
NewGC parentgc 
) [static]

Definition at line 1655 of file newgc.c.

                                                            {
  /* A child GC (places) shares the parent's mark/fixup tables; the
     first GC allocates fresh tables and the master-info bookkeeping. */
  if (parentgc) {
    newgc->mark_table  = parentgc->mark_table;
    newgc->fixup_table = parentgc->fixup_table;
  }
  else {
#ifdef MZ_USE_PLACES
    NewGCMasterInfo_initialize();
#endif
    newgc->mark_table  = ofm_malloc_zero(NUMBER_OF_TAGS * sizeof (Mark_Proc)); 
    newgc->fixup_table = ofm_malloc_zero(NUMBER_OF_TAGS * sizeof (Fixup_Proc)); 
#ifdef NEWGC_BTC_ACCOUNT
    BTC_initialize_mark_table(newgc);
#endif
  }

#ifdef MZ_USE_PLACES
  NewGCMasterInfo_get_next_id(newgc);
#endif

  mark_stack_initialize();

  /* 64-bit builds use a 3-level page map; 32-bit uses one flat array */
#ifdef SIXTY_FOUR_BIT_INTEGERS
  newgc->page_maps = ofm_malloc_zero(PAGEMAP64_LEVEL1_SIZE * sizeof (mpage***)); 
#else
  newgc->page_maps = ofm_malloc_zero(PAGEMAP32_SIZE * sizeof (mpage*)); 
#endif

  newgc->vm = vm_create();
  newgc->protect_range = ofm_malloc_zero(sizeof(Page_Range));
  
  newgc->generations_available = 1;
  /* seed value for the full-collection trigger heuristic */
  newgc->last_full_mem_use = (20 * 1024 * 1024);
  newgc->new_btc_mark = 1;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* ofm_malloc ( size_t  size) [static]

Definition at line 193 of file newgc.c.

                                     {
  /* malloc wrapper that aborts the process (via out_of_memory)
     instead of ever returning NULL */
  void *result = malloc(size);
  if (result == NULL)
    out_of_memory();
  return result;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* ofm_malloc_zero ( size_t  size) [static]

Definition at line 199 of file newgc.c.

                                          {
  /* allocate through ofm_malloc (which aborts on OOM) and clear the
     block; memset returns its first argument, so this is one step */
  return memset(ofm_malloc(size), 0, size);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void out_of_memory ( ) [static]

Definition at line 185 of file newgc.c.

/* Terminal OOM handler: notifies the embedder's callback (if any),
   prints a diagnostic, and aborts -- never returns. */
{
  if (GC_report_out_of_memory)
    GC_report_out_of_memory();
  GCPRINT(GCOUTF, "The system has run out of memory!\n");
  abort();
}

Here is the caller graph for this function:

static void pagemap_add ( PageMap  pagemap,
mpage page 
) [inline, static]

Definition at line 376 of file newgc.c.

/* Maps every address slice covered by page back to page itself. */
{
  pagemap_modify(pagemap, page, page);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void pagemap_add_with_size ( PageMap  pagemap,
mpage page,
long  size 
) [inline, static]

Definition at line 381 of file newgc.c.

/* Like pagemap_add, but for an explicitly-sized region (used for
   gen-0 nursery pages, which are larger than APAGE_SIZE). */
{
  pagemap_modify_with_size(pagemap, page, size, page);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static mpage* pagemap_find_page ( PageMap  page_maps1,
void *p 
) [inline, static]

Definition at line 334 of file newgc.c.

                                                                    {
  /* Looks up the mpage that owns address p, or NULL if p is not on a
     GC page.  64-bit builds walk a 3-level radix tree keyed on the
     high address bits; 32-bit builds index one flat array. */
#ifdef SIXTY_FOUR_BIT_INTEGERS
  mpage ***page_maps2;
  mpage **page_maps3;

  page_maps2 = page_maps1[PAGEMAP64_LEVEL1_BITS(p)];
  if (!page_maps2) return NULL;
  page_maps3 = page_maps2[PAGEMAP64_LEVEL2_BITS(p)];
  if (!page_maps3) return NULL;
  return page_maps3[PAGEMAP64_LEVEL3_BITS(p)];
#else
  return page_maps1[PAGEMAP32_BITS(p)];
#endif
}

Here is the caller graph for this function:

static void pagemap_modify ( PageMap  pagemap,
mpage page,
mpage val 
) [inline, static]

Definition at line 371 of file newgc.c.

                                                                            {
  /* big pages (size_class > 1) cover page->size bytes; all other
     pages cover exactly one APAGE_SIZE region */
  long size = (page->size_class > 1) ? page->size : APAGE_SIZE;
  pagemap_modify_with_size(pagemap, page, size, val);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void pagemap_modify_with_size ( PageMap  pagemap,
mpage page,
long  size,
mpage val 
) [inline, static]

Definition at line 361 of file newgc.c.

                                                                                                 {
  /* Point every APAGE_SIZE-granular map slot covered by
     [page->addr, page->addr + size) at val (val is NULL on removal). */
  char *addr = (char *)page->addr;
  long remaining;

  for (remaining = size; remaining > 0; remaining -= APAGE_SIZE) {
    pagemap_set(pagemap, addr, val);
    addr += APAGE_SIZE;
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void pagemap_remove ( PageMap  pagemap,
mpage page 
) [inline, static]

Definition at line 386 of file newgc.c.

/* Clears the page-map slots for page (inverse of pagemap_add). */
{
  pagemap_modify(pagemap, page, NULL);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void pagemap_remove_with_size ( PageMap  pagemap,
mpage page,
long  size 
) [inline, static]

Definition at line 391 of file newgc.c.

/* Clears the page-map slots for an explicitly-sized region
   (inverse of pagemap_add_with_size). */
{
  pagemap_modify_with_size(pagemap, page, size, NULL);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void pagemap_set ( PageMap  page_maps1,
void *p,
mpage *value 
) [inline, static]

Definition at line 310 of file newgc.c.

                                                                          {
  /* Stores value as the owning mpage for the APAGE_SIZE slot holding p.
     On 64-bit builds, intermediate radix-tree levels are allocated
     lazily with calloc (so empty subtrees read as NULL).
     NOTE(review): these calloc results are not checked for NULL, unlike
     the ofm_malloc* allocations elsewhere in this file. */
#ifdef SIXTY_FOUR_BIT_INTEGERS
  unsigned long pos;
  mpage ***page_maps2;
  mpage **page_maps3;

  pos = PAGEMAP64_LEVEL1_BITS(p);
  page_maps2 = page_maps1[pos];
  if (!page_maps2) {
    page_maps2 = (mpage ***)calloc(PAGEMAP64_LEVEL2_SIZE, sizeof(mpage **));
    page_maps1[pos] = page_maps2;
  }
  pos = PAGEMAP64_LEVEL2_BITS(p);
  page_maps3 = page_maps2[pos];
  if (!page_maps3) {
    page_maps3 = (mpage **)calloc(PAGEMAP64_LEVEL3_SIZE, sizeof(mpage *));
    page_maps2[pos] = page_maps3;
  }
  page_maps3[PAGEMAP64_LEVEL3_BITS(p)] = value;
#else
  page_maps1[PAGEMAP32_BITS(p)] = value;
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

static int pop_ptr ( void **  ptr) [inline, static]

Definition at line 1496 of file newgc.c.

/* Pops one pointer from the global mark stack into *ptr.  Returns 1 on
   success, 0 when the stack is empty.  Exhausted segments are kept
   linked (mark_stack->prev/next) so push_ptr can reuse them. */
{
  if(mark_stack->top == MARK_STACK_START(mark_stack)) {
    if(mark_stack->prev) {
      /* if there is a previous page, go to it */
      mark_stack = mark_stack->prev;
    } else {
      /* if there isn't a previous page, then we've hit the bottom of the stack */
      return 0;
    }
  }

  /* if we get here, we're guaranteed to have data */
  *ptr = *(--mark_stack->top);
  return 1;
}

Here is the caller graph for this function:

static void propagate_marks ( NewGC gc) [static]

Definition at line 2071 of file newgc.c.

/* Drains the mark stack: for each queued object pointer, dispatches on
   its page class (big / medium / small) and page or object type to
   trace its interior pointers, which may push more work.  Terminates
   when the stack is empty. */
{
  void *p;
  PageMap pagemap = gc->page_maps;
  Mark_Proc *mark_table = gc->mark_table;

  while(pop_ptr(&p)) {
    mpage *page = pagemap_find_page(pagemap, p);
    GCDEBUG((DEBUGOUTF, "Popped pointer %p\n", p));

    /* we can assume a lot here -- like it's a valid pointer with a page --
       because we vet bad cases out in GC_mark, above */
    if(page->size_class) {
      if(page->size_class > 1) {
        /* Big page: one object spans the whole page */
        void **start = PPTR(BIG_PAGE_TO_OBJECT(page));
        void **end = PAGE_END_VSS(page);

        set_backtrace_source(start, page->page_type);

        switch(page->page_type) {
        case PAGE_TAGGED: 
          {
            unsigned short tag = *(unsigned short*)start;
            ASSERT_TAG(tag);
            /* a mark-table entry below PAGE_TYPES is the atomic
               sentinel, not a procedure */
            if((unsigned long)mark_table[tag] < PAGE_TYPES) {
              /* atomic */
            } else {
              GC_ASSERT(mark_table[tag]);
              mark_table[tag](start); break;
            }
          }
          /* deliberate fallthrough from the atomic-tagged case */
        case PAGE_ATOMIC: break;
        case PAGE_ARRAY: while(start < end) gcMARK(*(start++)); break;
        case PAGE_XTAGGED: GC_mark_xtagged(start); break;
        case PAGE_TARRAY: 
          {
            unsigned short tag = *(unsigned short *)start;
            ASSERT_TAG(tag);
            end -= INSET_WORDS;
            while(start < end) {
              GC_ASSERT(mark_table[tag]);
              start += mark_table[tag](start);
            }
            break;
          }
        }
      } else {
        /* Medium page */
        objhead *info = OBJPTR_TO_OBJHEAD(p);

        set_backtrace_source(p, info->type);

        switch(info->type) {
        case PAGE_TAGGED: 
          {
            unsigned short tag = *(unsigned short*)p;
            ASSERT_TAG(tag);
            GC_ASSERT(mark_table[tag]);
            mark_table[tag](p);
            break;
          }
        case PAGE_ARRAY:
          {
            void **start = p;
            void **end = PPTR(info) + info->size;
            while(start < end) gcMARK(*start++);
            break;
          }
        }
      }
    } else {
      /* Small (gen-0/gen-1) page: dispatch on the object's own type */
      objhead *info = OBJPTR_TO_OBJHEAD(p);

      set_backtrace_source(p, info->type);

      switch(info->type) {
      case PAGE_TAGGED: 
        {
          unsigned short tag = *(unsigned short*)p;
          ASSERT_TAG(tag);
          GC_ASSERT(mark_table[tag]);
          mark_table[tag](p);
          break;
        }
      case PAGE_ATOMIC: break;
      case PAGE_ARRAY: {
        void **start = p;
        void **end = PPTR(info) + info->size;
        while(start < end) gcMARK(*start++);
        break;
      }
      case PAGE_TARRAY: {
        /* tagged array: elements share one tag; INSET_WORDS at the
           tail are not element data */
        void **start = p;
        void **end = PPTR(info) + (info->size - INSET_WORDS);
        unsigned short tag = *(unsigned short *)start;
        ASSERT_TAG(tag);
        while(start < end) {
          GC_ASSERT(mark_table[tag]);
          start += mark_table[tag](start);
        }
        break;
      }
      case PAGE_XTAGGED: GC_mark_xtagged(p); break;
      }
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void protect_old_pages ( NewGC gc) [static]

Definition at line 3003 of file newgc.c.

/* Write-protects all non-atomic generation-1 pages (and all medium
   pages) so that mutations of old objects fault and set back_pointers,
   enabling the next minor collection's write-barrier scan.  Protection
   requests are batched in protect_range and flushed once at the end. */
{
  Page_Range *protect_range = gc->protect_range;
  mpage *page;
  int i;

  for(i = 0; i < PAGE_TYPES; i++) {
    if(i != PAGE_ATOMIC)
      for(page = gc->gen1_pages[i]; page; page = page->next)
        if(page->page_type != PAGE_ATOMIC)  {
          if (!page->mprotected) {
            page->mprotected = 1;
            add_protect_page_range(protect_range, page->addr, page->size, APAGE_SIZE, 0);
          }
        }
  }

  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
    for (page = gc->med_pages[i]; page; page = page->next) {
      if (!page->mprotected) {
        page->mprotected = 1;
        add_protect_page_range(protect_range, page->addr, APAGE_SIZE, APAGE_SIZE, 0);
      }
    }
  }

  flush_protect_page_ranges(protect_range, 0);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void push_ptr ( void *ptr) [inline, static]

Definition at line 1475 of file newgc.c.

/* Pushes ptr onto the global mark stack, growing the segment chain on
   demand; previously allocated segments are reused before new ones
   are created. */
{
  /* This happens during propagation if we go past the end of this MarkSegment */
  if(mark_stack->top == MARK_STACK_END(mark_stack)) {
    /* test to see if we already have another stack page ready */
    if(mark_stack->next) {
      /* we do, so just use it */
      mark_stack = mark_stack->next;
      mark_stack->top = MARK_STACK_START(mark_stack);
    } else {
      /* we don't, so we need to allocate one */
      mark_stack->next = mark_stack_create_frame();
      mark_stack->next->prev = mark_stack;
      mark_stack = mark_stack->next;
    }
  }

  /* at this point, we're guaranteed to be good to push pointers */
  *(mark_stack->top++) = ptr;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void remove_all_gen1_pages_from_pagemap ( NewGC gc) [static]

Definition at line 2499 of file newgc.c.

/* Minor-collection prep: unmaps every generation-1 page (normal and
   medium) from the page map so gen-1 addresses are not treated as
   collectable during the minor GC; also unprotects back-pointer pages
   via remove_gen1_page_from_pagemap, then flushes the batched
   protection changes. */
{
  mpage *work;
  int i;

  GCDEBUG((DEBUGOUTF, "MINOR COLLECTION - PREPPING PAGES - remove all gen1 pages from pagemap.\n"));

  /* if we're not doing a major collection, then we need to remove all the
     pages in gc->gen1_pages[] from the page map */

  for(i = 0; i < PAGE_TYPES; i++) {
    for(work = gc->gen1_pages[i]; work; work = work->next) {
      remove_gen1_page_from_pagemap(gc, work);
    }
  }

  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
    for (work = gc->med_pages[i]; work; work = work->next) {
      if (work->generation) {
        remove_gen1_page_from_pagemap(gc, work);
      }
    }
  }

  flush_protect_page_ranges(gc->protect_range, 1);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void remove_gen1_page_from_pagemap ( NewGC gc,
mpage work 
) [static]

Definition at line 2487 of file newgc.c.

/* Removes one gen-1 page from the page map; if the page has back
   pointers and is currently write-protected, queues it for
   unprotection first (its contents must be writable during collection). */
{
  if (gc->generations_available && work->back_pointers && work->mprotected) {
    work->mprotected = 0;
    add_protect_page_range(gc->protect_range, work->addr, 
                           (work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE, 
                           APAGE_SIZE, 1);
  }
  pagemap_remove(gc->page_maps, work);
  work->added = 0;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void repair_finalizer_structs ( NewGC gc) [inline, static]

Definition at line 1302 of file newgc.c.

/* Fixup pass for finalizer records after objects have moved: updates
   the list heads, then every data/p/next pointer in both the pending
   list and the run queue to the objects' new addresses. */
{
  Fnl *fnl;

  /* repair the base parts of the list */
  gcFIXUP(gc->finalizers); gcFIXUP(gc->run_queue);
  /* then repair the stuff inside them */
  for(fnl = gc->finalizers; fnl; fnl = fnl->next) {
    gcFIXUP(fnl->data);
    gcFIXUP(fnl->p);
    gcFIXUP(fnl->next);
  }
  for(fnl = gc->run_queue; fnl; fnl = fnl->next) {
    gcFIXUP(fnl->data);
    gcFIXUP(fnl->p);
    gcFIXUP(fnl->next);
  }
}

Here is the caller graph for this function:

static void repair_heap ( NewGC gc) [static]

Definition at line 2717 of file newgc.c.

/* Post-mark sweep/fixup: walks every marked-on gen-1 page (and medium
   page), clearing mark bits on survivors while rewriting their interior
   pointers to the objects' new addresses, and flagging unmarked objects
   dead.  Big pages get size_class reset from 3 ("marked") back to 2. */
{
  mpage *page;
  int i;
  Fixup_Proc *fixup_table = gc->fixup_table;

  for(i = 0; i < PAGE_TYPES; i++) {
    for(page = gc->gen1_pages[i]; page; page = page->next) {
      if(page->marked_on) {
        page->has_new = 0;
        /* these are guaranteed not to be protected */
        if(page->size_class)  {
          /* since we get here via gen1_pages, it's a big page */
          void **start = PPTR(BIG_PAGE_TO_OBJECT(page));
          void **end = PAGE_END_VSS(page);

          GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n",
                   page, start));
          page->size_class = 2; /* remove the mark */
          switch(page->page_type) {
          case PAGE_TAGGED: 
            fixup_table[*(unsigned short*)start](start); 
            break;
          case PAGE_ATOMIC: break;
          case PAGE_ARRAY: 
            while(start < end) gcFIXUP(*(start++)); 
            break;
          case PAGE_XTAGGED: 
            GC_fixup_xtagged(start); 
            break;
          case PAGE_TARRAY: {
            unsigned short tag = *(unsigned short *)start;
            ASSERT_TAG(tag);
            end -= INSET_WORDS;
            while(start < end) start += fixup_table[tag](start);
            break;
          }
          }
        } else {
          /* small page: walk objects from previous_size (the part that
             survived earlier collections needs no repair) */
          void **start = PPTR(NUM(page->addr) + page->previous_size);
          void **end = PAGE_END_VSS(page);

          GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n",
                page, start));
          switch(page->page_type) {
            case PAGE_TAGGED: 
              while(start < end) {
                objhead *info = (objhead *)start;

                if(info->mark) {
                  void *obj_start = OBJHEAD_TO_OBJPTR(start);
                  unsigned short tag = *(unsigned short *)obj_start;
                  ASSERT_TAG(tag);
                  info->mark = 0;
                  fixup_table[tag](obj_start);
                } else {
                  info->dead = 1;
                }
                start += info->size;
              }
              break;
            case PAGE_ATOMIC:
              while(start < end) {
                objhead *info = (objhead *)start;
                if(info->mark) {
                  info->mark = 0;
                } else info->dead = 1;
                start += info->size;
              }
              break;
            case PAGE_ARRAY: 
              while(start < end) {
                objhead *info = (objhead *)start;
                size_t size = info->size;
                if(info->mark) {
                  /* fixing up the elements walks start to the object's
                     end, which is also the next header */
                  void **tempend = PPTR(info) + info->size;
                  start = OBJHEAD_TO_OBJPTR(start);
                  while(start < tempend) gcFIXUP(*start++);
                  info->mark = 0;
                } else { 
                  info->dead = 1;
                  start += size;
                }
              }
              break;
            case PAGE_TARRAY:
              while(start < end) {
                objhead *info = (objhead *)start;
                size_t size = info->size;
                if(info->mark) {
                  void **tempend = PPTR(info) + (info->size - INSET_WORDS);
                  unsigned short tag;
                  start = OBJHEAD_TO_OBJPTR(start);
                  tag = *(unsigned short*)start;
                  ASSERT_TAG(tag);
                  while(start < tempend)
                    start += fixup_table[tag](start);
                  info->mark = 0;
                  start = PPTR(info) + size;
                } else {
                  info->dead = 1;
                  start += size;
                }
              }
              break;
            case PAGE_XTAGGED:
              while(start < end) {
                objhead *info = (objhead *)start;
                if(info->mark) {
                  GC_fixup_xtagged(OBJHEAD_TO_OBJPTR(start));
                  info->mark = 0;
                } else info->dead = 1;
                start += info->size;
              }
          }
        }
      } else GCDEBUG((DEBUGOUTF,"Not Cleaning page %p\n", page));
    }
  }

  /* NOTE(review): the medium-page loop below assumes every object has
     type PAGE_ARRAY or PAGE_TAGGED; any other type in a marked slot
     would leave start unadvanced -- confirm med pages can hold nothing
     else */
  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
    for (page = gc->med_pages[i]; page; page = page->next) {
      if (page->marked_on) {
        void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
        void **end = PPTR(NUM(page->addr) + APAGE_SIZE - page->size);
        
        while(start <= end) {
          objhead *info = (objhead *)start;
          if(info->mark) {
            switch(info->type) {
            case PAGE_ARRAY:
              {
                void **tempend = PPTR(info) + info->size;
                start = OBJHEAD_TO_OBJPTR(start);
                while(start < tempend) gcFIXUP(*start++);
              }
              break;
            case PAGE_TAGGED:
              {
                void *obj_start = OBJHEAD_TO_OBJPTR(start);
                unsigned short tag = *(unsigned short *)obj_start;
                ASSERT_TAG(tag);
                fixup_table[tag](obj_start);
                start += info->size;
              }
              break;
            }
            info->mark = 0;
          } else {
            info->dead = 1;
            start += info->size;
          }
        }
      }
    }
  }
}

Here is the caller graph for this function:

static void repair_roots ( NewGC gc) [inline, static]

Definition at line 1265 of file newgc.c.

Here is the caller graph for this function:

static void repair_weak_finalizer_structs ( NewGC gc) [inline, static]

Definition at line 1392 of file newgc.c.

{
  /* Walk the weak-finalizer list after marking: repair moved pointers
     and unlink entries whose watched object (work->p) did not survive
     the collection.
     NOTE(review): assumes gcFIXUP rewrites a reference to the object's
     post-GC location and marked() reports survival -- confirm in gc2. */
  Weak_Finalizer *work;
  Weak_Finalizer *prev;

  gcFIXUP(gc->weak_finalizers);
  work = gc->weak_finalizers; prev = NULL;
  while(work) {
    gcFIXUP(work->next);
    if(!marked(gc, work->p)) {
      /* watched object is dead: splice this finalizer out of the list */
      if(prev) prev->next = work->next;
      if(!prev) gc->weak_finalizers = work->next;
      work = GC_resolve(work->next);
    } else {
      /* watched object survived: repair its pointer and keep the entry */
      gcFIXUP(work->p);
      prev = work;
      work = work->next;
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void reset_gen1_page ( NewGC gc,
mpage work 
) [static]

Definition at line 2449 of file newgc.c.

{
  /* Queue an unprotect request for a gen1 page ahead of a major
     collection so the collector can write to it; no-op when memory
     protection is unavailable or the page is already writable. */
  unsigned long span;

  if (!gc->generations_available || !work->mprotected)
    return;

  work->mprotected = 0;
  /* pages with size_class > 1 span multiple allocator pages */
  span = (work->size_class > 1) ? round_to_apage_size(work->size)
                                : APAGE_SIZE;
  add_protect_page_range(gc->protect_range, work->addr, span,
                         APAGE_SIZE, 1);
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 2459 of file newgc.c.

{
  /* Major-collection prep: reset per-page bookkeeping (live_size,
     previous_size) on every gen1 page and queue unprotect requests for
     any mprotect'ed pages so the mark/repair phases can write to them. */
  mpage *work;
  int i;

  GCDEBUG((DEBUGOUTF, "MAJOR COLLECTION - PREPPING PAGES - reset live_size, reset previous_size, unprotect.\n"));
  /* we need to make sure that previous_size for every page is reset, so
     we don't accidentally screw up the mark routine */

  for(i = 0; i < PAGE_TYPES; i++) {
    for(work = gc->gen1_pages[i]; work; work = work->next) {
      reset_gen1_page(gc, work);
      work->live_size = 0;
      work->previous_size = PREFIX_SIZE;
    }
  }

  /* medium-object pages: only older-generation pages need unprotecting */
  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
    for (work = gc->med_pages[i]; work; work = work->next) {
      if (work->generation) {
        reset_gen1_page(gc, work);
      }
    }
  }

  /* flush all queued unprotect requests in one batch */
  flush_protect_page_ranges(gc->protect_range, 1);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void reset_nursery ( NewGC gc) [inline, static]

Definition at line 969 of file newgc.c.

{
  /* Re-size generation 0 in proportion to the live heap
     (GEN0_SIZE_FACTOR * memory_in_use + GEN0_SIZE_ADDITION), clamped
     to GEN0_MAX_SIZE, so nursery growth tracks overall memory use. */
  unsigned long target = NUM((GEN0_SIZE_FACTOR * (float)gc->memory_in_use) + GEN0_SIZE_ADDITION);

  resize_gen0(gc, (target > GEN0_MAX_SIZE) ? GEN0_MAX_SIZE : target);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void reset_pointer_stack ( void  ) [inline, static]

Definition at line 1538 of file newgc.c.

{
  /* Rewind the mark stack: walk back to the first segment in the
     chain, then reset its top pointer, discarding any pushed entries. */
  while (mark_stack->prev)
    mark_stack = mark_stack->prev;
  mark_stack->top = MARK_STACK_START(mark_stack);
}
static void reset_weak_finalizers ( NewGC gc) [inline, static]

Definition at line 1423 of file newgc.c.

{
  /* After marking: restore the slot values that zero_weak_finalizers
     stashed in wfnl->saved. If the watched object survived, the saved
     value is marked too so it is retained along with the object. */
  Weak_Finalizer *wfnl;

  for(wfnl = GC_resolve(gc->weak_finalizers); wfnl; wfnl = GC_resolve(wfnl->next)) {
    if(marked(gc, wfnl->p)) {
      set_backtrace_source(wfnl, BT_WEAKLINK);
      gcMARK(wfnl->saved); 
    }
    /* put the saved pointer back into the watched slot (p + offset) */
    *(void**)(NUM(GC_resolve(wfnl->p)) + wfnl->offset) = wfnl->saved;
    wfnl->saved = NULL;
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void resize_gen0 ( NewGC gc,
unsigned long  new_size 
) [inline, static]

Definition at line 917 of file newgc.c.

{
  /* Grow or shrink generation 0 to roughly new_size bytes by reusing
     existing GEN0_PAGE_SIZE pages, allocating new ones, or freeing the
     excess, then point the bump allocator at the first page. */
  mpage *work = gc->gen0.pages;
  mpage *prev = NULL;
  unsigned long alloced_size = 0;

  /* first, make sure the big pages pointer is clean */
  gc->gen0.big_pages = NULL; 

  /* reset any parts of gen0 we're keeping */
  while(work && (alloced_size < new_size)) {
    alloced_size += GEN0_PAGE_SIZE;
    work->size = PREFIX_SIZE;
    prev = work;
    work = work->next;
  }

  /* if we're short, add more */
  while(alloced_size < new_size) {
    mpage *newpage = gen0_create_new_mpage(gc);

    if(prev)
      prev->next = newpage;
    else gc->gen0.pages = newpage;
    prev = newpage;

    alloced_size += GEN0_PAGE_SIZE;
  }

  /* deallocate any parts left over */
  if (work) {
    prev->next = NULL;

    /* remove the excess pages */
    while(work) {
      mpage *next = work->next;
      gen0_free_mpage(gc, work);
      work = next;
    }
  }

  /* we're going to allocate onto the first page now; the fast-path
     allocator bumps GC_gen0_alloc_page_ptr up to GC_gen0_alloc_page_end */
  gc->gen0.curr_alloc_page = gc->gen0.pages;
  GC_gen0_alloc_page_ptr = NUM(gc->gen0.curr_alloc_page->addr) + gc->gen0.curr_alloc_page->size;
  ASSERT_VALID_OBJPTR(GC_gen0_alloc_page_ptr);
  GC_gen0_alloc_page_end = NUM(gc->gen0.curr_alloc_page->addr) + GEN0_PAGE_SIZE;

  /* set the two size variables */
  gc->gen0.max_size = alloced_size;
  gc->gen0.current_size = 0;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static size_t round_to_apage_size ( size_t  sizeb) [static]

Definition at line 457 of file newgc.c.

{  
  /* Round sizeb up to the next multiple of APAGE_SIZE. Relies on
     APAGE_SIZE being a power of two, so masking off the low bits of
     (sizeb + APAGE_SIZE - 1) yields the aligned size. */
  return (sizeb + APAGE_SIZE - 1) & ~((size_t)APAGE_SIZE - 1);
}

Here is the caller graph for this function:

static void zero_weak_finalizers ( NewGC gc) [inline, static]

Definition at line 1413 of file newgc.c.

{
  /* Before marking: save the value held in each weak finalizer's
     watched slot (p + offset) into wfnl->saved and NULL the slot, so
     the slot's referent is not kept alive through this pointer during
     the mark phase. reset_weak_finalizers restores it afterwards. */
  Weak_Finalizer *wfnl;

  for(wfnl = GC_resolve(gc->weak_finalizers); wfnl; wfnl = GC_resolve(wfnl->next)) {
    wfnl->saved = *(void**)(NUM(GC_resolve(wfnl->p)) + wfnl->offset);
    *(void**)(NUM(GC_resolve(wfnl->p)) + wfnl->offset) = NULL;
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:


Variable Documentation

THREAD_LOCAL NewGC* GC [static]

Definition at line 89 of file newgc.c.

Definition at line 156 of file newgc.c.

Definition at line 452 of file newgc.c.

Definition at line 451 of file newgc.c.

Definition at line 155 of file newgc.c.

Definition at line 153 of file newgc.c.

Definition at line 154 of file newgc.c.

Definition at line 1186 of file newgc.c.

Definition at line 1458 of file newgc.c.

const char* type_name[PAGE_TYPES] [static]
Initial value:
 { 
  "tagged", 
  "atomic", 
  "array",
  "tagged array", 
  "xtagged",
  "big" 
}

Definition at line 73 of file newgc.c.

const char* zero_sized[4] [static]

Definition at line 455 of file newgc.c.