plt-scheme 4.2.1
newgc.c
/* A new accounting precise GC for MzScheme
   Copyright (C) 2001, 2002 Matthew Flatt and Adam Wick
   All rights reserved.

   Please see full copyright in the documentation
   Search for "FIXME" for known improvement points

   IF YOU'RE NOT ADAM (AND PROBABLY IF YOU ARE) READ THIS FIRST:

   This is now a hybrid copying/mark-compact collector. The nursery
   (generation 0) is copied into the old generation (generation 1),
   but the old generation compacts. This yields a nice combination
   of performance, scalability and memory efficiency.

   The following page map invariants are required:

   Outside of collection, only pages in the older generation should
   be in the gc->page_maps.

   During the mark phase of collection, only pages which contain
   objects which may be marked should be in the page map. This means
   that during minor collections, only pages in the nursery should
   be in the map.

   During the rest of collection, only pages which contain the past
   locations of moved data should be in the page map. This means only
   the nursery and pages being compacted.
*/

#define MZ_PRECISE_GC 1 /* required for mz includes to work right */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "platforms.h"
#include "gc2.h"
#include "gc2_dump.h"


/* the number of tags to use for tagged objects */
#define NUMBER_OF_TAGS 512

#ifdef SIXTY_FOUR_BIT_INTEGERS
#define PAGEMAP64_LEVEL1_SIZE (1 << 16)
#define PAGEMAP64_LEVEL2_SIZE (1 << 16)
#define PAGEMAP64_LEVEL3_SIZE (1 << (32 - LOG_APAGE_SIZE))
#define PAGEMAP64_LEVEL1_BITS(p) (((unsigned long)(p)) >> 48)
#define PAGEMAP64_LEVEL2_BITS(p) ((((unsigned long)(p)) >> 32) & ((PAGEMAP64_LEVEL2_SIZE) - 1))
#define PAGEMAP64_LEVEL3_BITS(p) ((((unsigned long)(p)) >> LOG_APAGE_SIZE) & ((PAGEMAP64_LEVEL3_SIZE) - 1))
#else
#define PAGEMAP32_SIZE (1 << (32 - LOG_APAGE_SIZE))
#define PAGEMAP32_BITS(x) (NUM(x) >> LOG_APAGE_SIZE)
#endif
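
/* Worked example, assuming LOG_APAGE_SIZE is 14 (16 KB pages): a 64-bit
   address is split across the three levels as bits 63-48 (level 1),
   bits 47-32 (level 2), and bits 31-14 (level 3). For
   p = 0x00007f8a12345678:

     PAGEMAP64_LEVEL1_BITS(p) = 0x0000
     PAGEMAP64_LEVEL2_BITS(p) = 0x7f8a
     PAGEMAP64_LEVEL3_BITS(p) = 0x12345678 >> 14 = 0x48d1

   so a lookup is two table indirections plus a final array index. */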

#if 0
# define GC_ASSERT(x) assert(x)
#else
# define GC_ASSERT(x) /* empty */
#endif

/* the page type constants */
enum {
  PAGE_TAGGED   = 0,
  PAGE_ATOMIC   = 1,
  PAGE_ARRAY    = 2,
  PAGE_TARRAY   = 3,
  PAGE_XTAGGED  = 4,
  PAGE_BIG      = 5,
  /* the number of page types. */
  PAGE_TYPES    = 6,
};

static const char *type_name[PAGE_TYPES] = {
  "tagged",
  "atomic",
  "array",
  "tagged array",
  "xtagged",
  "big"
};


#include "newgc.h"
#ifdef MZ_USE_PLACES
static NewGC *MASTERGC;
static NewGCMasterInfo *MASTERGCINFO;
static THREAD_LOCAL objhead GC_objhead_template;
#endif
static THREAD_LOCAL NewGC *GC;
#define GCTYPE NewGC
#define GC_get_GC() (GC)
#define GC_set_GC(gc) (GC = gc)

#ifdef MZ_USE_PLACES
inline static int is_master_gc(NewGC *gc) {
  return (MASTERGC == gc);
}
#endif

#include "msgprint.c"

/*****************************************************************************/
/* Collector selection. Change the definitions of these to set or unset the  */
/* particular collector you want.                                            */
/*****************************************************************************/

/* This turns on automatic memory accounting */
/* #define NEWGC_BTC_ACCOUNT */
/* #undef NEWGC_BTC_ACCOUNT */

/* This turns on memory tracing */
/* #define NEWGC_MEMORY_TRACE */

/* This turns on support for heap debugging (FIXME: NOT IMPLEMENTED YET) */
/* #define NEWGC_HEAP_DEBUGGING */

/* This turns on some internal debugging logs. Don't turn this on unless you
   don't care about performance and you're hacking the collector */
/* #define NEWGC_INTERNAL_DEBUGGING */

/* The initial size of generation 0. This will grow and shrink a bit as time
   goes on */
#define GEN0_INITIAL_SIZE (1 * 1024 * 1024)
#define GEN0_SIZE_FACTOR 0.5
#define GEN0_SIZE_ADDITION (512 * 1024)
#define GEN0_MAX_SIZE (32 * 1024 * 1024)
#define GEN0_PAGE_SIZE (1 * 1024 * 1024)

/* This is the log base 2 of the size of one word, given in bytes */
#ifdef SIXTY_FOUR_BIT_INTEGERS
# define LOG_WORD_SIZE 3
#else
# define LOG_WORD_SIZE 2
#endif


/* the size of a page we use for the internal mark stack */
#define STACK_PART_SIZE (1 * 1024 * 1024)


/* # define LOG_APAGE_SIZE ... see gc2_obj.h */
/* These are computed from the previous settings. You shouldn't mess with
   them */
#define PTR(x) ((void*)(x))
#define PPTR(x) ((void**)(x))
#define NUM(x) ((unsigned long)(x))
#define WORD_SIZE (1 << LOG_WORD_SIZE)
#define WORD_BITS (8 * WORD_SIZE)
#define APAGE_SIZE (1 << LOG_APAGE_SIZE)
#define GENERATIONS 1

/* the externals */
void (*GC_out_of_memory)(void);
void (*GC_report_out_of_memory)(void);
void (*GC_mark_xtagged)(void *obj);
void (*GC_fixup_xtagged)(void *obj);

GC_collect_start_callback_Proc GC_set_collect_start_callback(GC_collect_start_callback_Proc func) {
  NewGC *gc = GC_get_GC();
  GC_collect_start_callback_Proc old;
  old = gc->GC_collect_start_callback;
  gc->GC_collect_start_callback = func;
  return old;
}
GC_collect_end_callback_Proc GC_set_collect_end_callback(GC_collect_end_callback_Proc func) {
  NewGC *gc = GC_get_GC();
  GC_collect_end_callback_Proc old;
  old = gc->GC_collect_end_callback;
  gc->GC_collect_end_callback = func;
  return old;
}
void GC_set_collect_inform_callback(void (*func)(int major_gc, long pre_used, long post_used)) {
  NewGC *gc = GC_get_GC();
  gc->GC_collect_inform_callback = func;
}


#include "my_qsort.c"

/*****************************************************************************/
/* OS-Level Memory Management Routines                                       */
/*****************************************************************************/
static void garbage_collect(NewGC*, int);

static void out_of_memory()
{
  if (GC_report_out_of_memory)
    GC_report_out_of_memory();
  GCPRINT(GCOUTF, "The system has run out of memory!\n");
  abort();
}

static void *ofm_malloc(size_t size) {
  void *ptr = malloc(size);
  if (!ptr) out_of_memory();
  return ptr;
}

static void *ofm_malloc_zero(size_t size) {
  void *ptr;
  ptr = ofm_malloc(size);
  memset(ptr, 0, size);
  return ptr;
}

inline static void check_used_against_max(NewGC *gc, size_t len)
{
  gc->used_pages += (len / APAGE_SIZE) + (((len % APAGE_SIZE) == 0) ? 0 : 1);

  if(gc->in_unsafe_allocation_mode) {
    if(gc->used_pages > gc->max_pages_in_heap)
      gc->unsafe_allocation_abort(gc);
  } else {
    if(gc->used_pages > gc->max_pages_for_use) {
      garbage_collect(gc, 0); /* hopefully this will free enough space */
      if(gc->used_pages > gc->max_pages_for_use) {
        garbage_collect(gc, 1); /* hopefully *this* will free enough space */
        if(gc->used_pages > gc->max_pages_for_use) {
          /* too much memory allocated.
           * Inform the thunk and then die semi-gracefully */
          if(GC_out_of_memory)
            GC_out_of_memory();
          out_of_memory();
        }
      }
    }
  }
}
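
/* Worked example, assuming APAGE_SIZE is 16384: a request of len = 20000
   charges (20000 / 16384) + 1 = 2 pages to gc->used_pages, while an exact
   multiple such as len = 32768 charges exactly 2; free_pages below
   reverses the same computation. */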

#include "page_range.c"

#include "vm.c"

#include "protect_range.c"

static void *malloc_pages(NewGC *gc, size_t len, size_t alignment)
{
  void *ptr;
  check_used_against_max(gc, len);
  ptr = vm_malloc_pages(gc->vm, len, alignment, 0);
  if (!ptr) out_of_memory();
  return ptr;
}

static void *malloc_dirty_pages(NewGC *gc, size_t len, size_t alignment)
{
  void *ptr;
  check_used_against_max(gc, len);
  ptr = vm_malloc_pages(gc->vm, len, alignment, 1);
  if (!ptr) out_of_memory();
  return ptr;
}

static void free_pages(NewGC *gc, void *p, size_t len)
{
  gc->used_pages -= (len / APAGE_SIZE) + (((len % APAGE_SIZE) == 0) ? 0 : 1);
  vm_free_pages(gc->vm, p, len);
}


/*****************************************************************************/
/* Memory Tracing, Part 1                                                    */
/*****************************************************************************/
#ifdef NEWGC_MEMORY_TRACE
# error "memory tracing not implemented in this particular revision \
  please revert to early versions of this collector, and then nag \
Adam (awick@cs.utah.edu) to put this stuff back in"
#endif

int GC_mtrace_new_id(void *f)
{
  return 0;
}

int GC_mtrace_union_current_with(int newval)
{
  return 0;
}

/*****************************************************************************/
/* Page Map Routines                                                         */
/*****************************************************************************/
inline static void free_page_maps(PageMap page_maps1) {
#ifdef SIXTY_FOUR_BIT_INTEGERS
  unsigned long i;
  unsigned long j;
  mpage ***page_maps2;
  mpage **page_maps3;

  for (i=0; i<PAGEMAP64_LEVEL1_SIZE; i++) {
    page_maps2 = page_maps1[i];
    if (page_maps2) {
      for (j=0; j<PAGEMAP64_LEVEL2_SIZE; j++) {
        page_maps3 = page_maps2[j];
        if (page_maps3) {
          free(page_maps3);
        }
      }
      free(page_maps2);
    }
  }
  free(page_maps1);
#else
  free(page_maps1);
#endif
}

/* the page map makes a nice mapping from addresses to pages, allowing
   fairly fast lookup. this is useful. */
inline static void pagemap_set(PageMap page_maps1, void *p, mpage *value) {
#ifdef SIXTY_FOUR_BIT_INTEGERS
  unsigned long pos;
  mpage ***page_maps2;
  mpage **page_maps3;

  pos = PAGEMAP64_LEVEL1_BITS(p);
  page_maps2 = page_maps1[pos];
  if (!page_maps2) {
    page_maps2 = (mpage ***)calloc(PAGEMAP64_LEVEL2_SIZE, sizeof(mpage **));
    page_maps1[pos] = page_maps2;
  }
  pos = PAGEMAP64_LEVEL2_BITS(p);
  page_maps3 = page_maps2[pos];
  if (!page_maps3) {
    page_maps3 = (mpage **)calloc(PAGEMAP64_LEVEL3_SIZE, sizeof(mpage *));
    page_maps2[pos] = page_maps3;
  }
  page_maps3[PAGEMAP64_LEVEL3_BITS(p)] = value;
#else
  page_maps1[PAGEMAP32_BITS(p)] = value;
#endif
}

inline static mpage *pagemap_find_page(PageMap page_maps1, void *p) {
#ifdef SIXTY_FOUR_BIT_INTEGERS
  mpage ***page_maps2;
  mpage **page_maps3;

  page_maps2 = page_maps1[PAGEMAP64_LEVEL1_BITS(p)];
  if (!page_maps2) return NULL;
  page_maps3 = page_maps2[PAGEMAP64_LEVEL2_BITS(p)];
  if (!page_maps3) return NULL;
  return page_maps3[PAGEMAP64_LEVEL3_BITS(p)];
#else
  return page_maps1[PAGEMAP32_BITS(p)];
#endif
}

/* These procedures modify or use the page map. The page map provides us very
   fast mappings from pointers to the page they reside on, if any. The page
   map itself serves two important purposes:

   Between collections, it maps pointers to write-protected pages, so that
   the write-barrier can identify what page a write has happened to and
   mark it as potentially containing pointers from gen 1 to gen 0.

   During collections, it maps pointers to "from" pages. */
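
/* A minimal sketch (not compiled) of the first purpose: on a write fault
   at address p, the handler consults the page map, unprotects the page,
   and flags it as holding possible gen1 -> gen0 pointers. The real
   version is designate_modified_gc(), later in this file. */
#if 0
static void write_fault_sketch(NewGC *gc, void *p)
{
  mpage *page = pagemap_find_page(gc->page_maps, p);
  if (page && !page->back_pointers) {
    page->mprotected = 0;                        /* allow writes again */
    vm_protect_pages(page->addr, APAGE_SIZE, 1); /* 1 = writable */
    page->back_pointers = 1;                     /* rescan on next minor GC */
  }
}
#endif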

/* pagemap_modify_with_size could be optimized more for the 64-bit case;
   repeatedly calling pagemap_set for each page is not optimal */
inline static void pagemap_modify_with_size(PageMap pagemap, mpage *page, long size, mpage *val) {
  void *p = page->addr;

  while(size > 0) {
    pagemap_set(pagemap, p, val);
    size -= APAGE_SIZE;
    p = (char *)p + APAGE_SIZE;
  }
}

inline static void pagemap_modify(PageMap pagemap, mpage *page, mpage *val) {
  long size = (page->size_class > 1) ? page->size : APAGE_SIZE;
  pagemap_modify_with_size(pagemap, page, size, val);
}

inline static void pagemap_add(PageMap pagemap, mpage *page)
{
  pagemap_modify(pagemap, page, page);
}

inline static void pagemap_add_with_size(PageMap pagemap, mpage *page, long size)
{
  pagemap_modify_with_size(pagemap, page, size, page);
}

inline static void pagemap_remove(PageMap pagemap, mpage *page)
{
  pagemap_modify(pagemap, page, NULL);
}

inline static void pagemap_remove_with_size(PageMap pagemap, mpage *page, long size)
{
  pagemap_modify_with_size(pagemap, page, size, NULL);
}

int GC_is_allocated(void *p)
{
  NewGC *gc = GC_get_GC();
  return !!pagemap_find_page(gc->page_maps, p);
}


/*****************************************************************************/
/* Allocation                                                                */
/*****************************************************************************/

/* struct objhead is defined in gc2_obj.h */
/* Make sure allocation starts out double-word aligned.
   The header on each allocated object is one word, so to make
   the content double-word aligned, we pad the start of each page
   with a one-word (or larger) prefix as needed. */
#ifdef GC_ALIGN_SIXTEEN
# ifdef SIXTY_FOUR_BIT_INTEGERS
#  define PREFIX_WSIZE 1
# else
#  define PREFIX_WSIZE 3
# endif
#elif defined(GC_ALIGN_EIGHT)
# if defined(SIXTY_FOUR_BIT_INTEGERS)
#  define PREFIX_WSIZE 0
# else
#  define PREFIX_WSIZE 1
# endif
#else /* GC_ALIGN_FOUR or byte aligned */
# define PREFIX_WSIZE 0
#endif
#define PREFIX_SIZE (PREFIX_WSIZE * WORD_SIZE)

#define MED_OBJHEAD(p, bytesize) ((objhead *)(PTR(((((NUM(p) & (APAGE_SIZE - 1)) - PREFIX_SIZE) / bytesize) * bytesize) \
                                                         + (NUM(p) & (~(APAGE_SIZE - 1))) + PREFIX_SIZE)))
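
/* Worked example, assuming APAGE_SIZE = 16384 and PREFIX_SIZE = 0: for a
   medium page whose objects are bytesize = 64 apart, a pointer at page
   offset 200 gives ((200 - 0) / 64) * 64 = 192, so MED_OBJHEAD recovers
   the objhead of the enclosing object (the slot starting at offset 192). */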

/* this is the maximum size of an object that will fit on a page, in words.
   the "- 3" is basically used as a fudge/safety factor, and has no real,
   important meaning. */
#define MAX_OBJECT_SIZEW (gcBYTES_TO_WORDS(APAGE_SIZE) - PREFIX_WSIZE - 3)
#define MAX_OBJECT_SIZE  (gcWORDS_TO_BYTES(MAX_OBJECT_SIZEW))

#define ASSERT_TAG(tag) GC_ASSERT((tag) >= 0 && (tag) <= NUMBER_OF_TAGS)
#define ASSERT_VALID_OBJPTR(objptr) GC_ASSERT(!((long)(objptr) & (0x3)))

/* Generation 0. Generation 0 is a set of very large pages in a list (gc->gen0.pages),
   plus a set of smaller bigpages in a separate list (gc->gen0.big_pages).
   The former is purely an optimization, saving us from constantly deallocating
   and allocating the entire nursery on every GC. The latter is useful because it simplifies
   the allocation process (which is also a speed hack, come to think of it)

   gc->gen0.pages             is the list of very large nursery pages.
   gc->gen0.curr_alloc_page   is the member of this list we are currently allocating on.
   The size count helps us trigger collection quickly when we're running out of space; see
   the test in allocate_big.
*/
THREAD_LOCAL unsigned long GC_gen0_alloc_page_ptr = 0;
THREAD_LOCAL unsigned long GC_gen0_alloc_page_end = 0;

/* miscellaneous variables */
static const char *zero_sized[4]; /* all 0-sized allocs get this */

static size_t round_to_apage_size(size_t sizeb)
{
  sizeb += APAGE_SIZE - 1;
  sizeb -= sizeb & (APAGE_SIZE - 1);
  return sizeb;
}

static mpage *malloc_mpage()
{
  mpage *page;
  page = ofm_malloc_zero(sizeof(mpage));
  return page;
}

static void free_mpage(mpage *page)
{
  free(page);
}

#ifdef NEWGC_BTC_ACCOUNT
static inline int BTC_single_allocation_limit(NewGC *gc, size_t sizeb);
#endif

/* ALIGN_BYTES_SIZE DOES NOT assume that the argument is already word-aligned. */
/* INSET_WORDS is how many words in a tagged array can be padding, plus one; it
   must also be no more than the minimum size of a tagged element. */
#ifdef GC_ALIGN_SIXTEEN
# ifdef SIXTY_FOUR_BIT_INTEGERS
#  define ALIGN_SIZE(sizew) (((sizew) & 0x1) ? ((sizew) + 1) : (sizew))
#  define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & ((2 * WORD_SIZE) -1)) ? ((sizeb) + ((2 * WORD_SIZE) - ((sizeb) & ((2 * WORD_SIZE) - 1)))) : (sizeb))
#  define INSET_WORDS 1
# else
#  define ALIGN_SIZE(sizew) (((sizew) & 0x3) ? ((sizew) + (4 - ((sizew) & 0x3))) : (sizew))
#  define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & ((4 * WORD_SIZE) - 1)) ? ((sizeb) + ((4 * WORD_SIZE) - ((sizeb) & ((4 * WORD_SIZE) - 1)))) : (sizeb))
#  define INSET_WORDS 3
# endif
#else
# ifdef GC_ALIGN_EIGHT
#  ifdef SIXTY_FOUR_BIT_INTEGERS
#   define ALIGN_SIZE(sizew) (sizew)
#   define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & (WORD_SIZE -1)) ? ((sizeb) + (WORD_SIZE - ((sizeb) & (WORD_SIZE - 1)))) : (sizeb))
#   define INSET_WORDS 0
#  else
#   define ALIGN_SIZE(sizew) (((sizew) & 0x1) ? ((sizew) + 1) : (sizew))
#   define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & ((2 * WORD_SIZE) -1)) ? ((sizeb) + ((2 * WORD_SIZE) - ((sizeb) & ((2 * WORD_SIZE) - 1)))) : (sizeb))
#   define INSET_WORDS 1
#  endif
# else
#  define ALIGN_SIZE(sizew) (sizew)
#  define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & (3)) ? ((sizeb) + (4 - ((sizeb) & (3)))) : (sizeb))
#  define INSET_WORDS 0
# endif
#endif
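
/* Worked examples, assuming a 32-bit build (WORD_SIZE = 4) with
   GC_ALIGN_EIGHT: ALIGN_BYTES_SIZE rounds byte counts up to a multiple of
   2 * WORD_SIZE = 8, so ALIGN_BYTES_SIZE(12) = 16 and
   ALIGN_BYTES_SIZE(16) = 16; ALIGN_SIZE does the same in words, so
   ALIGN_SIZE(3) = 4. */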

#define COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(s) (ALIGN_BYTES_SIZE((s) + OBJHEAD_SIZE))
#define COMPUTE_ALLOC_SIZE_FOR_BIG_PAGE_SIZE(s) (ALIGN_BYTES_SIZE((s) + OBJHEAD_SIZE + PREFIX_SIZE))
#define BIG_PAGE_TO_OBJECT(big_page) ((void *) (((char *)((big_page)->addr)) + OBJHEAD_SIZE + PREFIX_SIZE))
#define BIG_PAGE_TO_OBJHEAD(big_page) ((objhead*) (((char *)((big_page)->addr)) + PREFIX_SIZE))
#define PAGE_TO_OBJHEAD(page) ((objhead*) (((char *)((page)->addr)) + PREFIX_SIZE))
#define PAGE_START_VSS(page) ((void**) (((char *)((page)->addr)) + PREFIX_SIZE))
#define PAGE_END_VSS(page) ((void**) (((char *)((page)->addr)) + ((page)->size)))
#define MED_OBJHEAD_TO_OBJECT(ptr, page_size) ((void*) (((char *)MED_OBJHEAD((ptr), (page_size))) + OBJHEAD_SIZE))
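
/* Layout implied by the macros above: a page begins with PREFIX_SIZE
   bytes of alignment padding, then each object is a one-word objhead
   followed by its payload; the payload address is what clients see.

     page->addr
       +-- PREFIX_SIZE padding
       +-- objhead            <- the *_TO_OBJHEAD macros point here
       +-- payload            <- *_TO_OBJECT / OBJHEAD_TO_OBJPTR point here
*/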

/* the core allocation functions */
static void *allocate_big(const size_t request_size_bytes, int type)
{
  NewGC *gc = GC_get_GC();
  mpage *bpage;
  size_t allocate_size;

#ifdef NEWGC_BTC_ACCOUNT
  if(GC_out_of_memory) {
    if (BTC_single_allocation_limit(gc, request_size_bytes)) {
      /* We're allowed to fail. Check for allocations that exceed a single-time
         limit. Otherwise, the limit doesn't work as intended, because
         a program can allocate a large block that nearly exhausts memory,
         and then a subsequent allocation can fail. As long as the limit
         is much smaller than the actual available memory, and as long as
         GC_out_of_memory protects any user-requested allocation whose size
         is independent of any existing object, then we can enforce the limit. */
      GC_out_of_memory();
    }
  }
#endif

  /* the actual size of this is the size, rounded up to the next largest word,
     plus one word for the object header.
     This last serves many purposes, including making sure the object is
     aligned for Sparcs. */
  allocate_size = COMPUTE_ALLOC_SIZE_FOR_BIG_PAGE_SIZE(request_size_bytes);

  if((gc->gen0.current_size + allocate_size) >= gc->gen0.max_size) {
    if (!gc->dumping_avoid_collection)
      garbage_collect(gc, 0);
  }
  gc->gen0.current_size += allocate_size;

  /* We not only need APAGE_SIZE alignment, we
     need everything consistently mapped within an APAGE_SIZE
     segment. So round up. */
  bpage = malloc_mpage();
  if (type == PAGE_ATOMIC)
    bpage->addr = malloc_dirty_pages(gc, round_to_apage_size(allocate_size), APAGE_SIZE);
  else
    bpage->addr = malloc_pages(gc, round_to_apage_size(allocate_size), APAGE_SIZE);
  bpage->size = allocate_size;
  bpage->size_class = 2;
  bpage->page_type = type;

#ifdef MZ_USE_PLACES
    memcpy(BIG_PAGE_TO_OBJHEAD(bpage), &GC_objhead_template, sizeof(objhead));
#endif

  /* push new bpage onto GC->gen0.big_pages */
  bpage->next = gc->gen0.big_pages;
  if(bpage->next) bpage->next->prev = bpage;
  gc->gen0.big_pages = bpage;
  pagemap_add(gc->page_maps, bpage);

  {
    void * objptr = BIG_PAGE_TO_OBJECT(bpage);
    ASSERT_VALID_OBJPTR(objptr);
    return objptr;
  }
}

static void *allocate_medium(size_t sizeb, int type)
{
  NewGC *gc;
  int sz = 8, pos = 0, n;
  void *addr, *p;
  mpage *page;
  objhead *info;

  if (sizeb > (1 << (LOG_APAGE_SIZE - 1)))
    return allocate_big(sizeb, type);

  while (sz < sizeb) {
    sz <<= 1;
    pos++;
  }

  sz += WORD_SIZE; /* add trailing word, in case pointer is to end */
  sz += OBJHEAD_SIZE; /* room for objhead */
  sz = ALIGN_BYTES_SIZE(sz);

  gc = GC_get_GC();
  while (1) {
    page = gc->med_freelist_pages[pos];
    if (page) {
      n = page->previous_size;
      while (n <= (APAGE_SIZE - sz)) {
        info = (objhead *)PTR(NUM(page->addr) + n);
        if (info->dead) {
#ifdef MZ_USE_PLACES
          info->owner = GC_objhead_template.owner;
          /* memcpy(info, &GC_objhead_template, sizeof(objhead)); */
#endif
          info->dead = 0;
          info->type = type;
          page->previous_size = (n + sz);
          page->live_size += sz;
          p = OBJHEAD_TO_OBJPTR(info);
          memset(p, 0, sz - OBJHEAD_SIZE);
          return p;
        }
        n += sz;
      }
      gc->med_freelist_pages[pos] = page->prev;
    } else
      break;
  }

  page = malloc_mpage();
  addr = malloc_pages(gc, APAGE_SIZE, APAGE_SIZE);
  page->addr = addr;
  page->size = sz;
  page->size_class = 1;
  page->page_type = PAGE_BIG;
  page->previous_size = PREFIX_SIZE;
  page->live_size = sz;

  for (n = page->previous_size; (n + sz) <= APAGE_SIZE; n += sz) {
    info = (objhead *)PTR(NUM(page->addr) + n);
#ifdef MZ_USE_PLACES
    memcpy(info, &GC_objhead_template, sizeof(objhead));
#endif
    info->dead = 1;
    info->size = gcBYTES_TO_WORDS(sz);
  }

  page->next = gc->med_pages[pos];
  if (page->next)
    page->next->prev = page;
  gc->med_pages[pos] = page;
  gc->med_freelist_pages[pos] = page;

  pagemap_add(gc->page_maps, page);

  n = page->previous_size;
  info = (objhead *)PTR(NUM(page->addr) + n);
  info->dead = 0;
  info->type = type;

  {
    void * objptr = OBJHEAD_TO_OBJPTR(info);
    ASSERT_VALID_OBJPTR(objptr);
    return objptr;
  }
}

inline static mpage *gen0_create_new_mpage(NewGC *gc) {
  mpage *newmpage;

  newmpage = malloc_mpage();
  newmpage->addr = malloc_dirty_pages(gc, GEN0_PAGE_SIZE, APAGE_SIZE);
  newmpage->size_class = 0;
  newmpage->size = PREFIX_SIZE;
  pagemap_add_with_size(gc->page_maps, newmpage, GEN0_PAGE_SIZE);

  return newmpage;
}

inline static void gen0_free_mpage(NewGC *gc, mpage *page) {
  pagemap_remove_with_size(gc->page_maps, page, GEN0_PAGE_SIZE);
  free_pages(gc, page->addr, GEN0_PAGE_SIZE);
  free_mpage(page);
}

/* #define OVERFLOWS_GEN0(ptr) ((ptr) > (NUM(gc->gen0.curr_alloc_page->addr) + GEN0_PAGE_SIZE)) */
#define OVERFLOWS_GEN0(ptr) ((ptr) > GC_gen0_alloc_page_end)
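
/* GC_gen0_alloc_page_ptr / GC_gen0_alloc_page_end are exported (and
   thread-local) so that callers can, in principle, inline the nursery
   fast path: test OVERFLOWS_GEN0 and bump the pointer without calling
   into the collector. A minimal sketch of that shape, under that
   assumption (not compiled; allocate() below is the real slow path): */
#if 0
  unsigned long newptr = GC_gen0_alloc_page_ptr + allocate_size;
  if (!OVERFLOWS_GEN0(newptr)) {
    objhead *info = (objhead *)PTR(GC_gen0_alloc_page_ptr);
    GC_gen0_alloc_page_ptr = newptr;   /* bump */
    /* ... initialize *info, return OBJHEAD_TO_OBJPTR(info) ... */
  } else {
    /* fall back to allocate(), which advances to the next nursery page
       or triggers a collection */
  }
#endif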

inline static size_t gen0_size_in_use(NewGC *gc) {
  return (gc->gen0.current_size + ((GC_gen0_alloc_page_ptr - NUM(gc->gen0.curr_alloc_page->addr)) - PREFIX_SIZE));
}

#define BYTES_MULTIPLE_OF_WORD_TO_WORDS(sizeb) ((sizeb) >> gcLOG_WORD_SIZE)

inline static void *allocate(const size_t request_size, const int type)
{
  size_t allocate_size;
  unsigned long newptr;

  if(request_size == 0) return zero_sized;

  allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size);
  if(allocate_size > MAX_OBJECT_SIZE)  return allocate_big(request_size, type);

  /* ensure that allocation will fit in a gen0 page */
  newptr = GC_gen0_alloc_page_ptr + allocate_size;
  ASSERT_VALID_OBJPTR(newptr);

  while (OVERFLOWS_GEN0(newptr)) {
    NewGC *gc = GC_get_GC();
    /* bring page size used up to date */
    gc->gen0.curr_alloc_page->size = GC_gen0_alloc_page_ptr - NUM(gc->gen0.curr_alloc_page->addr);
    gc->gen0.current_size += gc->gen0.curr_alloc_page->size;

    /* try next nursery page if present */
    if(gc->gen0.curr_alloc_page->next) {
      gc->gen0.curr_alloc_page  = gc->gen0.curr_alloc_page->next;
      GC_gen0_alloc_page_ptr    = NUM(gc->gen0.curr_alloc_page->addr) + gc->gen0.curr_alloc_page->size;
      ASSERT_VALID_OBJPTR(GC_gen0_alloc_page_ptr);
      GC_gen0_alloc_page_end    = NUM(gc->gen0.curr_alloc_page->addr) + GEN0_PAGE_SIZE;
    }
    /* WARNING: tries to avoid a collection but
     * malloc_pages can cause a collection due to check_used_against_max */
    else if (gc->dumping_avoid_collection) {
      mpage *new_mpage = gen0_create_new_mpage(gc);

      /* push page */
      new_mpage->next = gc->gen0.curr_alloc_page;
      new_mpage->next->prev = new_mpage;

      gc->gen0.curr_alloc_page  = new_mpage;
      GC_gen0_alloc_page_ptr    = NUM(new_mpage->addr);
      ASSERT_VALID_OBJPTR(GC_gen0_alloc_page_ptr);
      GC_gen0_alloc_page_end    = NUM(new_mpage->addr) + GEN0_PAGE_SIZE;
    }
    else {
      garbage_collect(gc, 0);
    }
    newptr = GC_gen0_alloc_page_ptr + allocate_size;
    ASSERT_VALID_OBJPTR(newptr);
  }

  /* actual allocation */
  {
    objhead *info = (objhead *)PTR(GC_gen0_alloc_page_ptr);

    GC_gen0_alloc_page_ptr = newptr;

    if (type == PAGE_ATOMIC)
      memset(info, 0, sizeof(objhead)); /* init objhead */
    else
      bzero(info, allocate_size);

#ifdef MZ_USE_PLACES
    memcpy(info, &GC_objhead_template, sizeof(objhead));
#endif

    info->type = type;
    info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumped us up to the next word boundary */
    {
      void * objptr = OBJHEAD_TO_OBJPTR(info);
      ASSERT_VALID_OBJPTR(objptr);
      return objptr;
    }
  }
}


inline static void *fast_malloc_one_small_tagged(size_t request_size, int dirty)
{
  unsigned long newptr;
  const size_t allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size);

  newptr = GC_gen0_alloc_page_ptr + allocate_size;
  ASSERT_VALID_OBJPTR(newptr);

  if(OVERFLOWS_GEN0(newptr)) {
    return GC_malloc_one_tagged(request_size);
  } else {
    objhead *info = (objhead *)PTR(GC_gen0_alloc_page_ptr);

    GC_gen0_alloc_page_ptr = newptr;

    if (dirty)
      memset(info, 0, sizeof(objhead)); /* init objhead */
    else
      bzero(info, allocate_size);

#ifdef MZ_USE_PLACES
    memcpy(info, &GC_objhead_template, sizeof(objhead));
#endif

    info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumped us up to the next word boundary */

    {
      void * objptr = OBJHEAD_TO_OBJPTR(info);
      ASSERT_VALID_OBJPTR(objptr);
      return objptr;
    }
  }
}

#define PAIR_SIZE_IN_BYTES ALIGN_BYTES_SIZE(sizeof(Scheme_Simple_Object) + OBJHEAD_SIZE)

void *GC_malloc_pair(void *car, void *cdr)
{
  unsigned long newptr;
  void *pair;
  const size_t allocate_size = PAIR_SIZE_IN_BYTES;

  newptr = GC_gen0_alloc_page_ptr + allocate_size;
  ASSERT_VALID_OBJPTR(newptr);

  if(OVERFLOWS_GEN0(newptr)) {
    NewGC *gc = GC_get_GC();
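    /* GC_malloc_one_tagged below can trigger a collection, which may move
       car and cdr; parking them in gc->park keeps them visible to the
       collector, and they are reloaded (possibly relocated) afterward. */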
    gc->park[0] = car;
    gc->park[1] = cdr;
    pair = GC_malloc_one_tagged(sizeof(Scheme_Simple_Object));
    car = gc->park[0];
    cdr = gc->park[1];
    gc->park[0] = NULL;
    gc->park[1] = NULL;
  }
  else {
    objhead *info = (objhead *) PTR(GC_gen0_alloc_page_ptr);
    GC_gen0_alloc_page_ptr = newptr;

#ifdef MZ_USE_PLACES
    memcpy(info, &GC_objhead_template, sizeof(objhead));
#else
    memset(info, 0, sizeof(objhead)); /* init objhead */
#endif


    /* info->type = type; */ /* We know that the type field is already 0 */
    info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumped us up to the next word boundary */

    pair = OBJHEAD_TO_OBJPTR(info);
    ASSERT_VALID_OBJPTR(pair);
  }

  /* initialize pair */
  {
    Scheme_Simple_Object *obj = (Scheme_Simple_Object *) pair;
    obj->iso.so.type = scheme_pair_type;
    obj->iso.so.keyex = 0; /* init first word of SchemeObject to 0 */
    obj->u.pair_val.car = car;
    obj->u.pair_val.cdr = cdr;
  }

  return pair;
}

/* the allocation mechanism we present to the outside world */
void *GC_malloc(size_t s)                         { return allocate(s, PAGE_ARRAY); }
void *GC_malloc_one_tagged(size_t s)              { return allocate(s, PAGE_TAGGED); }
void *GC_malloc_one_xtagged(size_t s)             { return allocate(s, PAGE_XTAGGED); }
void *GC_malloc_array_tagged(size_t s)            { return allocate(s, PAGE_TARRAY); }
void *GC_malloc_atomic(size_t s)                  { return allocate(s, PAGE_ATOMIC); }
void *GC_malloc_atomic_uncollectable(size_t s)    { void *p = ofm_malloc_zero(s); return p; }
void *GC_malloc_allow_interior(size_t s)          { return allocate_medium(s, PAGE_ARRAY); }
void *GC_malloc_atomic_allow_interior(size_t s)   { return allocate_big(s, PAGE_ATOMIC); }
void *GC_malloc_tagged_allow_interior(size_t s)   { return allocate_medium(s, PAGE_TAGGED); }
void *GC_malloc_one_small_dirty_tagged(size_t s)  { return fast_malloc_one_small_tagged(s, 1); }
void *GC_malloc_one_small_tagged(size_t s)        { return fast_malloc_one_small_tagged(s, 0); }
void GC_free(void *p) {}


long GC_compute_alloc_size(long sizeb)
{
  return COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(sizeb);
}

long GC_initial_word(int request_size)
{
  long w = 0;
  objhead info;

  const size_t allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size);

#ifdef MZ_USE_PLACES
  memcpy(&info, &GC_objhead_template, sizeof(objhead));
#else
  memset(&info, 0, sizeof(objhead));
#endif

  info.size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumped us up to the next word boundary */
  memcpy(&w, &info, sizeof(objhead));

  return w;
}

void GC_initial_words(char *buffer, long sizeb)
{
  objhead *info = (objhead *)buffer;

  const size_t allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(sizeb);

#ifdef MZ_USE_PLACES
  memcpy(info, &GC_objhead_template, sizeof(objhead));
#else
  memset(info, 0, sizeof(objhead));
#endif

  info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumped us up to the next word boundary */
}

long GC_alloc_alignment()
{
  return APAGE_SIZE;
}

long GC_malloc_stays_put_threshold() { return gcWORDS_TO_BYTES(MAX_OBJECT_SIZEW); }

/* this function resizes generation 0 to the closest it can get (erring high)
   to the size we've computed as ideal */
inline static void resize_gen0(NewGC *gc, unsigned long new_size)
{
  mpage *work = gc->gen0.pages;
  mpage *prev = NULL;
  unsigned long alloced_size = 0;

  /* first, make sure the big pages pointer is clean */
  gc->gen0.big_pages = NULL;

  /* reset any parts of gen0 we're keeping */
  while(work && (alloced_size < new_size)) {
    alloced_size += GEN0_PAGE_SIZE;
    work->size = PREFIX_SIZE;
    prev = work;
    work = work->next;
  }

  /* if we're short, add more */
  while(alloced_size < new_size) {
    mpage *newpage = gen0_create_new_mpage(gc);

    if(prev)
      prev->next = newpage;
    else gc->gen0.pages = newpage;
    prev = newpage;

    alloced_size += GEN0_PAGE_SIZE;
  }

  /* deallocate any parts left over */
  if (work) {
    prev->next = NULL;

    /* remove the excess pages */
    while(work) {
      mpage *next = work->next;
      gen0_free_mpage(gc, work);
      work = next;
    }
  }

  /* we're going to allocate onto the first page now */
  gc->gen0.curr_alloc_page = gc->gen0.pages;
  GC_gen0_alloc_page_ptr = NUM(gc->gen0.curr_alloc_page->addr) + gc->gen0.curr_alloc_page->size;
  ASSERT_VALID_OBJPTR(GC_gen0_alloc_page_ptr);
  GC_gen0_alloc_page_end = NUM(gc->gen0.curr_alloc_page->addr) + GEN0_PAGE_SIZE;

  /* set the two size variables */
  gc->gen0.max_size = alloced_size;
  gc->gen0.current_size = 0;
}

inline static void reset_nursery(NewGC *gc)
{
  unsigned long new_gen0_size;
  new_gen0_size = NUM((GEN0_SIZE_FACTOR * (float)gc->memory_in_use) + GEN0_SIZE_ADDITION);
  if(new_gen0_size > GEN0_MAX_SIZE)
    new_gen0_size = GEN0_MAX_SIZE;

  resize_gen0(gc, new_gen0_size);
}

/* This procedure fundamentally returns true if a pointer is marked, and
   false if it isn't. This function assumes that you're talking, at this
   point, purely about the mark field of the object. It ignores things like
   the object not being one of our GC heap objects, being in a higher gen
   than we're collecting, not being a pointer at all, etc. */
inline static int marked(NewGC *gc, void *p)
{
  mpage *page;

  if(!p) return 0;
  if(!(page = pagemap_find_page(gc->page_maps, p))) return 1;
  if (page->size_class) {
    if (page->size_class > 1) {
      return (page->size_class > 2);
    }
  } else {
    if((NUM(page->addr) + page->previous_size) > NUM(p))
      return 1;
  }
  return OBJPTR_TO_OBJHEAD(p)->mark;
}

/*****************************************************************************/
/* Internal Debugging Routines                                               */
/*****************************************************************************/
#ifdef NEWGC_INTERNAL_DEBUGGING
static FILE *dump;
static int collections = 0;

static void init_debug_file(void)
{
  /*
    char filename_buf[20];
    snprintf(filename_buf, 20, "gclog%d%d", (collections / 10), (collections % 10));
    dump = fopen(filename_buf, "a");
    collections += 1;
  */

  char *filename = ofm_malloc(8 * sizeof(char));

  filename[0] = 'g'; filename[1] = 'c'; filename[2] = 'l';
  filename[3] = 'o'; filename[4] = 'g';
  filename[5] = '0' + (collections / 10);
  filename[6] = '0' + (collections % 10);
  filename[7] = 0;

  dump = fopen(filename, "a");
  collections += 1;
}

static void close_debug_file(void)
{
  fclose(dump);
}

static void dump_region(void **start, void **end)
{
  while(start < end) {
    fprintf(dump, "%.8lx: %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx\n",
            NUM(start), NUM(*start), NUM(*(start + 1)), NUM(*(start + 2)),
            NUM(*(start + 3)), NUM(*(start + 4)), NUM(*(start + 5)),
            NUM(*(start + 6)), NUM(*(start + 7)));
    start += 8;
  }
  fprintf(dump, "\n\n");
}

static inline void *get_stack_base(NewGC *gc); /* defined later in this file */

static void dump_heap(NewGC *gc)
{
  mpage *page;
  short i;

  if(collections >= 0) {
    for(page = gc->gen0.pages; page; page = page->next) {
      fprintf(dump, "Generation 0 Page (%p:%p - %p, size %i):\n",
              page, page->addr, PTR(NUM(page->addr) + GEN0_PAGE_SIZE), page->size);
      dump_region(PAGE_START_VSS(page), PAGE_END_VSS(page));
    }
    for(page = gc->gen0.big_pages; page; page = page->next) {
      fprintf(dump, "Page %p:%p (gen %i, type %i, big %i, back %i, size %i)\n",
              page, page->addr, page->generation, page->page_type, page->big_page,
              page->back_pointers, page->size);
      dump_region(PAGE_START_VSS(page), PAGE_END_VSS(page));
    }
    for(i = 0; i < PAGE_TYPES; i++)
      for(page = gc->gen1_pages[i]; page; page = page->next) {
        fprintf(dump, "Page %p:%p (gen %i, type %i, big %i, back %i, size %i)\n",
                page, page->addr, page->generation, page->page_type, page->big_page,
                page->back_pointers, page->size);
        dump_region(PAGE_START_VSS(page), PAGE_END_VSS(page));
      }
    fprintf(dump, "STACK:\n");
    dump_region((void*)(NUM(&i) & 0xfffffff0), (void*)(NUM(get_stack_base(gc)) & 0xfffffff0));
    fflush(dump);
  }
}
#endif

#ifdef NEWGC_INTERNAL_DEBUGGING
# define INIT_DEBUG_FILE() init_debug_file()
# define CLOSE_DEBUG_FILE() close_debug_file()
# define DUMP_HEAP() dump_heap(GC_get_GC())
# define DEBUGOUTF dump
# define GCDEBUG(args) { GCPRINT args; GCFLUSHOUT(); }
#else
# define INIT_DEBUG_FILE() /* */
# define CLOSE_DEBUG_FILE() /* */
# define DUMP_HEAP() /* */
# define GCDEBUG(args) /* */
#endif

#define GCWARN(args) { GCPRINT args; GCFLUSHOUT(); }
#define GCERR(args) { GCPRINT args; GCFLUSHOUT(); abort(); }

/*****************************************************************************/
/* Backtrace                                                                 */
/*****************************************************************************/

#if MZ_GC_BACKTRACE

static void backtrace_new_page(NewGC *gc, mpage *page)
{
  /* This is a little wasteful for big pages, because we'll
     only use the first few words: */
  page->backtrace = (void **)malloc_pages(gc, APAGE_SIZE, APAGE_SIZE);
}

# define backtrace_new_page_if_needed(gc, page) if (!page->backtrace) backtrace_new_page(gc, page)

static void free_backtrace(mpage *page)
{
  if (page->backtrace)
    free_pages(GC, page->backtrace, APAGE_SIZE);
}

static void *bt_source;
static int bt_type;

static void set_backtrace_source(void *source, int type)
{
  bt_source = source;
  bt_type = type;
}

static void record_backtrace(mpage *page, void *ptr)
/* ptr is after objhead */
{
  unsigned long delta;

  delta = PPTR(ptr) - PPTR(page->addr);
  page->backtrace[delta - 1] = bt_source;
  ((long *)page->backtrace)[delta] = bt_type;
}

static void copy_backtrace_source(mpage *to_page, void *to_ptr,
                                  mpage *from_page, void *from_ptr)
/* ptrs are at objhead */
{
  unsigned long to_delta, from_delta;

  to_delta = PPTR(to_ptr) - PPTR(to_page->addr);
  from_delta = PPTR(from_ptr) - PPTR(from_page->addr);

  to_page->backtrace[to_delta] = from_page->backtrace[from_delta];
  to_page->backtrace[to_delta+1] = from_page->backtrace[from_delta+1];
}

static void *get_backtrace(mpage *page, void *ptr)
/* ptr is after objhead */
{
  unsigned long delta;

  if (page->size_class) {
    if (page->size_class > 1)
      ptr = BIG_PAGE_TO_OBJECT(page);
    else
      ptr = MED_OBJHEAD_TO_OBJECT(ptr, page->size);
  }

  delta = PPTR(ptr) - PPTR(page->addr);
  return page->backtrace[delta - 1];
}


# define BT_STACK      (PAGE_TYPES + 0)
# define BT_ROOT       (PAGE_TYPES + 1)
# define BT_FINALIZER  (PAGE_TYPES + 2)
# define BT_WEAKLINK   (PAGE_TYPES + 3)
# define BT_IMMOBILE   (PAGE_TYPES + 4)

#else
# define backtrace_new_page(gc, page) /* */
# define backtrace_new_page_if_needed(gc, page) /* */
# define free_backtrace(page) /* */
# define set_backtrace_source(ptr, type) /* */
# define record_backtrace(page, ptr) /* */
# define copy_backtrace_source(to_page, to_ptr, from_page, from_ptr) /* */
#endif

#define two_arg_no_op(a, b) /* */

/*****************************************************************************/
/* Routines dealing with various runtime execution stacks                    */
/*                                                                           */
/* With the exception of the "traverse" macro and resultant simplification,  */
/* this code is entirely lifted from compact.c                               */
/*****************************************************************************/
THREAD_LOCAL void **GC_variable_stack;

void **GC_get_variable_stack()
{
  return GC_variable_stack;
}

void GC_set_variable_stack(void **p)
{
  GC_variable_stack = p;
}

void GC_set_stack_base(void *base)
{
  NewGC *gc = GC_get_GC();
  gc->stack_base = (unsigned long)base;
}

unsigned long GC_get_stack_base()
{
  NewGC *gc = GC_get_GC();
  return gc->stack_base;
}

void GC_set_get_thread_stack_base(unsigned long (*func)(void)) {
  NewGC *gc = GC_get_GC();
  gc->GC_get_thread_stack_base = func;
}

static inline void *get_stack_base(NewGC *gc) {
  if (gc->GC_get_thread_stack_base) return (void*) gc->GC_get_thread_stack_base();
  return (void*) gc->stack_base;
}

#include "stack_comp.c"

#define GC_X_variable_stack GC_mark_variable_stack
#define gcX(a) gcMARK(*a)
#define X_source(stk, p) set_backtrace_source((stk ? stk : p), BT_STACK)
#include "var_stack.c"
#undef GC_X_variable_stack
#undef gcX
#undef X_source

#define GC_X_variable_stack GC_fixup_variable_stack
#define gcX(a) gcFIXUP(*a)
#define X_source(stk, p) /* */
#include "var_stack.c"
#undef GC_X_variable_stack
#undef gcX
#undef X_source
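
/* Note on the pattern above: var_stack.c is included twice with different
   macro bindings, stamping out two routines from one template -- a marking
   walk (GC_mark_variable_stack) and a fixup walk (GC_fixup_variable_stack)
   over the same shadow-stack layout. */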

/*****************************************************************************/
/* Routines for root sets                                                    */
/*****************************************************************************/

#include "roots.c"

#define traverse_roots(gcMUCK, set_bt_src) {    \
    unsigned long j;                            \
    Roots *roots = &gc->roots;                  \
    if(roots->roots) {                          \
      sort_and_merge_roots(roots);              \
      for(j = 0; j < roots->count; j += 2) {    \
        void **start = (void**)roots->roots[j]; \
        void **end = (void**)roots->roots[j+1]; \
        while(start < end) {                    \
          set_bt_src(start, BT_ROOT);           \
          gcMUCK(*start++);                     \
        }                                       \
      }                                         \
    }                                           \
  }

inline static void mark_roots(NewGC *gc)
{
  traverse_roots(gcMARK, set_backtrace_source);
}

inline static void repair_roots(NewGC *gc)
{
  traverse_roots(gcFIXUP, two_arg_no_op);
}

#include "immobile_boxes.c"

/*****************************************************************************/
/* finalizers                                                                */
/*****************************************************************************/

static int is_finalizable_page(NewGC *gc, void *p)
{
  return (pagemap_find_page(gc->page_maps, p) ? 1 : 0);
}

#include "fnls.c"

inline static void mark_finalizer_structs(NewGC *gc)
{
  Fnl *fnl;

  for(fnl = GC_resolve(gc->finalizers); fnl; fnl = GC_resolve(fnl->next)) {
    set_backtrace_source(fnl, BT_FINALIZER);
    gcMARK(fnl->data);
    set_backtrace_source(&gc->finalizers, BT_ROOT);
    gcMARK(fnl);
  }
  for(fnl = gc->run_queue; fnl; fnl = fnl->next) {
    set_backtrace_source(fnl, BT_FINALIZER);
    gcMARK(fnl->data);
    gcMARK(fnl->p);
    set_backtrace_source(&gc->run_queue, BT_ROOT);
    gcMARK(fnl);
  }
}

inline static void repair_finalizer_structs(NewGC *gc)
{
  Fnl *fnl;

  /* repair the base parts of the list */
  gcFIXUP(gc->finalizers); gcFIXUP(gc->run_queue);
  /* then repair the stuff inside them */
  for(fnl = gc->finalizers; fnl; fnl = fnl->next) {
    gcFIXUP(fnl->data);
    gcFIXUP(fnl->p);
    gcFIXUP(fnl->next);
  }
  for(fnl = gc->run_queue; fnl; fnl = fnl->next) {
    gcFIXUP(fnl->data);
    gcFIXUP(fnl->p);
    gcFIXUP(fnl->next);
  }
}

inline static void check_finalizers(NewGC *gc, int level)
{
  Fnl *work = GC_resolve(gc->finalizers);
  Fnl *prev = NULL;

  GCDEBUG((DEBUGOUTF, "CFNL: Checking level %i finalizers\n", level));
  while(work) {
    if((work->eager_level == level) && !marked(gc, work->p)) {
      struct finalizer *next = GC_resolve(work->next);

      GCDEBUG((DEBUGOUTF,
               "CFNL: Level %i finalizer %p on %p queued for finalization.\n",
               work->eager_level, work, work->p));
      set_backtrace_source(work, BT_FINALIZER);
      gcMARK(work->p);
      if(prev) prev->next = next;
      if(!prev) gc->finalizers = next;
      if(gc->last_in_queue) gc->last_in_queue = gc->last_in_queue->next = work;
      if(!gc->last_in_queue) gc->run_queue = gc->last_in_queue = work;
      work->next = NULL;
      --gc->num_fnls;

      work = next;
    } else {
      GCDEBUG((DEBUGOUTF, "CFNL: Not finalizing %p (level %i on %p): %p / %i\n",
               work, work->eager_level, work->p, pagemap_find_page(gc->page_maps, work->p),
               marked(gc, work->p)));
      prev = work;
      work = GC_resolve(work->next);
    }
  }
}

inline static void do_ordered_level3(NewGC *gc)
{
  struct finalizer *temp;
  Mark_Proc *mark_table = gc->mark_table;

  for(temp = GC_resolve(gc->finalizers); temp; temp = GC_resolve(temp->next))
    if(!marked(gc, temp->p)) {
      GCDEBUG((DEBUGOUTF,
               "LVL3: %p is not marked. Marking payload (%p)\n",
               temp, temp->p));
      set_backtrace_source(temp, BT_FINALIZER);
      if(temp->tagged) mark_table[*(unsigned short*)temp->p](temp->p);
      if(!temp->tagged) GC_mark_xtagged(temp->p);
    }
}

void GC_finalization_weak_ptr(void **p, int offset)
{
  NewGC *gc = GC_get_GC();
  Weak_Finalizer *wfnl;

  gc->park[0] = p; wfnl = GC_malloc_atomic(sizeof(Weak_Finalizer));
  p = gc->park[0]; gc->park[0] = NULL;
  wfnl->p = p; wfnl->offset = offset * sizeof(void*); wfnl->saved = NULL;
  wfnl->next = gc->weak_finalizers; gc->weak_finalizers = wfnl;
}

inline static void mark_weak_finalizer_structs(NewGC *gc)
{
  Weak_Finalizer *work;

  GCDEBUG((DEBUGOUTF, "MARKING WEAK FINALIZERS.\n"));
  for(work = gc->weak_finalizers; work; work = work->next) {
    set_backtrace_source(&gc->weak_finalizers, BT_ROOT);
    gcMARK(work);
  }
}

inline static void repair_weak_finalizer_structs(NewGC *gc)
{
  Weak_Finalizer *work;
  Weak_Finalizer *prev;

  gcFIXUP(gc->weak_finalizers);
  work = gc->weak_finalizers; prev = NULL;
  while(work) {
    gcFIXUP(work->next);
    if(!marked(gc, work->p)) {
      if(prev) prev->next = work->next;
      if(!prev) gc->weak_finalizers = work->next;
      work = GC_resolve(work->next);
    } else {
      gcFIXUP(work->p);
      prev = work;
      work = work->next;
    }
  }
}

inline static void zero_weak_finalizers(NewGC *gc)
{
  Weak_Finalizer *wfnl;

  for(wfnl = GC_resolve(gc->weak_finalizers); wfnl; wfnl = GC_resolve(wfnl->next)) {
    wfnl->saved = *(void**)(NUM(GC_resolve(wfnl->p)) + wfnl->offset);
    *(void**)(NUM(GC_resolve(wfnl->p)) + wfnl->offset) = NULL;
  }
}

inline static void reset_weak_finalizers(NewGC *gc)
{
  Weak_Finalizer *wfnl;

  for(wfnl = GC_resolve(gc->weak_finalizers); wfnl; wfnl = GC_resolve(wfnl->next)) {
    if(marked(gc, wfnl->p)) {
      set_backtrace_source(wfnl, BT_WEAKLINK);
      gcMARK(wfnl->saved);
    }
    *(void**)(NUM(GC_resolve(wfnl->p)) + wfnl->offset) = wfnl->saved;
    wfnl->saved = NULL;
  }
}

/*****************************************************************************/
/* weak boxes and arrays                                                     */
/*****************************************************************************/

#define is_marked(gc, p) marked(gc, p)
#define weak_box_resolve(p) GC_resolve(p)
#include "weak.c"
#undef is_marked
#undef weak_box_resolve

/*****************************************************************************/
/* Internal Stack Routines                                                   */
/*****************************************************************************/

/* This is the code we use to implement the mark stack. We can't, sadly, use
   the standard C stack because we'll blow it; propagation makes for a *very*
   deep stack. So we use this instead. */

#define MARK_STACK_START(ms) ((void **)(void *)&ms[1])
#define MARK_STACK_END(ms) ((void **)((char *)ms + STACK_PART_SIZE))

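/* Layout of one mark-stack segment, as implied by the macros above: the
   MarkSegment header sits at the front of a STACK_PART_SIZE block and the
   remainder is an array of pointer slots, with top pointing at the next
   free slot:

     ms                       MARK_STACK_START(ms)        MARK_STACK_END(ms)
     | MarkSegment {prev,next,top} | void* slots ...................... |
*/
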
static THREAD_LOCAL MarkSegment *mark_stack = NULL;

inline static MarkSegment* mark_stack_create_frame() {
  MarkSegment *mark_frame = (MarkSegment*)ofm_malloc(STACK_PART_SIZE);
  mark_frame->next = NULL;
  mark_frame->top  = MARK_STACK_START(mark_frame);
  return mark_frame;
}

inline static void mark_stack_initialize() {
  /* This happens at the very beginning */
  if(!mark_stack) {
    mark_stack = mark_stack_create_frame();
    mark_stack->prev = NULL;
  }
}

inline static void push_ptr(void *ptr)
{
  /* This happens during propagation if we go past the end of this MarkSegment */
  if(mark_stack->top == MARK_STACK_END(mark_stack)) {
    /* test to see if we already have another stack page ready */
    if(mark_stack->next) {
      /* we do, so just use it */
      mark_stack = mark_stack->next;
      mark_stack->top = MARK_STACK_START(mark_stack);
    } else {
      /* we don't, so we need to allocate one */
      mark_stack->next = mark_stack_create_frame();
      mark_stack->next->prev = mark_stack;
      mark_stack = mark_stack->next;
    }
  }

  /* at this point, we're guaranteed to be good to push pointers */
  *(mark_stack->top++) = ptr;
}

inline static int pop_ptr(void **ptr)
{
  if(mark_stack->top == MARK_STACK_START(mark_stack)) {
    if(mark_stack->prev) {
      /* if there is a previous page, go to it */
      mark_stack = mark_stack->prev;
    } else {
      /* if there isn't a previous page, then we've hit the bottom of the stack */
      return 0;
    }
  }

  /* if we get here, we're guaranteed to have data */
  *ptr = *(--mark_stack->top);
  return 1;
}
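
/* Usage sketch (not compiled): propagation repeatedly pops a pointer and
   marks its children, which may push more pointers; the loop below is the
   general shape, with mark_children standing in for the per-type mark
   routines driven elsewhere in the collector. */
#if 0
  void *p;
  while (pop_ptr(&p)) {
    mark_children(p); /* hypothetical; each marked child ends up in push_ptr() */
  }
#endif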
01512 
01513 inline static void clear_stack_pages(void)
01514 {
01515   if(mark_stack) {
01516     MarkSegment *temp;
01517     MarkSegment *base;
01518     int keep = 2;
01519 
01520     /* go to the head of the list */
01521     for(; mark_stack->prev; mark_stack = mark_stack->prev) {}
01522     /* then go through and clear them out */
01523     base = mark_stack;
01524     for(; mark_stack; mark_stack = temp) {
01525       temp = mark_stack->next;
01526       if(keep) { 
01527         keep--; 
01528         if (!keep)
01529           mark_stack->next = NULL;
01530       } else 
01531         free(mark_stack);
01532     }
01533     mark_stack = base;
01534     mark_stack->top = MARK_STACK_START(mark_stack);
01535   }
01536 }
01537 
01538 inline static void reset_pointer_stack(void)
01539 {
01540   /* go to the head of the list */
01541   for(; mark_stack->prev; mark_stack = mark_stack->prev) {}
01542   /* reset the stack */
01543   mark_stack->top = MARK_STACK_START(mark_stack);
01544 }
01545 
01546 /*****************************************************************************/
01547 /* MEMORY ACCOUNTING                                                         */
01548 /*****************************************************************************/
01549 
01550 #ifdef NEWGC_BTC_ACCOUNT
01551 # include "mem_account.c"
01552 #else
01553 # define clean_up_thread_list() /* */
01554 #endif
01555 
01556 void GC_register_root_custodian(void *c)
01557 {
01558 #ifdef NEWGC_BTC_ACCOUNT
01559   BTC_register_root_custodian(c);
01560 #endif
01561 }
01562 
01563 int GC_set_account_hook(int type, void *c1, unsigned long b, void *c2)
01564 {
01565 #ifdef NEWGC_BTC_ACCOUNT
01566   BTC_add_account_hook(type, c1, c2, b); 
01567   return 1;
01568 #else
01569   return 0;
01570 #endif
01571 }
01572 
01573 void GC_register_thread(void *t, void *c)
01574 {
01575 #ifdef NEWGC_BTC_ACCOUNT
01576   BTC_register_thread(t, c);
01577 #endif
01578 }
01579 void GC_register_new_thread(void *t, void *c)
01580 {
01581 #ifdef NEWGC_BTC_ACCOUNT
01582   BTC_register_new_thread(t, c);
01583 #endif
01584 }
01585 
01586 int GC_merely_accounting()
01587 {
01588   NewGC *gc = GC_get_GC();
01589   return gc->doing_memory_accounting;
01590 }
01591 
01592 /*****************************************************************************/
01593 /* administration / initialization                                           */
01594 /*****************************************************************************/
01595 
01596 static int designate_modified_gc(NewGC *gc, void *p)
01597 {
01598   mpage *page = pagemap_find_page(gc->page_maps, p);
01599 
01600   if (gc->no_further_modifications) {
01601     GCPRINT(GCOUTF, "Seg fault (internal error during gc) at %p\n", p);
01602     return 0;
01603   }
01604 
01605   if(page) {
01606     if (!page->back_pointers) {
01607       page->mprotected = 0;
01608       vm_protect_pages(page->addr, (page->size_class > 1) ? round_to_apage_size(page->size) : APAGE_SIZE, 1);
01609       page->back_pointers = 1;
01610       return 1;
01611     }
01612   } else {
01613     if (gc->primoridal_gc) {
01614       return designate_modified_gc(gc->primoridal_gc, p);
01615     }
01616     GCPRINT(GCOUTF, "Seg fault (internal error) at %p\n", p);
01617   }
01618   return 0;
01619 }
01620 
01621 static int designate_modified(void *p) {
01622   NewGC *gc = GC_get_GC();
01623   return designate_modified_gc(gc, p);
01624 }
01625 
01626 
01627 void GC_write_barrier(void *p) 
01628 {
01629   (void)designate_modified(p);
01630 }
01631 
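/* Added note (not part of the original file): a hedged sketch of how
   the barrier above is reached from the page-protection fault handler
   in sighand.c (included below, not shown here). Old-generation pages
   are mprotect()ed read-only by protect_old_pages() after a
   collection; the first store into one of them faults, and the handler
   is presumed to do roughly this with the faulting address: */
#if 0
static void fault_handler_sketch(void *faulting_addr)
{
  if (!designate_modified(faulting_addr))
    abort(); /* not a barrier hit: a genuine crash */
}
#endif
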
01632 #include "sighand.c"
01633 
01634 #ifdef MZ_USE_PLACES
01635 static void NewGCMasterInfo_initialize() {
01636   MASTERGCINFO = ofm_malloc_zero(sizeof(NewGCMasterInfo));
01637   mzrt_rwlock_create(&MASTERGCINFO->cangc);
01638 }
01639 
01640 static void NewGCMasterInfo_cleanup() {
01641   mzrt_rwlock_destroy(MASTERGCINFO->cangc);
01642   free(MASTERGCINFO);
01643   MASTERGCINFO = NULL;
01644 }
01645 
01646 static void NewGCMasterInfo_get_next_id(NewGC *newgc) {
01647   /* this could just be an atomic op if we had those */
01648   /* waiting for other threads to finish a possible concurrent GC is not optimal */
01649   mzrt_rwlock_wrlock(MASTERGCINFO->cangc);
01650   GC_objhead_template.owner = MASTERGCINFO->next_GC_id++;
01651   mzrt_rwlock_unlock(MASTERGCINFO->cangc);
01652 }
01653 #endif
01654 
01655 static void NewGC_initialize(NewGC *newgc, NewGC *parentgc) {
01656   if (parentgc) {
01657     newgc->mark_table  = parentgc->mark_table;
01658     newgc->fixup_table = parentgc->fixup_table;
01659   }
01660   else {
01661 #ifdef MZ_USE_PLACES
01662     NewGCMasterInfo_initialize();
01663 #endif
01664     newgc->mark_table  = ofm_malloc_zero(NUMBER_OF_TAGS * sizeof (Mark_Proc)); 
01665     newgc->fixup_table = ofm_malloc_zero(NUMBER_OF_TAGS * sizeof (Fixup_Proc)); 
01666 #ifdef NEWGC_BTC_ACCOUNT
01667     BTC_initialize_mark_table(newgc);
01668 #endif
01669   }
01670 
01671 #ifdef MZ_USE_PLACES
01672   NewGCMasterInfo_get_next_id(newgc);
01673 #endif
01674 
01675   mark_stack_initialize();
01676 
01677 #ifdef SIXTY_FOUR_BIT_INTEGERS
01678   newgc->page_maps = ofm_malloc_zero(PAGEMAP64_LEVEL1_SIZE * sizeof (mpage***)); 
01679 #else
01680   newgc->page_maps = ofm_malloc_zero(PAGEMAP32_SIZE * sizeof (mpage*)); 
01681 #endif
01682 
01683   newgc->vm = vm_create();
01684   newgc->protect_range = ofm_malloc_zero(sizeof(Page_Range));
01685   
01686   newgc->generations_available = 1;
01687   newgc->last_full_mem_use = (20 * 1024 * 1024);
01688   newgc->new_btc_mark = 1;
01689 }
01690 
01691 /* NOTE This method sets the constructed GC as the new Thread Specific GC. */
01692 static NewGC *init_type_tags_worker(NewGC *parentgc, int count, int pair, int mutable_pair, int weakbox, int ephemeron, int weakarray, int custbox)
01693 {
01694   NewGC *gc;
01695 
01696   gc = ofm_malloc_zero(sizeof(NewGC));
01697   /* NOTE sets the constructed GC as the new Thread Specific GC. */
01698   GC_set_GC(gc);
01699 
01700   gc->weak_box_tag    = weakbox;
01701   gc->ephemeron_tag   = ephemeron;
01702   gc->weak_array_tag  = weakarray;
01703 # ifdef NEWGC_BTC_ACCOUNT
01704   gc->cust_box_tag    = custbox;
01705 # endif
01706 
01707   NewGC_initialize(gc, parentgc);
01708 
01709 
01710   /* Our best guess at what the OS will let us allocate: */
01711   gc->max_pages_in_heap = determine_max_heap_size() / APAGE_SIZE;
01712   /* Not all of that memory is available for allocating GCable
01713      objects.  There's the memory used by the stack, code,
01714      malloc()/free()ed memory, etc., and there's also the
01715      administrative structures for the GC itself. */
01716   gc->max_pages_for_use = gc->max_pages_in_heap / 2;
01717 
01718   resize_gen0(gc, GEN0_INITIAL_SIZE);
01719 
01720   if (!parentgc) {
01721     GC_register_traversers(gc->weak_box_tag, size_weak_box, mark_weak_box, fixup_weak_box, 0, 0);
01722     GC_register_traversers(gc->ephemeron_tag, size_ephemeron, mark_ephemeron, fixup_ephemeron, 0, 0);
01723     GC_register_traversers(gc->weak_array_tag, size_weak_array, mark_weak_array, fixup_weak_array, 0, 0);
01724   }
01725   initialize_signal_handler(gc);
01726   GC_add_roots(&gc->park, (char *)&gc->park + sizeof(gc->park) + 1);
01727   GC_add_roots(&gc->park_save, (char *)&gc->park_save + sizeof(gc->park_save) + 1);
01728 
01729   initialize_protect_page_ranges(gc->protect_range, malloc_dirty_pages(gc, APAGE_SIZE, APAGE_SIZE), APAGE_SIZE);
01730 
01731   return gc;
01732 }
01733 
01734 void GC_init_type_tags(int count, int pair, int mutable_pair, int weakbox, int ephemeron, int weakarray, int custbox)
01735 {
01736   static int initialized = 0;
01737 
01738   if(!initialized) {
01739     initialized = 1;
01740     init_type_tags_worker(NULL, count, pair, mutable_pair, weakbox, ephemeron, weakarray, custbox);
01741   }
01742   else {
01743     GCPRINT(GCOUTF, "GC_init_type_tags should only be called once!\n");
01744     abort();
01745   }
01746 }
01747 
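/* Added example (not part of the original file): the embedding runtime
   is expected to call GC_init_type_tags() exactly once at startup. The
   tag values below are made up for illustration; the real ones come
   from the Scheme runtime's tag table. */
#if 0
  GC_init_type_tags(256 /* count */,
                    50 /* pair */, 51 /* mutable_pair */,
                    52 /* weakbox */, 53 /* ephemeron */,
                    54 /* weakarray */, 55 /* custbox */);
#endif
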
01748 #ifdef MZ_USE_PLACES
01749 void GC_construct_child_gc() {
01750   NewGC *gc = MASTERGC;
01751   NewGC *newgc = init_type_tags_worker(gc, 0, 0, 0, gc->weak_box_tag, gc->ephemeron_tag, gc->weak_array_tag, gc->cust_box_tag);
01752   newgc->primoridal_gc = MASTERGC;
01753 }
01754 
01755 static inline void save_globals_to_gc(NewGC *gc) {
01756   gc->saved_mark_stack              = mark_stack;
01757   gc->saved_GC_variable_stack       = GC_variable_stack;
01758   gc->saved_GC_gen0_alloc_page_ptr  = GC_gen0_alloc_page_ptr;
01759   gc->saved_GC_gen0_alloc_page_end  = GC_gen0_alloc_page_end;
01760   gc->saved_GC_objhead_template     = GC_objhead_template;
01761 }
01762 
01763 static inline void restore_globals_from_gc(NewGC *gc) {
01764   mark_stack              = gc->saved_mark_stack;
01765   GC_variable_stack       = gc->saved_GC_variable_stack;
01766   GC_gen0_alloc_page_ptr  = gc->saved_GC_gen0_alloc_page_ptr;
01767   GC_gen0_alloc_page_end  = gc->saved_GC_gen0_alloc_page_end;
01768   GC_objhead_template     = gc->saved_GC_objhead_template;
01769 }
01770 
01771 void GC_switch_out_master_gc() {
01772   static int initialized = 0;
01773 
01774   if(!initialized) {
01775     initialized = 1;
01776     MASTERGC = GC_get_GC();
01777     MASTERGC->dumping_avoid_collection = 1;
01778     save_globals_to_gc(MASTERGC);
01779     GC_construct_child_gc();
01780   }
01781   else {
01782     GCPRINT(GCOUTF, "GC_switch_out_master_gc should only be called once!\n");
01783     abort();
01784   }
01785 }
01786 
01787 /* used to initialize a MasterGC thread; a bad idea in general,
01788  * since scheme_master_fast_path is more performant */
01789 void GC_switch_in_master_gc() {
01790   GC_set_GC(MASTERGC);
01791   restore_globals_from_gc(MASTERGC);
01792 }
01793 
01794 /* used in scheme_master_fast_path */
01795 void *GC_switch_to_master_gc() {
01796   NewGC *gc = GC_get_GC();
01797   /* return if MASTERGC hasn't been constructed yet, allow recursive locking */
01798   if (!MASTERGC || gc == MASTERGC) {
01799     return MASTERGC;
01800   }
01801   save_globals_to_gc(gc);
01802 
01803   /* obtain exclusive access to MASTERGC */
01804   mzrt_rwlock_wrlock(MASTERGCINFO->cangc);
01805 
01806   GC_set_GC(MASTERGC);
01807   restore_globals_from_gc(MASTERGC);
01808   return gc;
01809 }
01810 
01811 void GC_switch_back_from_master(void *gc) {
01812   /* return if MASTERGC hasn't been constructed yet, allow recursive locking */
01813   if (!MASTERGC || gc == MASTERGC) {
01814     return;
01815   }
01816   save_globals_to_gc(MASTERGC);
01817 
01818   /* release exclusive access to MASTERGC */
01819   mzrt_rwlock_unlock(MASTERGCINFO->cangc);
01820 
01821   GC_set_GC(gc);
01822   restore_globals_from_gc(gc);
01823 }
01824 
01825   
01826 #endif
01827 
01828 void GC_gcollect(void)
01829 {
01830   NewGC *gc = GC_get_GC();
01831   garbage_collect(gc, 1);
01832 }
01833 
01834 void GC_register_traversers(short tag, Size_Proc size, Mark_Proc mark,
01835                             Fixup_Proc fixup, int constant_Size, int atomic)
01836 {
01837   NewGC *gc = GC_get_GC();
01838 
01839   int mark_tag = tag;
01840 
01841 #ifdef NEWGC_BTC_ACCOUNT
01842   mark_tag = BTC_get_redirect_tag(gc, mark_tag);
01843 #endif
01844 
01845 #ifdef MZ_GC_BACKTRACE
01846   /* Keep tagged objects in tagged space: */
01847   atomic = 0;
01848 #endif
01849 
01850   gc->mark_table[mark_tag]  = atomic ? (Mark_Proc)PAGE_ATOMIC : mark;
01851   gc->fixup_table[tag]      = fixup;
01852 }
01853 
01854 long GC_get_memory_use(void *o) 
01855 {
01856   NewGC *gc = GC_get_GC();
01857 #ifdef NEWGC_BTC_ACCOUNT
01858   if(o) {
01859     return BTC_get_memory_use(gc, o);
01860   }
01861 #endif
01862   return gen0_size_in_use(gc) + gc->memory_in_use;
01863 }
01864 
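/* Added example (not part of the original file): passing NULL reports
   total heap use; a non-NULL argument is presumably a custodian when
   BTC accounting is compiled in (it is handed to BTC_get_memory_use
   above). */
#if 0
  long total_bytes = GC_get_memory_use(NULL);
#endif
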
01865 /*****************************************************************************/
01866 /* Garbage collection proper ... and all the mess therein                    */
01867 /*****************************************************************************/
01868 
01869 /* We use two mark routines to handle propagation. Why two? The first is the
01870    one that we export out, and it does a metric crapload of work. The second
01871    we use internally, and it doesn't do nearly as much. */
01872 
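/* Added note (not part of the original file): a sketch of the division
   of labor. GC_mark() vets a pointer, marks or evacuates the object,
   and queues it; propagate_marks() dequeues and dispatches to the
   per-tag mark procedures, which invoke GC_mark() again (via gcMARK)
   on each child pointer: */
#if 0
  GC_mark(root);        /* validate, copy/mark, push_ptr */
  propagate_marks(gc);  /* pop_ptr, dispatch on page/object type */
#endif
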
01873 /* This is the first mark routine. It's a bit complicated. */
01874 void GC_mark(const void *const_p)
01875 {
01876   mpage *page;
01877   void *p = (void*)const_p;
01878   NewGC *gc;
01879 
01880   if(!p || (NUM(p) & 0x1)) {
01881     GCDEBUG((DEBUGOUTF, "Not marking %p (bad ptr)\n", p));
01882     return;
01883   }
01884 
01885   gc = GC_get_GC();
01886   if(!(page = pagemap_find_page(gc->page_maps, p))) {
01887     GCDEBUG((DEBUGOUTF,"Not marking %p (no page)\n",p));
01888     return;
01889   }
01890 
01891   /* toss this over to the BTC mark routine if we're doing accounting */
01892   if(gc->doing_memory_accounting) { 
01893 #ifdef NEWGC_BTC_ACCOUNT
01894     BTC_memory_account_mark(gc, page, p); return; 
01895 #endif
01896   }
01897 
01898   if(page->size_class) {
01899     if(page->size_class > 1) {
01900       /* This is a big page. The first thing we do is see if it's been marked
01901          previously */
01902       if(page->size_class != 2) {
01903         GCDEBUG((DEBUGOUTF, "Not marking %p on big %p (already marked)\n", p, page));
01904         return;
01905       }
01906       /* in this case, it has not. So we want to mark it, first off. */
01907       page->size_class = 3;
01908 
01909       /* if this is in the nursery, we want to move it out of the nursery */
01910       if(!page->generation) {
01911         page->generation = 1;
01912 
01913         /* remove page */
01914         if(page->prev) page->prev->next = page->next; else
01915           gc->gen0.big_pages = page->next;
01916         if(page->next) page->next->prev = page->prev;
01917 
01918         backtrace_new_page(gc, page);
01919 
01920         /* add to gen1 */
01921         page->next = gc->gen1_pages[PAGE_BIG]; 
01922         page->prev = NULL;
01923         if(page->next) page->next->prev = page;
01924         gc->gen1_pages[PAGE_BIG] = page;
01925 
01926         /* if we're doing memory accounting, then we need to make sure the
01927            btc_mark is right */
01928 #ifdef NEWGC_BTC_ACCOUNT
01929         BTC_set_btc_mark(gc, BIG_PAGE_TO_OBJHEAD(page));
01930 #endif
01931       }
01932 
01933       page->marked_on = 1;
01934       record_backtrace(page, BIG_PAGE_TO_OBJECT(page));
01935       GCDEBUG((DEBUGOUTF, "Marking %p on big page %p\n", p, page));
01936       /* Finally, we want to add this to our mark queue, so we can 
01937          propagate its pointers */
01938       push_ptr(p);
01939     } else {
01940       /* A medium page. */
01941       objhead *info = MED_OBJHEAD(p, page->size);
01942       if (info->mark) {
01943         GCDEBUG((DEBUGOUTF,"Not marking %p (already marked)\n", p));
01944         return;
01945       }
01946       info->mark = 1;
01947       page->marked_on = 1;
01948       p = OBJHEAD_TO_OBJPTR(info);
01949       backtrace_new_page_if_needed(gc, page);
01950       record_backtrace(page, p);
01951       push_ptr(p);
01952     }
01953   } else {
01954     objhead *ohead = OBJPTR_TO_OBJHEAD(p);
01955 
01956     if(ohead->mark) {
01957       GCDEBUG((DEBUGOUTF,"Not marking %p (already marked)\n", p));
01958       return;
01959     }
01960 
01961     /* what we do next depends on whether this is a gen0 or gen1 
01962        object */
01963     if(page->generation) {
01964       /* this is a generation 1 object. This means we are not going
01965          to move it, we don't have to check to see if it's an atomic
01966          object masquerading as a tagged object, etc. So all we do
01967          is add the pointer to the mark queue and note on the page
01968          that we marked something on it*/
01969       if((NUM(page->addr) + page->previous_size) <= NUM(p)) {
01970         GCDEBUG((DEBUGOUTF, "Marking %p (leaving alone)\n", p));
01971         ohead->mark = 1;
01972         page->marked_on = 1;
01973         page->previous_size = PREFIX_SIZE;
01974         page->live_size += ohead->size;
01975         record_backtrace(page, p);
01976         push_ptr(p);
01977       } else GCDEBUG((DEBUGOUTF, "Not marking %p (it's old; %p / %i)\n",
01978                       p, page, page->previous_size));
01979     } else {
01980       /* this is a generation 0 object. This means that we do have
01981          to do all of the above. Fun, fun, fun. */
01982       unsigned short type = ohead->type;
01983       mpage *work;
01984       size_t size;
01985       objhead *newplace;
01986 
01987       /* first check to see if this is an atomic object masquerading
01988          as a tagged object; if it is, then convert it */
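      /* (Added note, not in the original: slots in mark_table whose
         numeric value is below PAGE_TYPES are not function pointers;
         GC_register_traversers() above stores the page-type constant
         PAGE_ATOMIC in the slot for atomic tags, which is what the
         comparison below detects.) */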
01989       if(type == PAGE_TAGGED) {
01990         if((unsigned long)gc->mark_table[*(unsigned short*)p] < PAGE_TYPES)
01991           type = ohead->type = (int)(unsigned long)gc->mark_table[*(unsigned short*)p];
01992       }
01993 
01994       /* now set us up for the search for where to put this thing */
01995       work = gc->gen1_pages[type];
01996       size = gcWORDS_TO_BYTES(ohead->size);
01997 
01998       /* search for a page with the space to spare */
01999       if (work && ((work->size + size) >= APAGE_SIZE))
02000         work = NULL;
02001 
02002       /* now either fetch where we're going to put this object or make
02003          a new page if we couldn't find a page with space to spare */
02004       if(work) {
02005         if (!work->added) {
02006           pagemap_add(gc->page_maps, work);
02007           work->added = 1;
02008         }
02009         work->marked_on = 1;
02010         if (work->mprotected) {
02011           work->mprotected = 0;
02012           vm_protect_pages(work->addr, APAGE_SIZE, 1);
02013         }
02014         newplace = PTR(NUM(work->addr) + work->size);
02015       } else {
02016         /* Allocate and prep the page */
02017         work = malloc_mpage();
02018         work->addr = malloc_dirty_pages(gc, APAGE_SIZE, APAGE_SIZE);
02019         work->generation = 1;
02020         work->page_type = type;
02021         work->size = work->previous_size = PREFIX_SIZE;
02022         work->marked_on = 1;
02023         backtrace_new_page(gc, work);
02024         work->next = gc->gen1_pages[type];
02025         work->prev = NULL;
02026         if(work->next)
02027           work->next->prev = work;
02028         pagemap_add(gc->page_maps, work);
02029         work->added = 1;
02030         gc->gen1_pages[type] = work;
02031         newplace = PAGE_TO_OBJHEAD(work);
02032       }
02033 
02034       /* update the size */
02035       work->size += size;
02036       work->has_new = 1;
02037 
02038       /* transfer the object */
02039       ohead->mark = 1; /* mark is copied to newplace, too */
02040       if (size == PAIR_SIZE_IN_BYTES) 
02041         /* pairs are common, and compiler tends to inline constant-size memcpys */
02042         memcpy(newplace, ohead, PAIR_SIZE_IN_BYTES);
02043       else
02044         memcpy(newplace, ohead, size);
02045       /* mark the old location as marked and moved, and the new location
02046          as marked */
02047       ohead->moved = 1;
02048       /* if we're doing memory accounting, then we need the btc_mark
02049          to be set properly */
02050 #ifdef NEWGC_BTC_ACCOUNT
02051       BTC_set_btc_mark(gc, newplace);
02052 #endif
02053       
02054       {
02055         /* drop the new location of the object into the forwarding space
02056            and into the mark queue */
02057         void *newp = OBJHEAD_TO_OBJPTR(newplace);
02058         /* record why we marked this one (if enabled) */
02059         record_backtrace(work, newp);
02060         /* set forwarding pointer */
02061         GCDEBUG((DEBUGOUTF,"Marking %p (moved to %p on page %p)\n", p, newp, work));
02062         *(void**)p = newp;
02063         push_ptr(newp);
02064       }
02065     }
02066   }
02067 }
02068 
02069 /* this is the second mark routine. It's not quite as complicated. */
02070 /* this is what actually does mark propagation */
02071 static void propagate_marks(NewGC *gc) 
02072 {
02073   void *p;
02074   PageMap pagemap = gc->page_maps;
02075   Mark_Proc *mark_table = gc->mark_table;
02076 
02077   while(pop_ptr(&p)) {
02078     mpage *page = pagemap_find_page(pagemap, p);
02079     GCDEBUG((DEBUGOUTF, "Popped pointer %p\n", p));
02080 
02081     /* we can assume a lot here -- like it's a valid pointer with a page --
02082        because we vet bad cases out in GC_mark, above */
02083     if(page->size_class) {
02084       if(page->size_class > 1) {
02085         void **start = PPTR(BIG_PAGE_TO_OBJECT(page));
02086         void **end = PAGE_END_VSS(page);
02087 
02088         set_backtrace_source(start, page->page_type);
02089 
02090         switch(page->page_type) {
02091         case PAGE_TAGGED: 
02092           {
02093             unsigned short tag = *(unsigned short*)start;
02094             ASSERT_TAG(tag);
02095             if((unsigned long)mark_table[tag] < PAGE_TYPES) {
02096               /* atomic */
02097             } else {
02098               GC_ASSERT(mark_table[tag]);
02099               mark_table[tag](start); break;
02100             }
02101           }
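          /* (Added note, not in the original: the atomic case above
             falls through to PAGE_ATOMIC's break below.) */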
02102         case PAGE_ATOMIC: break;
02103         case PAGE_ARRAY: while(start < end) gcMARK(*(start++)); break;
02104         case PAGE_XTAGGED: GC_mark_xtagged(start); break;
02105         case PAGE_TARRAY: 
02106           {
02107             unsigned short tag = *(unsigned short *)start;
02108             ASSERT_TAG(tag);
02109             end -= INSET_WORDS;
02110             while(start < end) {
02111               GC_ASSERT(mark_table[tag]);
02112               start += mark_table[tag](start);
02113             }
02114             break;
02115           }
02116         }
02117       } else {
02118         /* Medium page */
02119         objhead *info = OBJPTR_TO_OBJHEAD(p);
02120 
02121         set_backtrace_source(p, info->type);
02122 
02123         switch(info->type) {
02124         case PAGE_TAGGED: 
02125           {
02126             unsigned short tag = *(unsigned short*)p;
02127             ASSERT_TAG(tag);
02128             GC_ASSERT(mark_table[tag]);
02129             mark_table[tag](p);
02130             break;
02131           }
02132         case PAGE_ARRAY:
02133           {
02134             void **start = p;
02135             void **end = PPTR(info) + info->size;
02136             while(start < end) gcMARK(*start++);
02137             break;
02138           }
02139         }
02140       }
02141     } else {
02142       objhead *info = OBJPTR_TO_OBJHEAD(p);
02143 
02144       set_backtrace_source(p, info->type);
02145 
02146       switch(info->type) {
02147       case PAGE_TAGGED: 
02148         {
02149           unsigned short tag = *(unsigned short*)p;
02150           ASSERT_TAG(tag);
02151           GC_ASSERT(mark_table[tag]);
02152           mark_table[tag](p);
02153           break;
02154         }
02155       case PAGE_ATOMIC: break;
02156       case PAGE_ARRAY: {
02157         void **start = p;
02158         void **end = PPTR(info) + info->size;
02159         while(start < end) gcMARK(*start++);
02160         break;
02161       }
02162       case PAGE_TARRAY: {
02163         void **start = p;
02164         void **end = PPTR(info) + (info->size - INSET_WORDS);
02165         unsigned short tag = *(unsigned short *)start;
02166         ASSERT_TAG(tag);
02167         while(start < end) {
02168           GC_ASSERT(mark_table[tag]);
02169           start += mark_table[tag](start);
02170         }
02171         break;
02172       }
02173       case PAGE_XTAGGED: GC_mark_xtagged(p); break;
02174       }
02175     }
02176   }
02177 }
02178 
02179 void *GC_resolve(void *p)
02180 {
02181   NewGC *gc = GC_get_GC();
02182   mpage *page = pagemap_find_page(gc->page_maps, p);
02183   objhead *info;
02184 
02185   if(!page || page->size_class)
02186     return p;
02187 
02188   info = OBJPTR_TO_OBJHEAD(p);
02189   if(info->mark && info->moved)
02190     return *(void**)p;
02191   else 
02192     return p;
02193 }
02194 
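/* Added example (not part of the original file): GC_resolve is the
   read side of the forwarding pointers installed by GC_mark above;
   weak.c uses it (as weak_box_resolve) to chase a weak target to its
   post-copy address. The field name below is hypothetical. */
#if 0
  void *target = GC_resolve(weak_box->val); /* new address if moved */
#endif
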
02195 void *GC_fixup_self(void *p)
02196 {
02197   return p;
02198 }
02199 
02200 void GC_fixup(void *pp)
02201 {
02202   NewGC *gc;
02203   mpage *page;
02204   void *p = *(void**)pp;
02205 
02206   if(!p || (NUM(p) & 0x1))
02207     return;
02208 
02209   gc = GC_get_GC();
02210   if((page = pagemap_find_page(gc->page_maps, p))) {
02211     objhead *info;
02212 
02213     if(page->size_class) return;
02214     info = OBJPTR_TO_OBJHEAD(p);
02215     if(info->mark && info->moved) 
02216       *(void**)pp = *(void**)p;
02217     else GCDEBUG((DEBUGOUTF, "Not repairing %p from %p (not moved)\n",p,pp));
02218   } else GCDEBUG((DEBUGOUTF, "Not repairing %p from %p (no page)\n", p, pp));
02219 }
02220 
02221 /*****************************************************************************/
02222 /* memory stats and traces                                                   */
02223 /*****************************************************************************/
02224 
02225 #ifdef MZ_GC_BACKTRACE
02226 # define trace_page_t mpage
02227 # define trace_page_type(page) (page)->page_type
02228 static void *trace_pointer_start(mpage *page, void *p) { 
02229   if (page->size_class) {
02230     if (page->size_class > 1)
02231       return BIG_PAGE_TO_OBJECT(page);
02232     else
02233       return MED_OBJHEAD_TO_OBJECT(p, page->size);
02234   } else 
02235     return p; 
02236 }
02237 # define TRACE_PAGE_TAGGED PAGE_TAGGED
02238 # define TRACE_PAGE_ARRAY PAGE_ARRAY
02239 # define TRACE_PAGE_TAGGED_ARRAY PAGE_TARRAY
02240 # define TRACE_PAGE_ATOMIC PAGE_ATOMIC
02241 # define TRACE_PAGE_XTAGGED PAGE_XTAGGED
02242 # define TRACE_PAGE_MALLOCFREE PAGE_TYPES
02243 # define TRACE_PAGE_BAD PAGE_TYPES
02244 # define trace_page_is_big(page) (page)->size_class
02245 # define trace_backpointer get_backtrace
02246 # include "backtrace.c"
02247 #else
02248 # define reset_object_traces() /* */
02249 # define register_traced_object(p) /* */
02250 # define print_traced_objects(x, y, q, z) /* */
02251 #endif
02252 
02253 #define MAX_DUMP_TAG 256
02254 
02255 void GC_dump_with_traces(int flags,
02256                          GC_get_type_name_proc get_type_name,
02257                          GC_get_xtagged_name_proc get_xtagged_name,
02258                          GC_for_each_found_proc for_each_found,
02259                          short trace_for_tag,
02260                          GC_print_tagged_value_proc print_tagged_value,
02261                          int path_length_limit)
02262 {
02263   NewGC *gc = GC_get_GC();
02264   mpage *page;
02265   int i;
02266   static unsigned long counts[MAX_DUMP_TAG], sizes[MAX_DUMP_TAG];
02267 
02268   reset_object_traces();
02269   if (for_each_found)
02270     gc->dumping_avoid_collection++;
02271 
02272   /* Traverse tagged pages to count objects: */
02273   for (i = 0; i < MAX_DUMP_TAG; i++) {
02274     counts[i] = sizes[i] = 0;
02275   }
02276   for (page = gc->gen1_pages[PAGE_TAGGED]; page; page = page->next) {
02277     void **start = PAGE_START_VSS(page);
02278     void **end = PAGE_END_VSS(page);
02279 
02280     while(start < end) {
02281       objhead *info = (objhead *)start;
02282       if(!info->dead) {
02283         void *obj_start = OBJHEAD_TO_OBJPTR(start);
02284         unsigned short tag = *(unsigned short *)obj_start;
02285         ASSERT_TAG(tag);
02286         if (tag < MAX_DUMP_TAG) {
02287           counts[tag]++;
02288           sizes[tag] += info->size;
02289         }
02290         if (tag == trace_for_tag) {
02291           register_traced_object(obj_start);
02292           if (for_each_found)
02293             for_each_found(obj_start);
02294         }
02295       }
02296       start += info->size;
02297     }
02298   }
02299   for (page = gc->gen1_pages[PAGE_BIG]; page; page = page->next) {
02300     if (page->page_type == PAGE_TAGGED) {
02301       void **start = PAGE_START_VSS(page);
02302       void *obj_start = OBJHEAD_TO_OBJPTR(start);
02303       unsigned short tag = *(unsigned short *)obj_start;
02304       ASSERT_TAG(tag);
02305       if (tag < MAX_DUMP_TAG) {
02306         counts[tag]++;
02307         sizes[tag] += gcBYTES_TO_WORDS(page->size);
02308       }
02309       if ((tag == trace_for_tag)
02310           || (tag == -trace_for_tag)) {
02311         register_traced_object(obj_start);
02312         if (for_each_found)
02313           for_each_found(obj_start);
02314       }
02315     }
02316   }
02317   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
02318     for (page = gc->med_pages[i]; page; page = page->next) {
02319       void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
02320       void **end = PPTR(NUM(page->addr) + APAGE_SIZE - page->size);
02321       
02322       while(start <= end) {
02323         objhead *info = (objhead *)start;
02324         if (!info->dead) {
02325           if (info->type == PAGE_TAGGED) {
02326             void *obj_start = OBJHEAD_TO_OBJPTR(start);
02327             unsigned short tag = *(unsigned short *)obj_start;
02328             ASSERT_TAG(tag);
02329             if (tag < MAX_DUMP_TAG) {
02330               counts[tag]++;
02331               sizes[tag] += info->size;
02332             }
02333             if (tag == trace_for_tag) {
02334               register_traced_object(obj_start);
02335               if (for_each_found)
02336                 for_each_found(obj_start);
02337             }
02338           }
02339         }
02340         start += info->size;
02341       }
02342     }
02343   }
02344 
02345   GCPRINT(GCOUTF, "Begin MzScheme3m\n");
02346   for (i = 0; i < MAX_DUMP_TAG; i++) {
02347     if (counts[i]) {
02348       char *tn, buf[256];
02349       if (get_type_name)
02350         tn = get_type_name((Type_Tag)i);
02351       else
02352         tn = NULL;
02353       if (!tn) {
02354         sprintf(buf, "unknown,%d", i);
02355         tn = buf;
02356       }
02357       GCPRINT(GCOUTF, "  %20.20s: %10ld %10ld\n", tn, counts[i], gcWORDS_TO_BYTES(sizes[i]));
02358     }
02359   }
02360   GCPRINT(GCOUTF, "End MzScheme3m\n");
02361 
02362   GCWARN((GCOUTF, "Generation 0: %lu of %li bytes used\n", (unsigned long) gen0_size_in_use(gc), gc->gen0.max_size));
02363 
02364   for(i = 0; i < PAGE_TYPES; i++) {
02365     unsigned long total_use = 0, count = 0;
02366 
02367     for(page = gc->gen1_pages[i]; page; page = page->next) {
02368       total_use += page->size;
02369       count++;
02370     }
02371     GCWARN((GCOUTF, "Generation 1 [%s]: %li bytes used in %li pages\n", 
02372             type_name[i], total_use, count));
02373   }
02374 
02375   GCWARN((GCOUTF, "Generation 1 [medium]:"));
02376   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
02377     if (gc->med_pages[i]) {
02378       long count = 0, page_count = 0;
02379       for (page = gc->med_pages[i]; page; page = page->next) {
02380         void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
02381         void **end = PPTR(NUM(page->addr) + APAGE_SIZE - page->size);
02382         
02383         page_count++;
02384         
02385         while(start <= end) {
02386           objhead *info = (objhead *)start;
02387           if (!info->dead) {
02388             count += info->size;
02389           }
02390           start += info->size;
02391         }
02392       }
02393       GCWARN((GCOUTF, " %li [%li/%li]", count, page_count, gc->med_pages[i]->size));
02394     }
02395   }
02396   GCWARN((GCOUTF, "\n"));
02397 
02398 
02399   GCWARN((GCOUTF,"\n"));
02400   GCWARN((GCOUTF,"Current memory use: %li\n", GC_get_memory_use(NULL)));
02401   GCWARN((GCOUTF,"Peak memory use after a collection: %li\n", gc->peak_memory_use));
02402   GCWARN((GCOUTF,"Allocated (+reserved) page sizes: %li (+%li)\n", 
02403           gc->used_pages * APAGE_SIZE, 
02404           vm_memory_allocated(gc->vm) - (gc->used_pages * APAGE_SIZE)));
02405   GCWARN((GCOUTF,"# of major collections: %li\n", gc->num_major_collects));
02406   GCWARN((GCOUTF,"# of minor collections: %li\n", gc->num_minor_collects));
02407   GCWARN((GCOUTF,"# of installed finalizers: %i\n", gc->num_fnls));
02408   GCWARN((GCOUTF,"# of traced ephemerons: %i\n", gc->num_last_seen_ephemerons));
02409 
02410   if (flags & GC_DUMP_SHOW_TRACE) {
02411     print_traced_objects(path_length_limit, get_type_name, get_xtagged_name, print_tagged_value);
02412   }
02413 
02414   if (for_each_found)
02415     --gc->dumping_avoid_collection;
02416 }
02417 
02418 void GC_dump(void)
02419 {
02420   GC_dump_with_traces(0, NULL, NULL, NULL, 0, NULL, 0);
02421 }
02422 
02423 #ifdef MZ_GC_BACKTRACE
02424 
02425 int GC_is_tagged(void *p)
02426 {
02427   NewGC *gc = GC_get_GC();
02428   mpage *page;
02429   page = pagemap_find_page(gc->page_maps, p);
02430   return page && (page->page_type == PAGE_TAGGED);
02431 }
02432 
02433 int GC_is_tagged_start(void *p)
02434 {
02435   return 0;
02436 }
02437 
02438 void *GC_next_tagged_start(void *p)
02439 {
02440   return NULL;
02441 }
02442 
02443 #endif
02444 
02445 /*****************************************************************************/
02446 /* garbage collection                                                        */
02447 /*****************************************************************************/
02448 
02449 static void reset_gen1_page(NewGC *gc, mpage *work)
02450 {
02451   if (gc->generations_available && work->mprotected) {
02452     work->mprotected = 0;
02453     add_protect_page_range(gc->protect_range, work->addr, 
02454                            (work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE, 
02455                            APAGE_SIZE, 1);
02456   }
02457 }
02458 
02459 static void reset_gen1_pages_live_and_previous_sizes(NewGC *gc)
02460 {
02461   mpage *work;
02462   int i;
02463 
02464   GCDEBUG((DEBUGOUTF, "MAJOR COLLECTION - PREPPING PAGES - reset live_size, reset previous_size, unprotect.\n"));
02465   /* we need to make sure that previous_size for every page is reset, so
02466      we don't accidentally screw up the mark routine */
02467 
02468   for(i = 0; i < PAGE_TYPES; i++) {
02469     for(work = gc->gen1_pages[i]; work; work = work->next) {
02470       reset_gen1_page(gc, work);
02471       work->live_size = 0;
02472       work->previous_size = PREFIX_SIZE;
02473     }
02474   }
02475 
02476   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
02477     for (work = gc->med_pages[i]; work; work = work->next) {
02478       if (work->generation) {
02479         reset_gen1_page(gc, work);
02480       }
02481     }
02482   }
02483 
02484   flush_protect_page_ranges(gc->protect_range, 1);
02485 }
02486 
02487 static void remove_gen1_page_from_pagemap(NewGC *gc, mpage *work)
02488 {
02489   if (gc->generations_available && work->back_pointers && work->mprotected) {
02490     work->mprotected = 0;
02491     add_protect_page_range(gc->protect_range, work->addr, 
02492                            (work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE, 
02493                            APAGE_SIZE, 1);
02494   }
02495   pagemap_remove(gc->page_maps, work);
02496   work->added = 0;
02497 }
02498 
02499 static void remove_all_gen1_pages_from_pagemap(NewGC *gc)
02500 {
02501   mpage *work;
02502   int i;
02503 
02504   GCDEBUG((DEBUGOUTF, "MINOR COLLECTION - PREPPING PAGES - remove all gen1 pages from pagemap.\n"));
02505 
02506   /* if we're not doing a major collection, then we need to remove all the
02507      pages in gc->gen1_pages[] from the page map */
02508 
02509   for(i = 0; i < PAGE_TYPES; i++) {
02510     for(work = gc->gen1_pages[i]; work; work = work->next) {
02511       remove_gen1_page_from_pagemap(gc, work);
02512     }
02513   }
02514 
02515   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
02516     for (work = gc->med_pages[i]; work; work = work->next) {
02517       if (work->generation) {
02518         remove_gen1_page_from_pagemap(gc, work);
02519       }
02520     }
02521   }
02522 
02523   flush_protect_page_ranges(gc->protect_range, 1);
02524 }
02525 
02526 static void mark_backpointers(NewGC *gc)
02527 {
02528   if(!gc->gc_full) {
02529     mpage *work;
02530     int i;
02531     PageMap pagemap = gc->page_maps;
02532 
02533     /* if this is not a full collection, then we need to mark any pointers
02534        that point backwards into generation 0, since they're roots. */
02535     for(i = 0; i < PAGE_TYPES; i++) {
02536       for(work = gc->gen1_pages[i]; work; work = work->next) {
02537         if(work->back_pointers) {
02538           /* these pages are guaranteed not to be write protected, because
02539              if they were, they wouldn't have this bit set */
02540           work->marked_on = 1;
02541           work->previous_size = PREFIX_SIZE;
02542           pagemap_add(pagemap, work);
02543           if(work->size_class) {
02544             /* must be a big page */
02545             work->size_class = 3;
02546             push_ptr(BIG_PAGE_TO_OBJECT(work));
02547           } else {
02548             if(work->page_type != PAGE_ATOMIC) {
02549               void **start = PAGE_START_VSS(work);
02550               void **end = PAGE_END_VSS(work);
02551 
02552               while(start < end) {
02553                 objhead *info = (objhead *)start;
02554                 if(!info->dead) {
02555                   info->mark = 1;
02556                   /* This must be a push_ptr, and not a direct call to
02557                      internal_mark. This is because we need every object
02558                      in the older heap to be marked out of and noted as
02559                      marked before we do anything else */
02560                   push_ptr(OBJHEAD_TO_OBJPTR(start));
02561                 }
02562                 start += info->size;
02563               }
02564             }
02565           }
02566           work->previous_size = PREFIX_SIZE;
02567         } else {
02568           GCDEBUG((DEBUGOUTF,"Setting previous_size on %p to %i\n", work,
02569                    work->size));
02570           work->previous_size = work->size;
02571         }
02572       }
02573     }
02574 
02575     for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
02576       for (work = gc->med_pages[i]; work; work = work->next) {
02577         if(work->back_pointers) {
02578           void **start = PPTR(NUM(work->addr) + PREFIX_SIZE);
02579           void **end = PPTR(NUM(work->addr) + APAGE_SIZE - work->size);
02580           
02581           work->marked_on = 1;
02582           pagemap_add(pagemap, work);
02583 
02584           while(start <= end) {
02585             objhead *info = (objhead *)start;
02586             if(!info->dead) {
02587               info->mark = 1;
02588               /* This must be a push_ptr (see above) */
02589               push_ptr(OBJHEAD_TO_OBJPTR(info));
02590             }
02591             start += info->size;
02592           }
02593         }
02594       }
02595     }
02596   }
02597 }
02598 
02599 mpage *allocate_compact_target(NewGC *gc, mpage *work)
02600 {
02601   mpage *npage;
02602 
02603   npage = malloc_mpage();
02604   npage->addr = malloc_dirty_pages(gc, APAGE_SIZE, APAGE_SIZE);
02605   npage->previous_size = npage->size = PREFIX_SIZE;
02606   npage->generation = 1;
02607   npage->back_pointers = 0;
02608   npage->size_class = 0;
02609   npage->page_type = work->page_type;
02610   npage->marked_on = 1;
02611   backtrace_new_page(gc, npage);
02612   /* Link in this new replacement page */
02613   npage->prev = work;
02614   npage->next = work->next;
02615   work->next = npage;
02616   if (npage->next)
02617     npage->next->prev = npage;
02618 
02619   return npage;
02620 }
02621 
02622 /* Compact when more than 1/4 of a page's used extent is dead space: */
02623 #define should_compact_page(lsize,tsize) (lsize < (tsize - PREFIX_SIZE - (APAGE_SIZE >> 2)))
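/* Added arithmetic (not part of the original file): assuming an
   APAGE_SIZE of 16384 bytes (the value is platform-configured, so this
   is an assumption), a page whose allocated extent is the full 16384
   bytes is compacted once its live data drops below
   16384 - PREFIX_SIZE - 4096 bytes, i.e. once more than about a
   quarter of the page is dead. */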
02624 
02625 inline static void do_heap_compact(NewGC *gc)
02626 {
02627   int i;
02628   PageMap pagemap = gc->page_maps;
02629 
02630   for(i = 0; i < PAGE_BIG; i++) {
02631     mpage *work = gc->gen1_pages[i], *prev, *npage;
02632 
02633     /* Start from the end: */
02634     if (work) {
02635       while (work->next)
02636         work = work->next;
02637     }
02638     npage = work;
02639 
02640     while(work) {
02641       if(work->marked_on && !work->has_new) {
02642         /* then determine if we actually want to do compaction */
02643         if(should_compact_page(gcWORDS_TO_BYTES(work->live_size),work->size)) {
02644           void **start = PAGE_START_VSS(work);
02645           void **end = PAGE_END_VSS(work);
02646           void **newplace;
02647           unsigned long avail;
02648 
02649           GCDEBUG((DEBUGOUTF, "Compacting page %p: new version at %p\n", 
02650                    work, npage));
02651 
02652           if (npage == work) {
02653             /* Need to insert a page: */
02654             npage = allocate_compact_target(gc, work);
02655           }
02656           avail = gcBYTES_TO_WORDS(APAGE_SIZE - npage->size);
02657           newplace = PPTR(NUM(npage->addr) + npage->size);
02658 
02659           while(start < end) {
02660             objhead *info = (objhead *)start;
02661 
02662             if(info->mark) {
02663               while (avail <= info->size) {
02664                 npage->size = NUM(newplace) - NUM(npage->addr);
02665                 do {
02666                   npage = npage->prev;
02667                 } while (!npage->marked_on || npage->has_new);
02668                 if (npage == work)
02669                   npage = allocate_compact_target(gc, work);
02670                 avail = gcBYTES_TO_WORDS(APAGE_SIZE - npage->size);
02671                 newplace = PPTR(NUM(npage->addr) + npage->size);
02672               }
02673 
02674               if (npage->mprotected) {
02675                 npage->mprotected = 0;
02676                 vm_protect_pages(npage->addr, APAGE_SIZE, 1);
02677               }
02678 
02679               GCDEBUG((DEBUGOUTF,"Moving size %i object from %p to %p\n",
02680                        gcWORDS_TO_BYTES(info->size), start+1, newplace+1));
02681               memcpy(newplace, start, gcWORDS_TO_BYTES(info->size));
02682               info->moved = 1;
02683               *(PPTR(OBJHEAD_TO_OBJPTR(start))) = OBJHEAD_TO_OBJPTR(newplace);
02684               copy_backtrace_source(npage, newplace, work, start);
02685               newplace += info->size;
02686               avail -= info->size;
02687             }
02688             start += info->size;       
02689           }
02690           npage->size = NUM(newplace) - NUM(npage->addr);
02691 
02692           prev = work->prev;
02693 
02694           if(prev) prev->next = work->next; else gc->gen1_pages[i] = work->next;
02695           if(work->next) work->next->prev = prev;
02696 
02697           /* push work onto gc->release_pages */
02698           work->next = gc->release_pages;
02699           gc->release_pages = work;
02700 
02701           /* add the old page to the page map so fixups can find forwards */
02702           pagemap_add(pagemap, work);
02703 
02704           work = prev;
02705         } else { 
02706           work = work->prev;
02707         }
02708       } else {
02709         if (npage == work)
02710           npage = npage->prev;
02711         work = work->prev;
02712       }
02713     }
02714   }
02715 }
02716 
02717 static void repair_heap(NewGC *gc)
02718 {
02719   mpage *page;
02720   int i;
02721   Fixup_Proc *fixup_table = gc->fixup_table;
02722 
02723   for(i = 0; i < PAGE_TYPES; i++) {
02724     for(page = gc->gen1_pages[i]; page; page = page->next) {
02725       if(page->marked_on) {
02726         page->has_new = 0;
02727         /* these are guaranteed not to be protected */
02728         if(page->size_class)  {
02729           /* since we get here via gen1_pages, it's a big page */
02730           void **start = PPTR(BIG_PAGE_TO_OBJECT(page));
02731           void **end = PAGE_END_VSS(page);
02732 
02733           GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n",
02734                    page, start));
02735           page->size_class = 2; /* remove the mark */
02736           switch(page->page_type) {
02737           case PAGE_TAGGED: 
02738             fixup_table[*(unsigned short*)start](start); 
02739             break;
02740           case PAGE_ATOMIC: break;
02741           case PAGE_ARRAY: 
02742             while(start < end) gcFIXUP(*(start++)); 
02743             break;
02744           case PAGE_XTAGGED: 
02745             GC_fixup_xtagged(start); 
02746             break;
02747           case PAGE_TARRAY: {
02748             unsigned short tag = *(unsigned short *)start;
02749             ASSERT_TAG(tag);
02750             end -= INSET_WORDS;
02751             while(start < end) start += fixup_table[tag](start);
02752             break;
02753           }
02754           }
02755         } else {
02756           void **start = PPTR(NUM(page->addr) + page->previous_size);
02757           void **end = PAGE_END_VSS(page);
02758 
02759           GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n",
02760                 page, start));
02761           switch(page->page_type) {
02762             case PAGE_TAGGED: 
02763               while(start < end) {
02764                 objhead *info = (objhead *)start;
02765 
02766                 if(info->mark) {
02767                   void *obj_start = OBJHEAD_TO_OBJPTR(start);
02768                   unsigned short tag = *(unsigned short *)obj_start;
02769                   ASSERT_TAG(tag);
02770                   info->mark = 0;
02771                   fixup_table[tag](obj_start);
02772                 } else {
02773                   info->dead = 1;
02774                 }
02775                 start += info->size;
02776               }
02777               break;
02778             case PAGE_ATOMIC:
02779               while(start < end) {
02780                 objhead *info = (objhead *)start;
02781                 if(info->mark) {
02782                   info->mark = 0;
02783                 } else info->dead = 1;
02784                 start += info->size;
02785               }
02786               break;
02787             case PAGE_ARRAY: 
02788               while(start < end) {
02789                 objhead *info = (objhead *)start;
02790                 size_t size = info->size;
02791                 if(info->mark) {
02792                   void **tempend = PPTR(info) + info->size;
02793                   start = OBJHEAD_TO_OBJPTR(start);
02794                   while(start < tempend) gcFIXUP(*start++);
02795                   info->mark = 0;
02796                 } else { 
02797                   info->dead = 1;
02798                   start += size;
02799                 }
02800               }
02801               break;
02802             case PAGE_TARRAY:
02803               while(start < end) {
02804                 objhead *info = (objhead *)start;
02805                 size_t size = info->size;
02806                 if(info->mark) {
02807                   void **tempend = PPTR(info) + (info->size - INSET_WORDS);
02808                   unsigned short tag;
02809                   start = OBJHEAD_TO_OBJPTR(start);
02810                   tag = *(unsigned short*)start;
02811                   ASSERT_TAG(tag);
02812                   while(start < tempend)
02813                     start += fixup_table[tag](start);
02814                   info->mark = 0;
02815                   start = PPTR(info) + size;
02816                 } else {
02817                   info->dead = 1;
02818                   start += size;
02819                 }
02820               }
02821               break;
02822             case PAGE_XTAGGED:
02823               while(start < end) {
02824                 objhead *info = (objhead *)start;
02825                 if(info->mark) {
02826                   GC_fixup_xtagged(OBJHEAD_TO_OBJPTR(start));
02827                   info->mark = 0;
02828                 } else info->dead = 1;
02829                 start += info->size;
02830               }
02831           }
02832         }
02833       } else GCDEBUG((DEBUGOUTF,"Not Cleaning page %p\n", page));
02834     }
02835   }
02836 
02837   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
02838     for (page = gc->med_pages[i]; page; page = page->next) {
02839       if (page->marked_on) {
02840         void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
02841         void **end = PPTR(NUM(page->addr) + APAGE_SIZE - page->size);
02842         
02843         while(start <= end) {
02844           objhead *info = (objhead *)start;
02845           if(info->mark) {
02846             switch(info->type) {
02847             case PAGE_ARRAY:
02848               {
02849                 void **tempend = PPTR(info) + info->size;
02850                 start = OBJHEAD_TO_OBJPTR(start);
02851                 while(start < tempend) gcFIXUP(*start++);
02852               }
02853               break;
02854             case PAGE_TAGGED:
02855               {
02856                 void *obj_start = OBJHEAD_TO_OBJPTR(start);
02857                 unsigned short tag = *(unsigned short *)obj_start;
02858                 ASSERT_TAG(tag);
02859                 fixup_table[tag](obj_start);
02860                 start += info->size;
02861               }
02862               break;
02863             }
02864             info->mark = 0;
02865           } else {
02866             info->dead = 1;
02867             start += info->size;
02868           }
02869         }
02870       }
02871     }
02872   }
02873 }
02874 
02875 static inline void gen1_free_mpage(PageMap pagemap, mpage *page) {
02876   size_t real_page_size = (page->size_class > 1) ? round_to_apage_size(page->size) : APAGE_SIZE;
02877   pagemap_remove(pagemap, page);
02878   free_backtrace(page);
02879   free_pages(GC, page->addr, real_page_size);
02880   free_mpage(page);
02881 }
02882 
02883 static inline void cleanup_vacated_pages(NewGC *gc) {
02884   mpage *pages = gc->release_pages;
02885   PageMap pagemap = gc->page_maps;
02886 
02887   /* Free pages vacated by compaction: */
02888   while (pages) {
02889     mpage *next = pages->next;
02890     gen1_free_mpage(pagemap, pages);
02891     pages = next;
02892   }
02893   gc->release_pages = NULL;
02894 }
02895 
02896 inline static void gen0_free_big_pages(NewGC *gc) {
02897   mpage *work;
02898   mpage *next;
02899   PageMap pagemap = gc->page_maps;
02900 
02901   for(work = gc->gen0.big_pages; work; work = next) {
02902     next = work->next;
02903     pagemap_remove(pagemap, work);
02904     free_pages(gc, work->addr, round_to_apage_size(work->size));
02905     free_mpage(work);
02906   }
02907 }
02908 
02909 static void clean_up_heap(NewGC *gc)
02910 {
02911   int i;
02912   size_t memory_in_use = 0;
02913   PageMap pagemap = gc->page_maps;
02914 
02915   gen0_free_big_pages(gc);
02916 
02917   for(i = 0; i < PAGE_TYPES; i++) {
02918     if(gc->gc_full) {
02919       mpage *work = gc->gen1_pages[i];
02920       mpage *prev = NULL;
02921       while(work) {
02922         mpage *next = work->next;
02923         if(!work->marked_on) {
02924           /* remove work from list */
02925           if(prev) prev->next = next; else gc->gen1_pages[i] = next;
02926           if(next) work->next->prev = prev;
02927           gen1_free_mpage(pagemap, work);
02928         } else {
02929           pagemap_add(pagemap, work);
02930           work->back_pointers = work->marked_on = 0;
02931           memory_in_use += work->size;
02932           prev = work; 
02933         }
02934         work = next;
02935       }
02936     } else {
02937       mpage *work;
02938       for(work = gc->gen1_pages[i]; work; work = work->next) {
02939         pagemap_add(pagemap, work);
02940         work->back_pointers = work->marked_on = 0;
02941         memory_in_use += work->size;
02942       }
02943     }
02944   }
02945 
02946   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
02947     mpage *work;
02948     mpage *prev = NULL, *next;
02949 
02950     for (work = gc->med_pages[i]; work; work = next) {
02951       if (work->marked_on) {
02952         void **start = PPTR(NUM(work->addr) + PREFIX_SIZE);
02953         void **end = PPTR(NUM(work->addr) + APAGE_SIZE - work->size);
02954         int non_dead = 0;
02955 
02956         while(start <= end) {
02957           objhead *info = (objhead *)start;
02958           if (!info->dead) {
02959             non_dead++;
02960           }
02961           start += info->size;
02962         }
02963 
02964         next = work->next;
02965         if (non_dead) {
02966           work->live_size = (work->size * non_dead);
02967           memory_in_use += work->live_size;
02968           work->previous_size = PREFIX_SIZE;
02969           work->back_pointers = work->marked_on = 0;
02970           work->generation = 1;
02971           pagemap_add(pagemap, work);
02972           prev = work;
02973         } else {
02974           /* free the page */
02975           if(prev) prev->next = next; else gc->med_pages[i] = next;
02976           if(next) work->next->prev = prev;
02977           gen1_free_mpage(pagemap, work);
02978         }
02979       } else if (gc->gc_full || !work->generation) {
02980         /* The page wasn't touched in a full GC, or it's a gen-0 page
02981            that wasn't touched, so we can free it. */
02982         next = work->next;
02983         if(prev) prev->next = next; else gc->med_pages[i] = next;
02984         if(next) work->next->prev = prev;
02985         gen1_free_mpage(pagemap, work);
02986       } else {
02987         /* not touched during minor gc */
02988         memory_in_use += work->live_size;
02989         work->previous_size = PREFIX_SIZE;
02990         next = work->next;
02991         prev = work;
02992         work->back_pointers = 0;
02993         pagemap_add(pagemap, work);
02994       }
02995     }
02996     gc->med_freelist_pages[i] = prev;
02997   }
02998 
02999   gc->memory_in_use = memory_in_use;
03000   cleanup_vacated_pages(gc);
03001 }
03002 
03003 static void protect_old_pages(NewGC *gc)
03004 {
03005   Page_Range *protect_range = gc->protect_range;
03006   mpage *page;
03007   int i;
03008 
03009   for(i = 0; i < PAGE_TYPES; i++) {
03010     if(i != PAGE_ATOMIC)
03011       for(page = gc->gen1_pages[i]; page; page = page->next)
03012         if(page->page_type != PAGE_ATOMIC)  {
03013           if (!page->mprotected) {
03014             page->mprotected = 1;
03015             add_protect_page_range(protect_range, page->addr, page->size, APAGE_SIZE, 0);
03016           }
03017         }
03018   }
03019 
03020   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
03021     for (page = gc->med_pages[i]; page; page = page->next) {
03022       if (!page->mprotected) {
03023         page->mprotected = 1;
03024         add_protect_page_range(protect_range, page->addr, APAGE_SIZE, APAGE_SIZE, 0);
03025       }
03026     }
03027   }
03028 
03029   flush_protect_page_ranges(protect_range, 0);
03030 }
03031 
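/* Added note (not part of the original file): this is the arming half
   of the write barrier. Every non-atomic old page is made read-only
   here, so the first store into it after this collection faults into
   designate_modified(), which unprotects the page and flags it with
   back_pointers for mark_backpointers() to treat as a root source on
   the next minor collection. */
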
03032 #if 0
03033 extern double scheme_get_inexact_milliseconds(void);
03034 # define TIME_DECLS() double start, task_start
03035 # define TIME_INIT() start = task_start = scheme_get_inexact_milliseconds(); fprintf(stderr, "GC (%d):\n", gc->gc_full)
03036 # define TIME_STEP(task) fprintf(stderr, "  %s: %lf\n", task, scheme_get_inexact_milliseconds() - task_start); \
03037   task_start = scheme_get_inexact_milliseconds()
03038 # define TIME_DONE() fprintf(stderr, " Total: %lf\n", scheme_get_inexact_milliseconds() - start)
03039 #else
03040 # define TIME_DECLS() 
03041 # define TIME_INIT() 
03042 # define TIME_STEP(task) 
03043 # define TIME_DONE() 
03044 #endif
03045 
03046 /* Full GCs trigger finalization. Finalization releases data
03047    in the old generation. So one more full GC is needed to
03048    really clean up. The full_needed_for_finalization flag triggers 
03049    the second full GC. */
03050 
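/* Added sketch (not part of the original file), assuming the flag is
   set when finalizers are queued (that code is not shown here):
     full GC #1: finalizers run; full_needed_for_finalization = 1
     next GC:    the flag forces gc_full = 1, reclaiming whatever the
                 finalizers released. */
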
03051 static void garbage_collect(NewGC *gc, int force_full)
03052 {
03053   unsigned long old_mem_use = gc->memory_in_use;
03054   unsigned long old_gen0    = gc->gen0.current_size;
03055   int next_gc_full;
03056   TIME_DECLS();
03057 
03058   /* determine if this should be a full collection or not */
03059   gc->gc_full = force_full || !gc->generations_available 
03060     || (gc->since_last_full > 100) || (gc->memory_in_use > (2 * gc->last_full_mem_use));
03061 #if 0
03062   printf("Collection %li (full = %i): %i / %i / %i / %i  %ld\n", gc->number_of_gc_runs, 
03063       gc->gc_full, force_full, !gc->generations_available,
03064       (gc->since_last_full > 100), (gc->memory_in_use > (2 * gc->last_full_mem_use)),
03065       gc->last_full_mem_use);
03066 #endif
03067 
03068   next_gc_full = gc->gc_full;
03069 
03070   if (gc->full_needed_for_finalization) {
03071     gc->full_needed_for_finalization = 0;
03072     gc->gc_full = 1;
03073   }
03074 
03075   gc->number_of_gc_runs++; 
03076   INIT_DEBUG_FILE(); DUMP_HEAP();
03077 
03078   /* we don't want the low-level allocator freaking because we've gone past
03079      half the available memory */
03080   gc->in_unsafe_allocation_mode = 1;
03081   gc->unsafe_allocation_abort = out_of_memory;
03082 
03083   TIME_INIT();
03084 
03085   /* inform the system (if it wants us to) that we're starting collection */
03086   if(gc->GC_collect_start_callback)
03087     gc->GC_collect_start_callback();
03088 
03089   TIME_STEP("started");
03090 
03091   gc->no_further_modifications = 1;
03092 
03093   if (gc->gc_full)
03094     reset_gen1_pages_live_and_previous_sizes(gc);
03095   else /* minor collection */
03096     remove_all_gen1_pages_from_pagemap(gc);
03097 
03098   init_weak_boxes(gc);
03099   init_weak_arrays(gc);
03100   init_ephemerons(gc);
03101 
03102   /* at this point, the page map should only include pages that contain
03103      collectable objects */
03104 
03105   TIME_STEP("prepared");
03106 
03107   /* mark and repair the roots for collection */
03108   mark_backpointers(gc);
03109   TIME_STEP("backpointered");
03110   mark_finalizer_structs(gc);
03111   mark_weak_finalizer_structs(gc);
03112   TIME_STEP("pre-rooted");
03113   mark_roots(gc);
03114   mark_immobiles(gc);
03115   TIME_STEP("rooted");
03116 #ifdef MZ_USE_PLACES
03117   if (!is_master_gc(gc))
03118 #endif
03119     GC_mark_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL);
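    /* GC_variable_stack is the chain of frames through which converted C
       code registers its local pointer variables, letting the collector
       find roots on the C stack precisely.  In places mode the shared
       master GC has no thread stack of its own, hence the guard above. */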
03120 
03121   TIME_STEP("stacked");
03122 
03123   /* now propagate/repair the marks we got from these roots, and do the
03124      finalizer passes */
03125   propagate_marks(gc);
03126   mark_ready_ephemerons(gc); 
03127   propagate_marks(gc); 
03128 
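  /* Finalizers run at three eager levels (note f->eager_level in the run
     loop near the end of this function).  Each check_finalizers() pass can
     queue finalizers and thereby retain objects, which can in turn make
     more ephemerons ready, so marks are propagated again between levels. */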
03129   check_finalizers(gc, 1);
03130   mark_ready_ephemerons(gc);
03131   propagate_marks(gc);
03132 
03133   check_finalizers(gc, 2);
03134   mark_ready_ephemerons(gc);
03135   propagate_marks(gc);
03136 
03137   if(gc->gc_full) zero_weak_finalizers(gc);
03138   do_ordered_level3(gc); propagate_marks(gc);
03139   check_finalizers(gc, 3); propagate_marks(gc);
03140   if(gc->gc_full) {
03141     reset_weak_finalizers(gc); 
03142     propagate_marks(gc);
03143   }
03144 #ifndef NEWGC_BTC_ACCOUNT
03145   /* we need to clear out the stack pages. If we're doing memory accounting,
03146      though, we might as well leave them up for now and let the accounting
03147      system clear them later. Better than freeing them, at least. If we're
03148      not doing accounting, though, there is no "later" where they'll get
03149      removed */
03150   clear_stack_pages();  
03151 #endif
03152 
03153   TIME_STEP("marked");
03154 
03155   zero_weak_boxes(gc); 
03156   zero_weak_arrays(gc);
03157   zero_remaining_ephemerons(gc);
03158 
03159   TIME_STEP("zeroed");
03160 
03161   if(gc->gc_full) do_heap_compact(gc);
03162 
03163   TIME_STEP("compacted");
03164 
03165   /* clean up structures that either change state based on the heap
03166      state after collection or that become useless given the state
03167      changes after collection */
03168 #ifdef NEWGC_BTC_ACCOUNT
03169   BTC_clean_up(gc);
03170 #endif
03171   TIME_STEP("cleaned");
03172   repair_finalizer_structs(gc);
03173   repair_weak_finalizer_structs(gc);
03174   repair_roots(gc);
03175   repair_immobiles(gc);
03176 #ifdef MZ_USE_PLACES
03177   if (!is_master_gc(gc))
03178 #endif
03179     GC_fixup_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL);
03180   TIME_STEP("repaired roots");
03181   repair_heap(gc);
03182   TIME_STEP("repaired");
03183   clean_up_heap(gc);
03184   TIME_STEP("cleaned heap");
03185   reset_nursery(gc);
03186   TIME_STEP("reset nursery");
03187 #ifdef NEWGC_BTC_ACCOUNT
03188   if (gc->gc_full)
03189     BTC_do_accounting(gc);
03190 #endif
03191   TIME_STEP("accounted");
03192   if (gc->generations_available)
03193     protect_old_pages(gc);
03194   TIME_STEP("protect");
03195   if (gc->gc_full)
03196     vm_flush_freed_pages(gc->vm);
03197   reset_finalizer_tree(gc);
03198 
03199   TIME_STEP("reset");
03200 
03201   /* now we do want the allocator freaking out if we go over half */
03202   gc->in_unsafe_allocation_mode = 0;
03203 
03204   gc->no_further_modifications = 0;
03205 
03206   /* If we have too many idle pages (OS-level allocation above twice the pages in use), flush: */
03207   if (vm_memory_allocated(gc->vm) > ((gc->used_pages << (LOG_APAGE_SIZE + 1)))) {
03208     vm_flush_freed_pages(gc->vm);
03209   }
03210 
03211   /* update some statistics */
03212   if(gc->gc_full) gc->num_major_collects++; else gc->num_minor_collects++;
03213   if(gc->peak_memory_use < gc->memory_in_use) gc->peak_memory_use = gc->memory_in_use;
03214   if(gc->gc_full)
03215     gc->since_last_full = 0;
03216   else if((float)(gc->memory_in_use - old_mem_use) < (0.1 * (float)old_mem_use))
03217     gc->since_last_full += 1;
03218   else if((float)(gc->memory_in_use - old_mem_use) < (0.4 * (float)old_mem_use))
03219     gc->since_last_full += 5;
03220   else 
03221     gc->since_last_full += 10;
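  /* Pacing example: if a collection leaves the heap within 10% of its old
     size, since_last_full advances by 1, so roughly 100 such collections
     pass before the since_last_full > 100 test above forces a full GC;
     growth of 40% or more advances it by 10, forcing a full GC about ten
     times sooner. */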
03222   if(gc->gc_full)
03223     gc->last_full_mem_use = gc->memory_in_use;
03224 
03225   /* inform the system (if it wants us to) that we're done with collection */
03226   if (gc->GC_collect_end_callback)
03227     gc->GC_collect_end_callback();
03228   if (gc->GC_collect_inform_callback)
03229     gc->GC_collect_inform_callback(gc->gc_full, old_mem_use + old_gen0, gc->memory_in_use);
03230 
03231   TIME_STEP("ended");
03232 
03233   TIME_DONE();
03234 
03235   if (!gc->run_queue)
03236     next_gc_full = 0;
03237 
03238   /* run any queued finalizers, EXCEPT in the case where this collection was
03239      triggered by the execution of a finalizer. The outside world needs this
03240      invariant in some corner case I don't have a reference for. In any case,
03241      if we run a finalizer after collection, and it triggers a collection,
03242      we should not run the next finalizer in the queue until the "current"
03243      finalizer completes its execution */
03244   if(!gc->running_finalizers) {
03245     gc->running_finalizers = 1;
03246 
03247     /* Finalization might allocate, which might need park: */
03248     gc->park_save[0] = gc->park[0];
03249     gc->park_save[1] = gc->park[1];
03250     gc->park[0] = NULL;
03251     gc->park[1] = NULL;
03252 
03253     while(gc->run_queue) {
03254       struct finalizer *f;
03255       void **saved_gc_variable_stack;
03256 
03257       f = gc->run_queue; gc->run_queue = gc->run_queue->next;
03258       if(!gc->run_queue) gc->last_in_queue = NULL;
03259 
03260       GCDEBUG((DEBUGOUTF, "Running finalizer %p for pointer %p (lvl %i)\n", f, f->p, f->eager_level));
03261       saved_gc_variable_stack = GC_variable_stack;
03262       f->f(f->p, f->data);
03263       GC_variable_stack = saved_gc_variable_stack;
03264     }
03265 #ifdef NEWGC_BTC_ACCOUNT
03266     BTC_run_account_hooks(gc);
03267 #endif
03268     gc->running_finalizers = 0;
03269 
03270     gc->park[0] = gc->park_save[0];
03271     gc->park[1] = gc->park_save[1];
03272     gc->park_save[0] = NULL;
03273     gc->park_save[1] = NULL;
03274   }
03275 
03276   DUMP_HEAP(); CLOSE_DEBUG_FILE();
03277 
03278   if (next_gc_full)
03279     gc->full_needed_for_finalization = 1;
03280 }
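/* Usage sketch for the finalizer queue drained above, wrapped in "#if 0".
   Registration goes through gc2.h's GC_set_finalizer; the signature below
   should be checked against gc2.h, and on_collect/register_example are
   illustrative names only: */
#if 0
static void on_collect(void *p, void *data)
{
  /* called once p has become unreachable and a collection has run */
  fprintf(stderr, "finalizing %p\n", p);
}

static void register_example(void *obj)
{
  void (*oldf)(void *p, void *data);
  void *olddata;
  /* tagged object, eager level 1 (cf. the level 1..3 passes above) */
  GC_set_finalizer(obj, 1, 1, on_collect, NULL, &oldf, &olddata);
}
#endif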
03281 
03282 #ifdef MZ_GC_BACKTRACE
03283 
03284 static GC_get_type_name_proc stack_get_type_name;
03285 static GC_get_xtagged_name_proc stack_get_xtagged_name;
03286 static GC_print_tagged_value_proc stack_print_tagged_value;
03287 
03288 static void dump_stack_pos(void *a) 
03289 {
03290   GCPRINT(GCOUTF, " @%p: ", a);
03291   print_out_pointer("", *(void **)a, stack_get_type_name, stack_get_xtagged_name, stack_print_tagged_value);
03292 }
03293 
03294 # define GC_X_variable_stack GC_do_dump_variable_stack
03295 # define gcX(a) dump_stack_pos(a)
03296 # define X_source(stk, p) /* */
03297 # include "var_stack.c"
03298 # undef GC_X_variable_stack
03299 # undef gcX
03300 # undef X_source
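/* The defines above instantiate var_stack.c's generic stack walker as
   GC_do_dump_variable_stack, applying dump_stack_pos at each slot; the
   same include-with-macros trick generates the marking and fixup
   walkers used during collection. */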
03301 
03302 void GC_dump_variable_stack(void **var_stack,
03303     long delta,
03304     void *limit,
03305     void *stack_mem,
03306     GC_get_type_name_proc get_type_name,
03307     GC_get_xtagged_name_proc get_xtagged_name,
03308     GC_print_tagged_value_proc print_tagged_value)
03309 {
03310   stack_get_type_name = get_type_name;
03311   stack_get_xtagged_name = get_xtagged_name;
03312   stack_print_tagged_value = print_tagged_value;
03313   GC_do_dump_variable_stack(var_stack, delta, limit, stack_mem);
03314 }
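/* Usage sketch (illustrative): a debugging hook can dump the current
   variable stack by passing the same stack arguments used with
   GC_mark_variable_stack in garbage_collect(), plus three printing
   callbacks; the my_* names here are hypothetical: */
#if 0
  GC_dump_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL,
                         my_get_type_name, my_get_xtagged_name,
                         my_print_tagged_value);
#endif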
03315 
03316 #endif
03317 
03318 /******************************************************************************/
03319 /*                              GC free all                                   */
03320 /******************************************************************************/
03321 
03322 void GC_free_all(void)
03323 {
03324   NewGC *gc = GC_get_GC();
03325   int i;
03326   mpage *work;
03327   mpage *next;
03328   PageMap pagemap = gc->page_maps;
03329 
03330   remove_signal_handler(gc);
03331 
03332   gen0_free_big_pages(gc);
03333 
03334   for(i = 0; i < PAGE_TYPES; i++) {
03335     for (work = gc->gen1_pages[i]; work; work = next) {
03336       next = work->next;
03337 
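      /* protect_old_pages() may have left this page read-only; restore
         write access before the page is freed so it can be recycled. */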
03338       if (work->mprotected)
03339         vm_protect_pages(work->addr, (work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE, 1);
03340       gen1_free_mpage(pagemap, work);
03341     }
03342   }
03343 
03344   free(gc->mark_table);
03345   free(gc->fixup_table);
03346 
03347   free_page_maps(gc->page_maps);
03348 
03349   free(gc->protect_range);
03350 
03351   vm_flush_freed_pages(gc->vm);
03352   vm_free(gc->vm);
03353   free(gc);
03354 }
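/* Teardown order above matters: nursery big pages first, then each
   generation-1 page (unprotecting any the write barrier left read-only),
   then the mark/fixup tables and page maps that describe those pages,
   and only then the low-level VM allocator itself. */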