
plt-scheme 4.2.1
salloc.c
00001 /*
00002   MzScheme
00003   Copyright (c) 2004-2009 PLT Scheme Inc.
00004   Copyright (c) 1995-2001 Matthew Flatt
00005  
00006     This library is free software; you can redistribute it and/or
00007     modify it under the terms of the GNU Library General Public
00008     License as published by the Free Software Foundation; either
00009     version 2 of the License, or (at your option) any later version.
00010 
00011     This library is distributed in the hope that it will be useful,
00012     but WITHOUT ANY WARRANTY; without even the implied warranty of
00013     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00014     Library General Public License for more details.
00015 
00016     You should have received a copy of the GNU Library General Public
00017     License along with this library; if not, write to the Free
00018     Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
00019     Boston, MA 02110-1301 USA.
00020 
00021   libscheme
00022   Copyright (c) 1994 Brent Benson
00023   All rights reserved.
00024 */
00025 
00026 /* Some compilers don't like re-def of GC_malloc in schemef.h: */
00027 #ifndef MZ_PRECISE_GC
00028 # define SCHEME_NO_GC_PROTO
00029 #endif
00030 
00031 #include "schpriv.h"
00032 #include <string.h>
00033 #include "schgc.h"
00034 
00035 #ifdef DOS_FAR_POINTERS
00036 # include <alloc.h>
00037 # define MALLOC farmalloc
00038 #else
00039 # define MALLOC malloc
00040 #endif
00041 
00042 #ifdef MZ_JIT_USE_MPROTECT
00043 # include <unistd.h>
00044 # include <sys/mman.h>
00045 # ifndef MAP_ANON
00046 #  include <fcntl.h>
00047 # endif
00048 #endif
00049 #ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
00050 # include <windows.h>
00051 #endif
00052 
00053 static void **dgc_array;
00054 static int *dgc_count;
00055 static int dgc_size;
00056 
00057 extern int scheme_num_copied_stacks;
00058 static unsigned long scheme_primordial_os_thread_stack_base;
00059 static THREAD_LOCAL unsigned long scheme_os_thread_stack_base;
00060 
00061 static Scheme_Report_Out_Of_Memory_Proc more_report_out_of_memory;
00062 
00063 #if defined(MZ_XFORM) && !defined(MZ_PRECISE_GC)
00064 void **GC_variable_stack;
00065 #endif
00066 
00067 #ifndef MZ_PRECISE_GC
00068 extern MZ_DLLIMPORT void GC_register_late_disappearing_link(void **link, void *obj);
00069 #endif
00070 
00071 static int use_registered_statics;
00072 
00073 /************************************************************************/
00074 /*                           stack setup                                */
00075 /************************************************************************/
00076 
00077 #if !defined(MZ_PRECISE_GC) && !defined(USE_SENORA_GC)
00078 extern MZ_DLLIMPORT void GC_init();
00079 extern MZ_DLLIMPORT unsigned long GC_get_stack_base();
00080 #endif
00081 
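/* Embedding note: code that does not go through scheme_main_setup() or
   scheme_main_stack_setup() below can call this directly with the address
   of a local variable in main(), roughly:

     int main(int argc, char **argv) {
       long dummy;
       scheme_set_stack_base(&dummy, 1);
       ... initialize environments, run Scheme code ...
     }

   A true `no_auto_statics' means static variables are not scanned
   automatically and must be registered via scheme_register_static()
   (see below). */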
00082 void scheme_set_stack_base(void *base, int no_auto_statics)
00083 {
00084 #ifdef MZ_PRECISE_GC
00085   GC_init_type_tags(_scheme_last_type_, 
00086                     scheme_pair_type, scheme_mutable_pair_type, scheme_weak_box_type, 
00087                     scheme_ephemeron_type, scheme_rt_weak_array,
00088                     scheme_cust_box_type);
00089   /* We want to be able to allocate symbols early. */
00090   scheme_register_traversers();
00091 #endif
00092 
00093   scheme_primordial_os_thread_stack_base  = (unsigned long) base;
00094   scheme_os_thread_stack_base             = (unsigned long) base;
00095 
00096 #if defined(MZ_PRECISE_GC) || defined(USE_SENORA_GC)
00097   GC_set_stack_base(base);
00098   /* no_auto_statics must always be true! */
00099 #else
00100   GC_stackbottom = base;
00101   if (no_auto_statics) {
00102     GC_no_dls = 1;
00103     GC_init();
00104     GC_clear_roots();
00105   } else {
00106 # if (defined(__APPLE__) && defined(__MACH__)) || defined(MZ_USE_IRIX_SPROCS)
00107     GC_init(); /* For Darwin, CGC requires GC_init() always */
00108 # endif
00109   }
00110 #endif
00111   use_registered_statics = no_auto_statics;
00112 #if defined(MZ_PRECISE_GC)
00113   GC_report_out_of_memory = scheme_out_of_memory_abort;
00114 #endif
00115 }
00116 
00117 void scheme_set_current_os_thread_stack_base(void *base)
00118 {
00119   scheme_os_thread_stack_base = (unsigned long) base;
00120 }
00121 
00122 unsigned long scheme_get_current_os_thread_stack_base()
00123 {
00124   return scheme_os_thread_stack_base;
00125 }
00126 
00127 typedef struct {
00128   Scheme_Env_Main _main;
00129   int argc;
00130   char **argv;
00131 } Scheme_Main_Data;
00132 
00133 static int call_with_basic(void *data)
00134 {
00135   Scheme_Main_Data *ma = (Scheme_Main_Data *)data;
00136   Scheme_Env_Main _main = ma->_main;
00137   
00138   return _main(scheme_basic_env(), ma->argc, ma->argv);
00139 }
00140 
00141 int scheme_main_setup(int no_auto_statics, Scheme_Env_Main _main, int argc, char **argv)
00142 {
00143   Scheme_Main_Data d;
00144   d._main = _main;
00145   d.argc = argc;
00146   d.argv = argv;
00147   return scheme_main_stack_setup(no_auto_statics, call_with_basic, &d);
00148 }
00149 
00150 int scheme_main_stack_setup(int no_auto_statics, Scheme_Nested_Main _main, void *data)
00151 {
00152   void *stack_start;
00153   int volatile return_code;
00154 
00155   scheme_set_stack_base(PROMPT_STACK(stack_start), no_auto_statics);
00156 
00157   return_code = _main(data);
00158 
00159 #ifdef MZ_PRECISE_GC
00160   /* Trick xform conversion to keep stack_start: */
00161   stack_start = NULL;
00162 #endif
00163 
00164   return return_code;
00165 }
00166 
00167 void scheme_set_stack_bounds(void *base, void *deepest, int no_auto_statics)
00168 {
00169   scheme_set_stack_base(base, no_auto_statics);
00170 
00171 #ifdef USE_STACK_BOUNDARY_VAR
00172   if (deepest) {
00173     scheme_stack_boundary = (unsigned long)deepest;
00174   }
00175 #endif
00176 }
00177 
00178 extern unsigned long scheme_get_stack_base()
00179 {
00180 #if !defined(MZ_PRECISE_GC) && !defined(USE_SENORA_GC)
00181   if (GC_stackbottom)
00182     return (unsigned long)GC_stackbottom;
00183   else
00184 #endif
00185     return (unsigned long)GC_get_stack_base();
00186 }
00187 
00188 void scheme_out_of_memory_abort()
00189 {
00190   scheme_log_abort("PLT Scheme virtual machine has run out of memory; aborting");
00191   if (more_report_out_of_memory)
00192     more_report_out_of_memory();
00193   abort();
00194 }
00195 
00196 void scheme_set_report_out_of_memory(Scheme_Report_Out_Of_Memory_Proc p)
00197 {
00198   more_report_out_of_memory = p;
00199 }
00200 
00201 /************************************************************************/
00202 /*                           memory utils                               */
00203 /************************************************************************/
00204 
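/* scheme_dont_gc_ptr() records a pointer so the collector treats it as
   reachable until a matching number of scheme_gc_ptr_ok() calls; the
   registry below is a reference-counted array that doubles when no empty
   slot is left. */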
00205 void scheme_dont_gc_ptr(void *p)
00206 {
00207   int i, oldsize;
00208   void **naya;
00209   int *nayac;
00210 
00211   /* look for existing: */
00212   for (i = 0; i < dgc_size; i++) {
00213     if (dgc_array[i] == p) {
00214       dgc_count[i]++;
00215       return;
00216     }
00217   }
00218 
00219   /* look for empty slot: */
00220   for (i = 0; i < dgc_size; i++) {
00221     if (!dgc_array[i]) {
00222       dgc_array[i] = p;
00223       dgc_count[i] = 1;
00224       return;
00225     }
00226   }
00227 
00228   /* Make more room: */
00229   oldsize = dgc_size;
00230   if (!dgc_array) {
00231     REGISTER_SO(dgc_array);
00232     REGISTER_SO(dgc_count);
00233     dgc_size = 50;
00234   } else
00235     dgc_size *= 2;
00236 
00237   naya = MALLOC_N(void*, dgc_size);
00238   nayac = MALLOC_N(int, dgc_size);
00239 
00240   for (i = 0; i < oldsize; i++) {
00241     naya[i] = dgc_array[i];
00242     nayac[i] = dgc_count[i];
00243   }
00244 
00245   for (; i < dgc_size; i++) {
00246     naya[i] = NULL;
00247     nayac[i] = 0;
00248   }
00249 
00250   dgc_array = naya;
00251   dgc_count = nayac;
00252 
00253   dgc_array[oldsize] = p;
00254   dgc_count[oldsize] = 1;
00255 }
00256 
00257 void scheme_gc_ptr_ok(void *p)
00258 {
00259   int i;
00260   
00261   for (i = 0; i < dgc_size; i++) {
00262     if (dgc_array[i] == p) {
00263       if (!(--dgc_count[i]))
00264        dgc_array[i] = NULL;
00265       break;
00266     }
00267   }
00268 }
00269 
00270 #ifdef NO_GC
00271 void *
00272 scheme_malloc (size_t size)
00273 {
00274   void *space;
00275 
00276   space = MALLOC(size);
00277   if (!space)
00278     scheme_raise_out_of_memory(NULL, NULL);
00279 
00280   return (space);
00281 }
00282 #endif
00283 
00284 void *
00285 scheme_calloc (size_t num, size_t size)
00286 {
00287   void *space;
00288   
00289   space = MALLOC(num*size);
00290   if (!space)
00291     scheme_raise_out_of_memory(NULL, NULL);
00292 #ifdef NO_GC
00293   memset(space, 0, (num*size));
00294 #endif
00295 
00296   return (space);
00297 }
00298 
00299 char *
00300 scheme_strdup(const char *str)
00301 {
00302   char *naya;
00303   long len;
00304 
00305   len = strlen(str) + 1;
00306   naya = (char *)scheme_malloc_atomic (len * sizeof (char));
00307   memcpy (naya, str, len);
00308 
00309   return naya;
00310 }
00311 
00312 char *
00313 scheme_strdup_eternal(const char *str)
00314 {
00315   char *naya;
00316   long len;
00317 
00318   len = strlen(str) + 1;
00319   naya = (char *)scheme_malloc_eternal(len * sizeof (char));
00320   memcpy (naya, str, len);
00321 
00322   return naya;
00323 }
00324 
00325 /************************************************************************/
00326 /*                               cptr                                   */
00327 /************************************************************************/
00328 
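/* A "cptr" wraps a raw C pointer, together with an optional type tag (and an
   extra offset in the scheme_offset_cpointer_type variant), as a Scheme
   object, so foreign pointers can be handed to Scheme code. */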
00329 Scheme_Object *scheme_make_cptr(void *cptr, Scheme_Object *typetag)
00330 {
00331   Scheme_Object *o;
00332 
00333   o = (Scheme_Object *)scheme_malloc_small_tagged(sizeof(Scheme_Cptr));
00334   o->type = scheme_cpointer_type;
00335   SCHEME_CPTR_VAL(o) = cptr;
00336   SCHEME_CPTR_TYPE(o) = (void *)typetag;
00337 
00338   return o;
00339 }
00340 
00341 Scheme_Object *scheme_make_offset_cptr(void *cptr, long offset, Scheme_Object *typetag)
00342 {
00343   Scheme_Object *o;
00344 
00345   o = (Scheme_Object *)scheme_malloc_small_tagged(sizeof(Scheme_Offset_Cptr));
00346   o->type = scheme_offset_cpointer_type;
00347   SCHEME_CPTR_VAL(o) = cptr;
00348   SCHEME_CPTR_TYPE(o) = (void *)typetag;
00349   ((Scheme_Offset_Cptr *)o)->offset = offset;
00350 
00351   return o;
00352 }
00353 
00354 /************************************************************************/
00355 /*                            allocation                                */
00356 /************************************************************************/
00357 
00358 #ifndef MZ_PRECISE_GC
00359 static Scheme_Hash_Table *immobiles;
00360 #endif
00361 
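/* An immobile box is a collector-visible cell whose own address never moves,
   so it can be referenced from C data structures that the GC does not trace;
   each scheme_malloc_immobile_box() should be paired with a
   scheme_free_immobile_box().  Under conservative GC, the boxes are simply
   kept alive through the `immobiles' table below. */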
00362 void **scheme_malloc_immobile_box(void *p)
00363 {
00364 #ifdef MZ_PRECISE_GC
00365   return GC_malloc_immobile_box(p);
00366 #else
00367   void **b;
00368 
00369   if (!immobiles) {
00370     REGISTER_SO(immobiles);
00371     immobiles = scheme_make_hash_table(SCHEME_hash_ptr);
00372   }
00373 
00374   b = scheme_malloc(sizeof(void *));
00375   *b = p;
00376   scheme_hash_set(immobiles, (Scheme_Object *)(void *)b, scheme_true);
00377 
00378   return b;
00379 #endif
00380 }
00381 
00382 void scheme_free_immobile_box(void **b)
00383 {
00384 #ifdef MZ_PRECISE_GC
00385   GC_free_immobile_box(b);
00386 #else
00387   if (immobiles) {
00388     scheme_hash_set(immobiles, (Scheme_Object *)(void *)b, NULL);
00389   }
00390 #endif
00391 }
00392 
00393 static void (*save_oom)(void);
00394 
00395 static void raise_out_of_memory(void)
00396 {
00397   GC_out_of_memory = save_oom;
00398   scheme_raise_out_of_memory(NULL, NULL);
00399 }
00400 
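/* scheme_malloc_fail_ok() wraps a single allocation so that running out of
   memory raises a Scheme exception (via scheme_raise_out_of_memory) instead
   of reaching the GC's default handler; for example:

     v = scheme_malloc_fail_ok(scheme_malloc_atomic, size);
*/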
00401 void *scheme_malloc_fail_ok(void *(*f)(size_t), size_t s)
00402 {
00403   void *v;
00404 
00405   save_oom = GC_out_of_memory;
00406   GC_out_of_memory = raise_out_of_memory;
00407   v = f(s);
00408   GC_out_of_memory = save_oom;
00409 
00410   return v;
00411 }
00412 
00413 void scheme_end_stubborn_change(void *p)
00414 {
00415 #ifndef MZ_PRECISE_GC
00416   GC_end_stubborn_change(p);
00417 #endif
00418 }
00419 
00420 void *scheme_malloc_eternal(size_t n)
00421 {
00422 #ifdef USE_SENORA_GC
00423   return GC_malloc_atomic_uncollectable(n);
00424 #else
00425   void *s;
00426 
00427   s = MALLOC(n);
00428   if (!s) {
00429     if (GC_out_of_memory)
00430       GC_out_of_memory();
00431     else {
00432       if (scheme_console_printf)
00433        scheme_console_printf("out of memory\n");
00434       else
00435        printf("out of memory\n");
00436       exit(1);
00437     }
00438   }
00439        
00440 
00441   memset(s, 0, n);
00442 
00443   return s;
00444 #endif
00445 }
00446 
00447 #ifdef MZ_PRECISE_GC
00448 void *scheme_malloc_uncollectable(size_t size_in_bytes)
00449 {
00450   void *p;
00451   p = scheme_malloc(size_in_bytes);
00452   scheme_dont_gc_ptr(p);
00453   return p;
00454 }
00455 #endif
00456 
00457 #ifdef MZ_XFORM
00458 START_XFORM_SKIP;
00459 #endif
00460 
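/* When automatic static scanning is disabled (no_auto_statics above), every
   static or global variable that can hold a GC-allocated value must be
   registered; the usual route is the MZ_REGISTER_STATIC() macro, which
   passes the variable's address and size here. */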
00461 void scheme_register_static(void *ptr, long size)
00462 {
00463 #if defined(MZ_PRECISE_GC) || defined(USE_SENORA_GC)
00464   /* Always register for precise and Senora GC: */
00465   GC_add_roots((char *)ptr, (char *)(((char *)ptr) + size + 1));
00466 #else
00467 # ifdef GC_MIGHT_USE_REGISTERED_STATICS
00468   if (use_registered_statics) {
00469     GC_add_roots((char *)ptr, (char *)(((char *)ptr) + size + 1));
00470   }
00471 # endif
00472 #endif
00473 }
00474 
00475 #ifdef MZ_XFORM
00476 END_XFORM_SKIP;
00477 #endif
00478 
00479 #ifdef USE_TAGGED_ALLOCATION
00480 
00481 struct GC_Set *tagged, *real_tagged, *tagged_atomic, *tagged_eternal, *tagged_uncollectable, *stacks, *envunbox;
00482 struct GC_Set *tagged_while_counting;
00483 
00484 static void trace_count(void *, int);
00485 static void trace_path(void *, unsigned long, void *);
00486 static void trace_init(void);
00487 static void trace_done(void);
00488 static void trace_stack_count(void *, int);
00489 static void trace_stack_path(void *, unsigned long, void *);
00490 static void finalize_object(void *);
00491 
00492 #define TRACE_FUNCTIONS trace_init, trace_done, trace_count, trace_path
00493 
00494 static void init_tagged_counting(void)
00495 {
00496   if (!tagged_while_counting)
00497     tagged_while_counting = GC_new_set("counting", 
00498                                    NULL, NULL, NULL, NULL, NULL,
00499                                    0);
00500 }
00501 
00502 void *scheme_malloc_tagged(size_t s)
00503 {
00504   if (!tagged) {
00505     init_tagged_counting();
00506     real_tagged = tagged = GC_new_set("tagged", TRACE_FUNCTIONS, 
00507                                   finalize_object, 
00508                                   0);
00509   }
00510 
00511   return GC_malloc_specific(s, tagged);
00512 }
00513 
00514 void *scheme_malloc_atomic_tagged(size_t s)
00515 {
00516   if (!tagged_atomic) {
00517     init_tagged_counting();
00518     tagged_atomic = GC_new_set("tagged", TRACE_FUNCTIONS, 
00519                             finalize_object, 
00520                             SGC_ATOMIC_SET);
00521   }
00522 
00523   return GC_malloc_specific(s, tagged_atomic);
00524 }
00525 
00526 void *scheme_malloc_stubborn_tagged(size_t s)
00527 {
00528   return scheme_malloc_tagged(s);
00529 }
00530 
00531 void *scheme_malloc_envunbox(size_t s)
00532 {
00533   if (!envunbox)
00534     envunbox = GC_new_set("envunbox", 
00535                        NULL, NULL, NULL, NULL, NULL,
00536                        0);
00537 
00538   return GC_malloc_specific(s, envunbox);
00539 }
00540 
00541 void *scheme_malloc_stack(size_t s)
00542 {
00543   if (!stacks)
00544     stacks = GC_new_set("envunbox", 
00545                      trace_init, trace_done, trace_stack_count, trace_stack_path, 
00546                      NULL,
00547                      SGC_ATOMIC_SET);
00548 
00549   return GC_malloc_specific(s, stacks);
00550 }
00551 
00552 void *scheme_malloc_eternal_tagged(size_t s)
00553 {
00554   if (!tagged_eternal) {
00555     init_tagged_counting();
00556     tagged_eternal = GC_new_set("tagged", TRACE_FUNCTIONS,
00557                             finalize_object,
00558                             SGC_UNCOLLECTABLE_SET | SGC_ATOMIC_SET);
00559   }
00560 
00561   return GC_malloc_specific(s, tagged_eternal);
00562 }
00563 
00564 void *scheme_malloc_uncollectable_tagged(size_t s)
00565 {
00566   if (!tagged_uncollectable) {
00567     init_tagged_counting();
00568     tagged_uncollectable = GC_new_set("tagged", TRACE_FUNCTIONS, 
00569                                   finalize_object,
00570                                   SGC_UNCOLLECTABLE_SET);
00571   }
00572 
00573   return GC_malloc_specific(s, tagged_uncollectable);
00574 }
00575 
00576 #endif
00577 
00578 /************************************************************************/
00579 /*                         code allocation                              */
00580 /************************************************************************/
00581 
00582 /* We're not supposed to use mprotect() or VirtualProtect() on memory
00583    from malloc(); Posix says that mprotect() only works on memory from
00584    mmap(), and VirtualProtect() similarly requires alignment with a
00585    corresponding VirtualAlloc. So we implement a little allocator here
00586    for code chunks. */
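/* Design of the allocator below: requests larger than the biggest bucket get
   their own page(s), with the rounded-up size stored in the CODE_HEADER_SIZE
   header; smaller requests are rounded up to a bucket size and served from a
   per-bucket free list of equal-size slots carved out of shared pages.  On a
   shared page, the first long records the bucket index and the second counts
   live objects, which is how scheme_free_code() finds the right bucket and
   decides when the whole page can be returned to the OS. */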
00587 
00588 #ifdef MZ_PRECISE_GC
00589 START_XFORM_SKIP;
00590 #endif
00591 
00592 /* Max of desired alignment and 2 * sizeof(long): */
00593 #define CODE_HEADER_SIZE 16
00594 
00595 long scheme_code_page_total;
00596 
00597 #if defined(MZ_JIT_USE_MPROTECT) && !defined(MAP_ANON)
00598 static int fd, fd_created;
00599 #endif
00600 
00601 #define LOG_CODE_MALLOC(lvl, s) /* if (lvl > 1) s */
00602 #define CODE_PAGE_OF(p) ((void *)(((unsigned long)p) & ~(page_size - 1)))
00603 
00604 #if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
00605 
00606 struct free_list_entry {
00607   long size; /* size of elements in this bucket */
00608   void *elems; /* doubly linked list for free blocks */
00609   int count; /* number of items in `elems' */
00610 };
00611 
00612 static struct free_list_entry *free_list;
00613 static int free_list_bucket_count;
00614 
00615 static long get_page_size()
00616 {
00617 # ifdef PAGESIZE
00618   const long page_size = PAGESIZE;
00619 # else
00620   static unsigned long page_size = -1;
00621   if (page_size == -1) {
00622 #  ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
00623     SYSTEM_INFO info;
00624     GetSystemInfo(&info);
00625     page_size = info.dwPageSize;
00626 #  else
00627     page_size = sysconf (_SC_PAGESIZE);
00628 #  endif
00629   }
00630 # endif
00631 
00632   return page_size;
00633 }
00634 
00635 static void *malloc_page(long size)
00636 {
00637   void *r;
00638 
00639 #ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
00640   {
00641     DWORD old;
00642     r = (void *)VirtualAlloc(NULL, size, 
00643                              MEM_COMMIT | MEM_RESERVE, 
00644                              /* A note in gc/os_dep.c says that VirtualAlloc
00645                                 doesn't like PAGE_EXECUTE_READWRITE. In case
00646                                 that's true, we use a separate VirtualProtect step. */
00647                              PAGE_READWRITE);
00648     if (r)
00649       VirtualProtect(r, size, PAGE_EXECUTE_READWRITE, &old);
00650   }
00651 #else
00652 # ifdef MAP_ANON
00653   r = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0);
00654 # else
00655   if (!fd_created) {
00656     fd_created = 1;
00657     fd = open("/dev/zero", O_RDWR);
00658   }
00659   r = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, fd, 0);
00660 # endif
00661   if (r  == (void *)-1)
00662     r = NULL;
00663 #endif
00664 
00665   if (!r)
00666     scheme_raise_out_of_memory(NULL, NULL);
00667 
00668   return r;
00669 }
00670 
00671 static void free_page(void *p, long size)
00672 {
00673 #ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
00674   VirtualFree(p, 0, MEM_RELEASE);
00675 #else
00676   munmap(p, size);
00677 #endif
00678 }
00679 
00680 static void init_free_list()
00681 {
00682   long page_size = get_page_size();
00683   int pos = 0;
00684   int cnt = 2;
00685   long last_v = page_size, v;
00686 
00687   /* Compute size that fits 2 objects per page, then 3 per page, etc.
00688      Keeping CODE_HEADER_SIZE alignment gives us a small number of
00689      buckets. */
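  /* For example, with 4096-byte pages and a 16-byte CODE_HEADER_SIZE, the
     loop below yields bucket sizes 2032, 1360, 1008, 816, 672, ... down to
     16; a `cnt' that rounds to the previously recorded size is skipped via
     `last_v'. */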
00690   while (1) {
00691     v = (page_size - CODE_HEADER_SIZE) / cnt;
00692     v = (v / CODE_HEADER_SIZE) * CODE_HEADER_SIZE;
00693     if (v != last_v) {
00694       free_list[pos].size = v;
00695       free_list[pos].elems = NULL;
00696       free_list[pos].count = 0;
00697       last_v = v;
00698       pos++;
00699       if (v == CODE_HEADER_SIZE)
00700         break;
00701     }
00702     cnt++;
00703   }
00704 
00705   free_list_bucket_count = pos;
00706 }
00707 
00708 static long free_list_find_bucket(long size)
00709 {
00710   /* binary search */
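  /* Bucket sizes are stored in decreasing order, so this finds the smallest
     bucket that still satisfies the request; callers have already ensured
     that size <= free_list[0].size. */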
00711   int lo = 0, hi = free_list_bucket_count - 1, mid;
00712 
00713   while (lo + 1 < hi) {
00714     mid = (lo + hi) / 2;
00715     if (free_list[mid].size > size) {
00716       lo = mid;
00717     } else {
00718       hi = mid;
00719     }
00720   }
00721 
00722   if (free_list[hi].size == size)
00723     return hi;
00724   else
00725     return lo;
00726 }
00727 #endif
00728 
00729 void *scheme_malloc_code(long size)
00730 {
00731 #if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
00732   long size2, bucket, sz, page_size;
00733   void *p, *pg, *prev;
00734 
00735   if (size < CODE_HEADER_SIZE) {
00736     /* ensure CODE_HEADER_SIZE alignment 
00737        and room for free-list pointers */
00738     size = CODE_HEADER_SIZE;
00739   }
00740 
00741   page_size = get_page_size();
00742 
00743   if (!free_list) {
00744     free_list = (struct free_list_entry *)malloc_page(page_size);
00745     scheme_code_page_total += page_size;
00746     init_free_list();
00747   }
00748 
00749   if (size > free_list[0].size) {
00750     /* allocate large object on its own page(s) */
00751     sz = size + CODE_HEADER_SIZE;
00752     sz = (sz + page_size - 1) & ~(page_size - 1);
00753     pg = malloc_page(sz);
00754     scheme_code_page_total += sz;
00755     *(long *)pg = sz;
00756     LOG_CODE_MALLOC(1, printf("allocated large %p (%ld) [now %ld]\n", 
00757                               pg, size + CODE_HEADER_SIZE, scheme_code_page_total));
00758     return ((char *)pg) + CODE_HEADER_SIZE;
00759   }
00760 
00761   bucket = free_list_find_bucket(size);
00762   size2 = free_list[bucket].size;
00763 
00764   if (!free_list[bucket].elems) {
00765     /* add a new page's worth of items to the free list */
00766     int i, count = 0;
00767     pg = malloc_page(page_size);
00768     scheme_code_page_total += page_size;
00769     LOG_CODE_MALLOC(2, printf("new page for %ld / %ld at %p [now %ld]\n", 
00770                               size2, bucket, pg, scheme_code_page_total));
00771     sz = page_size - size2;
00772     for (i = CODE_HEADER_SIZE; i <= sz; i += size2) {
00773       p = ((char *)pg) + i;
00774       prev = free_list[bucket].elems;
00775       ((void **)p)[0] = prev;
00776       ((void **)p)[1] = NULL;
00777       if (prev)
00778         ((void **)prev)[1] = p;
00779       free_list[bucket].elems = p;
00780       count++;
00781     }
00782     ((long *)pg)[0] = bucket; /* first long of page indicates bucket */
00783     ((long *)pg)[1] = 0; /* second long counts the objects allocated on this page */
00784     free_list[bucket].count = count;
00785   }
00786 
00787   p = free_list[bucket].elems;
00788   prev = ((void **)p)[0];
00789   free_list[bucket].elems = prev;
00790   --free_list[bucket].count;
00791   if (prev)
00792     ((void **)prev)[1] = NULL;
00793   ((long *)CODE_PAGE_OF(p))[1] += 1;
00794   
00795   LOG_CODE_MALLOC(0, printf("allocated %ld (->%ld / %ld)\n", size, size2, bucket));
00796 
00797   return p;
00798 #else
00799   return malloc(size); /* good luck! */
00800 #endif
00801 }
00802 
00803 void scheme_free_code(void *p)
00804 {
00805 #if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
00806   long size, size2, bucket, page_size;
00807   int per_page, n;
00808   void *prev;
00809 
00810   page_size = get_page_size();
00811 
00812   size = *(long *)CODE_PAGE_OF(p);
00813   
00814   if (size >= page_size) {
00815     /* it was a large object on its own page(s) */
00816     scheme_code_page_total -= size;
00817     LOG_CODE_MALLOC(1, printf("freeing large %p (%ld) [%ld left]\n", 
00818                               p, size, scheme_code_page_total));
00819     free_page((char *)p - CODE_HEADER_SIZE, size);
00820     return;
00821   }
00822 
00823   bucket = size;
00824 
00825   if ((bucket < 0) || (bucket >= free_list_bucket_count)) {
00826     printf("bad free: %p\n", (char *)p + CODE_HEADER_SIZE);
00827     abort();    
00828   }
00829 
00830   size2 = free_list[bucket].size;
00831 
00832   LOG_CODE_MALLOC(0, printf("freeing %ld / %ld\n", size2, bucket));
00833 
00834   /* decrement alloc count for this page: */
00835   per_page = (page_size - CODE_HEADER_SIZE) / size2;
00836   n = ((long *)CODE_PAGE_OF(p))[1];
00837   /* double-check: */
00838   if ((n < 1) || (n > per_page)) {
00839     printf("bad free: %p\n", (char *)p + CODE_HEADER_SIZE);
00840     abort();
00841   }
00842   n--;
00843   ((long *)CODE_PAGE_OF(p))[1] = n;
00844   
00845   /* add to free list: */
00846   prev = free_list[bucket].elems;
00847   ((void **)p)[0] = prev;
00848   ((void **)p)[1] = NULL;
00849   if (prev)
00850     ((void **)prev)[1] = p;
00851   free_list[bucket].elems = p;
00852   free_list[bucket].count++;
00853 
00854   /* Free the whole page if all of its slots are back on the free list, and
00855      enough free slots in this bucket remain on other pages. */
00856   if ((n == 0) && ((free_list[bucket].count - per_page) >= (per_page / 2))) {
00857     /* remove same-page elements from free list, then free page */
00858     int i;
00859     long sz;
00860     void *pg;
00861 
00862     sz = page_size - size2;
00863     pg = CODE_PAGE_OF(p);
00864     for (i = CODE_HEADER_SIZE; i <= sz; i += size2) {
00865       p = ((char *)pg) + i;
00866       prev = ((void **)p)[1];
00867       if (prev)
00868         ((void **)prev)[0] = ((void **)p)[0];
00869       else
00870         free_list[bucket].elems = ((void **)p)[0];
00871       prev = ((void **)p)[0];
00872       if (prev)
00873         ((void **)prev)[1] = ((void **)p)[1];
00874       --free_list[bucket].count;
00875     }
00876     
00877     scheme_code_page_total -= page_size;
00878     LOG_CODE_MALLOC(2, printf("freeing page at %p [%ld left]\n", 
00879                               CODE_PAGE_OF(p), scheme_code_page_total));
00880     free_page(CODE_PAGE_OF(p), page_size);
00881   }
00882 #else
00883   free(p);
00884 #endif
00885 }
00886 
00887 #ifndef MZ_PRECISE_GC
00888 
00889 /* When using the CGC allocator, we know how GCable memory is
00890    allocated, and we expect mprotect(), etc., to work on it. The JIT
00891    currently takes advantage of that combination, so we support it
00892    with scheme_malloc_gcable_code() --- but only in CGC mode. */
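/* The jit_prev_page/jit_prev_length pair below remembers the last page range
   made executable, so consecutive allocations that fall on the same pages can
   skip the mprotect()/VirtualProtect() call. */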
00893 
00894 #if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
00895 static unsigned long jit_prev_page = 0, jit_prev_length = 0;
00896 #endif
00897 
00898 void *scheme_malloc_gcable_code(long size)
00899 {
00900   void *p;
00901   p = scheme_malloc(size);
00902   
00903 #if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
00904   {
00905     /* [This chunk of code moved from our copy of GNU lightning to here.] */
00906     unsigned long page, length, page_size;
00907     void *end;
00908 
00909     page_size = get_page_size();
00910     
00911     end = ((char *)p) + size;
00912 
00913     page = (long) p & ~(page_size - 1);
00914     length = ((char *) end - (char *) page + page_size - 1) & ~(page_size - 1);
00915     
00916     /* Simple-minded attempt at optimizing the common case where a single
00917        chunk of memory is used to compile multiple functions.  */
00918     if (!(page >= jit_prev_page && page + length <= jit_prev_page + jit_prev_length)) {
00919       
00920 # ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
00921       {
00922         DWORD old;
00923         VirtualProtect((void *)page, length, PAGE_EXECUTE_READWRITE, &old);
00924       }
00925 # else
00926       mprotect ((void *) page, length, PROT_READ | PROT_WRITE | PROT_EXEC);
00927 # endif
00928 
00929       /* See if we can extend the previously mprotect'ed memory area towards
00930          higher addresses: the starting address remains the same as before.  */
00931       if (page >= jit_prev_page && page <= jit_prev_page + jit_prev_length)
00932         jit_prev_length = page + length - jit_prev_page;
00933       
00934       /* See if we can extend the previously mprotect'ed memory area towards
00935          lower addresses: the highest address remains the same as before. */
00936       else if (page < jit_prev_page && page + length >= jit_prev_page 
00937                && page + length <= jit_prev_page + jit_prev_length)
00938         jit_prev_length += jit_prev_page - page, jit_prev_page = page;
00939       
00940       /* Nothing to do, replace the area.  */
00941       else
00942         jit_prev_page = page, jit_prev_length = length;
00943     }
00944   }
00945 #endif
00946 
00947   return p;
00948 }
00949 
00950 void scheme_notify_code_gc()
00951 {
00952 #if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
00953   jit_prev_page = 0;
00954   jit_prev_length = 0;
00955 #endif
00956 }
00957 #endif
00958 
00959 #ifdef MZ_PRECISE_GC
00960 END_XFORM_SKIP;
00961 #endif
00962 
00963 /************************************************************************/
00964 /*                           finalization                               */
00965 /************************************************************************/
00966 
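/* Each finalized object has one low-level GC finalizer, do_next_finalization(),
   whose data is a Finalizations record with three parts: a chain of "scheme"
   finalizers that run one at a time (the low-level finalizer is re-registered
   while more remain), at most one "ext" finalizer installed through
   scheme_register_finalizer(), and a chain of "prim" finalizers that all run
   together.  The `lifetime' field lets scheme_reset_finalizations() disable
   finalizers registered before the last reset. */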
00967 typedef struct Finalization {
00968   MZTAG_IF_REQUIRED
00969   void (*f)(void *o, void *data);
00970   void *data;
00971   struct Finalization *next, *prev;
00972 } Finalization;
00973 
00974 typedef struct {
00975   MZTAG_IF_REQUIRED
00976   short lifetime;
00977   Finalization *scheme_first, *scheme_last;
00978   void (*ext_f)(void *o, void *data);
00979   void *ext_data;
00980   Finalization *prim_first, *prim_last;
00981 } Finalizations;
00982 
00983 #ifdef MZ_PRECISE_GC
00984 
00985 #include "../gc2/gc2_dump.h"
00986 
00987 START_XFORM_SKIP;
00988 
00989 #define MARKS_FOR_SALLOC_C
00990 #include "mzmark.c"
00991 
00992 END_XFORM_SKIP;
00993 
00994 #define GC_register_eager_finalizer(o, level, f, d, of, od) GC_set_finalizer(o, 1, level, f, d, of, od)
00995 #define GC_register_finalizer(o, f, d, of, od) GC_set_finalizer(o, 1, 3, f, d, of, od)
00996 
00997 #endif
00998 
00999 static int current_lifetime;
01000 
01001 void scheme_reset_finalizations(void)
01002 {
01003   current_lifetime++;
01004 }
01005 
01006 static void do_next_finalization(void *o, void *data)
01007 {
01008   Finalizations *fns = *(Finalizations **)data;
01009   Finalization *fn;
01010 
01011   if (fns->lifetime != current_lifetime)
01012     return;
01013 
01014   if (fns->scheme_first) {
01015     if (fns->scheme_first->next || fns->ext_f || fns->prim_first) {
01016       /* Re-install low-level finalizer and run a scheme finalizer */
01017       GC_register_eager_finalizer(o, fns->scheme_first->next ? 1 : 2, 
01018                               do_next_finalization, data, NULL, NULL);
01019     }
01020 
01021     fn = fns->scheme_first;
01022     fns->scheme_first = fn->next;
01023     if (!fn->next)
01024       fns->scheme_last = NULL;
01025     else
01026       fn->next->prev = NULL;
01027 
01028     fn->f(o, fn->data);
01029     return;
01030   }
01031 
01032   if (fns->ext_f)
01033     fns->ext_f(o, fns->ext_data);
01034 
01035   for (fn = fns->prim_first; fn; fn = fn->next) {
01036     fn->f(o, fn->data);
01037   }
01038 }
01039 
01040 /* Makes gc2 xformer happy: */
01041 typedef void (*finalizer_function)(void *p, void *data);
01042 static THREAD_LOCAL int traversers_registered;
01043 static THREAD_LOCAL Finalizations **save_fns_ptr;
01044 
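/* add_finalizer() is the common back end for the entry points further below:
   `prim' selects the primitive chain rather than the Scheme chain, `ext'
   installs the single external finalizer, `no_dup' refuses to add a duplicate
   (f, data) pair, and `rmve' removes a matching finalizer instead of adding
   one. */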
01045 static void add_finalizer(void *v, void (*f)(void*,void*), void *data, 
01046                        int prim, int ext,
01047                        void (**ext_oldf)(void *p, void *data),
01048                        void **ext_olddata,
01049                        int no_dup, int rmve)
01050 {
01051   finalizer_function oldf;
01052   void *olddata;
01053   Finalizations *fns, **fns_ptr, *prealloced;
01054   Finalization *fn;
01055 
01056   if (!traversers_registered) {
01057 #ifdef MZ_PRECISE_GC
01058     GC_REG_TRAV(scheme_rt_finalization, mark_finalization);
01059     GC_REG_TRAV(scheme_rt_finalizations, mark_finalizations);
01060     traversers_registered = 1;
01061 #endif
01062     REGISTER_SO(save_fns_ptr);
01063   }
01064 
01065 #ifndef MZ_PRECISE_GC
01066   if (v != GC_base(v))
01067     return;
01068 #endif
01069 
01070   /* Allocate everything first so that we're not changing
01071      finalizations when finalizations could run: */
01072 
01073   if (save_fns_ptr) {
01074     fns_ptr = save_fns_ptr;
01075     save_fns_ptr = NULL;
01076   } else
01077     fns_ptr = MALLOC_ONE(Finalizations*);
01078 
01079   if (!ext && !rmve) {
01080     fn = MALLOC_ONE_RT(Finalization);
01081 #ifdef MZTAG_REQUIRED
01082     fn->type = scheme_rt_finalization;
01083 #endif
01084     fn->f = f;
01085     fn->data = data;
01086   } else
01087     fn = NULL;
01088 
01089   if (!rmve) {
01090     prealloced = MALLOC_ONE_RT(Finalizations); /* may not need this... */
01091 #ifdef MZTAG_REQUIRED
01092     prealloced->type = scheme_rt_finalizations;
01093 #endif
01094   } else
01095     prealloced = NULL;
01096 
01097   GC_register_eager_finalizer(v, prim ? 2 : 1, do_next_finalization, fns_ptr, &oldf, &olddata);
01098 
01099   if (oldf) {
01100     if (oldf != do_next_finalization) {
01101       /* This happens if an external use of GC_ routines conflicts with us. */
01102       scheme_warning("warning: non-MzScheme finalization on object dropped!");
01103     } else {
01104       *fns_ptr = *(Finalizations **)olddata;
01105       save_fns_ptr = (Finalizations **)olddata;
01106       *save_fns_ptr = NULL;
01107     }
01108   } else if (rmve) {
01109     GC_register_finalizer(v, NULL, NULL, NULL, NULL);
01110     save_fns_ptr = fns_ptr;
01111     return;
01112   }
01113   
01114   if (!(*fns_ptr)) {
01115     prealloced->lifetime = current_lifetime;
01116     *fns_ptr = prealloced;
01117   }
01118   fns = *fns_ptr;
01119 
01120   if (ext) {
01121     if (ext_oldf)
01122       *ext_oldf = fns->ext_f;
01123     fns->ext_f = f;
01124     if (ext_olddata)
01125       *ext_olddata = fns->ext_data;
01126     fns->ext_data = data;
01127 
01128     if (!f && !fns->prim_first && !fns->scheme_first) {
01129       /* Removed all finalization */
01130       GC_register_finalizer(v, NULL, NULL, NULL, NULL);
01131       save_fns_ptr = fns_ptr;
01132       *save_fns_ptr = NULL;
01133     }
01134   } else {
01135     if (prim) {
01136       if (no_dup) {
01137        /* Make sure it's not already here */
01138        Finalization *fnx;
01139        for (fnx = fns->prim_first; fnx; fnx = fnx->next) {
01140          if (fnx->f == f && fnx->data == data) {
01141            if (rmve) {
01142              if (fnx->prev)
01143               fnx->prev->next = fnx->next;
01144              else
01145               fns->prim_first = fnx->next;
01146              if (fnx->next)
01147               fnx->next->prev = fnx->prev;
01148              else
01149               fns->prim_last = fnx->prev;
01150            }
01151            fn = NULL;
01152            break;
01153          }
01154        }
01155       }
01156       if (fn) {
01157        fn->next = fns->prim_first;
01158        fns->prim_first = fn;
01159        if (!fn->next)
01160          fns->prim_last = fn;
01161        else
01162          fn->next->prev = fn;
01163       }
01164       /* Removed all finalization? */
01165       if (!fns->ext_f && !fns->prim_first && !fns->scheme_first) {
01166        GC_register_finalizer(v, NULL, NULL, NULL, NULL);
01167        save_fns_ptr = fns_ptr;
01168        *save_fns_ptr = NULL;
01169       }
01170     } else {
01171       fn->next = fns->scheme_first;
01172       fns->scheme_first = fn;
01173       if (!fn->next)
01174        fns->scheme_last = fn;
01175       else
01176        fn->next->prev = fn;
01177     }
01178   }
01179 }
01180 
01181 #ifndef MZ_PRECISE_GC
01182 void scheme_weak_reference(void **p)
01183 {
01184   scheme_weak_reference_indirect(p, *p);
01185 }
01186 
01187 void scheme_weak_reference_indirect(void **p, void *v)
01188 {
01189   if (GC_base(v) == v)
01190     GC_register_late_disappearing_link(p, v);
01191 }
01192 
01193 void scheme_unweak_reference(void **p)
01194 {
01195   GC_unregister_disappearing_link(p);
01196 }
01197 #endif
01198 
01199 void scheme_add_finalizer(void *p, void (*f)(void *p, void *data), void *data)
01200 {
01201   add_finalizer(p, f, data, 1, 0, NULL, NULL, 0, 0);
01202 }
01203 
01204 void scheme_add_finalizer_once(void *p, void (*f)(void *p, void *data), void *data)
01205 {
01206   add_finalizer(p, f, data, 1, 0, NULL, NULL, 1, 0);
01207 }
01208 
01209 void scheme_subtract_finalizer(void *p, void (*f)(void *p, void *data), void *data)
01210 {
01211   add_finalizer(p, f, data, 1, 0, NULL, NULL, 1, 1);
01212 }
01213 
01214 void scheme_add_scheme_finalizer(void *p, void (*f)(void *p, void *data), void *data)
01215 {
01216   add_finalizer(p, f, data, 0, 0, NULL, NULL, 0, 0);
01217 }
01218 
01219 void scheme_add_scheme_finalizer_once(void *p, void (*f)(void *p, void *data), void *data)
01220 {
01221   add_finalizer(p, f, data, 0, 0, NULL, NULL, 1, 0);
01222 }
01223 
01224 void scheme_register_finalizer(void *p, void (*f)(void *p, void *data), 
01225                             void *data, void (**oldf)(void *p, void *data), 
01226                             void **olddata)
01227 {
01228   add_finalizer(p, f, data, 0, 1, oldf, olddata, 0, 0);
01229 }
01230 
01231 void scheme_remove_all_finalization(void *p)
01232 {
01233   GC_register_finalizer(p, NULL, NULL, NULL, NULL);
01234 }
01235 
01236 void scheme_collect_garbage(void)
01237 {
01238   GC_gcollect();
01239 }
01240 
01241 unsigned long scheme_get_deeper_address(void)
01242 {
01243   int v, *vp;
01244   vp = &v;
01245   return (unsigned long)vp;
01246 }
01247 
01248 /************************************************************************/
01249 /*                             GC_dump                                  */
01250 /************************************************************************/
01251 
01252 #ifndef MZ_PRECISE_GC
01253 # ifdef __cplusplus
01254 extern "C" 
01255 {
01256 # endif
01257   extern MZ_DLLIMPORT void GC_dump(void);
01258 # ifdef __cplusplus
01259 };
01260 # endif
01261 #endif
01262 
01263 #ifdef USE_TAGGED_ALLOCATION
01264 #define NUM_TYPE_SLOTS (_scheme_last_type_ + 5) /* extra space for externally defined */
01265 
01266 static long scheme_memory_count[NUM_TYPE_SLOTS];
01267 static long scheme_memory_actual_count[NUM_TYPE_SLOTS];
01268 static long scheme_memory_size[NUM_TYPE_SLOTS];
01269 static long scheme_memory_actual_size[NUM_TYPE_SLOTS];
01270 static unsigned long scheme_memory_hi[NUM_TYPE_SLOTS];
01271 static unsigned long scheme_memory_lo[NUM_TYPE_SLOTS];
01272 static long scheme_envunbox_count, scheme_envunbox_size;
01273 static long bad_seeds;
01274 static Scheme_Hash_Table *smc_ht;
01275 static int trace_path_type;
01276 
01277 # define OBJ_BUFFER_SIZE 1048576
01278 static void *obj_buffer[OBJ_BUFFER_SIZE];
01279 static int obj_buffer_pos;
01280 static int obj_type;
01281 
01282 # define NUM_RECORDED_APP_SIZES 5
01283 static int app_sizes[NUM_RECORDED_APP_SIZES+1];
01284 static int app_arg_kinds[NUM_RECORDED_APP_SIZES][NUM_RECORDED_APP_SIZES][5];
01285 
01286 void count_tagged(void *p, int size, void *data)
01287 {
01288   int which = SCHEME_TYPE((Scheme_Object *)p);
01289   if ((which >= 0) && (which < _scheme_last_type_)) {
01290     scheme_count_memory((Scheme_Object *)p, smc_ht);
01291   } else if (which >= scheme_num_types())
01292     bad_seeds++;
01293   else {
01294     if (which >= NUM_TYPE_SLOTS)
01295       which = NUM_TYPE_SLOTS - 1;
01296     scheme_memory_count[which]++;
01297     scheme_memory_size[which] += size;
01298   }
01299 
01300   if (which == obj_type) {
01301     if (obj_buffer_pos < OBJ_BUFFER_SIZE) {
01302       obj_buffer[obj_buffer_pos++] = p;
01303     }
01304   }
01305 
01306   if (which == scheme_application_type) {
01307     Scheme_App_Rec *app = (Scheme_App_Rec *)p;
01308     int cnt;
01309     cnt = app->num_args;
01310     if (cnt >= NUM_RECORDED_APP_SIZES) {
01311       cnt = NUM_RECORDED_APP_SIZES;
01312     } else {
01313       int i, devals, kind;
01314       devals = sizeof(Scheme_App_Rec) + (app->num_args * sizeof(Scheme_Object *));
01315       for (i = 0; i <= cnt; i++) {
01316        kind = ((char *)app + devals)[i];
01317        if ((kind >= 0) && (kind <= 4)) {
01318          app_arg_kinds[cnt][i][kind]++;
01319        }
01320       }
01321     }
01322     app_sizes[cnt]++;
01323   }
01324 }
01325 
01326 void count_envunbox(void *p, int size, void *data)
01327 {
01328   scheme_envunbox_count++;
01329   scheme_envunbox_size += size;
01330 }
01331 
01332 static void trace_count(void *p, int size)
01333 {
01334   int which = SCHEME_TYPE((Scheme_Object *)p);
01335   if ((which >= 0) && (which <= _scheme_last_type_)) {
01336    /* fall through to below */ 
01337   } else if (which >= scheme_num_types()) {
01338     bad_seeds++;
01339     return;
01340   } else {
01341     if (which >= NUM_TYPE_SLOTS)
01342       which = NUM_TYPE_SLOTS - 1;
01343    /* fall through to below */ 
01344   }
01345 
01346   {
01347     unsigned long s = (unsigned long)p;
01348     scheme_memory_actual_count[which]++;
01349     scheme_memory_actual_size[which] += size;
01350     if (!scheme_memory_lo[which] || (s < scheme_memory_lo[which]))
01351       scheme_memory_lo[which] = s;
01352     if (!scheme_memory_hi[which] || (s > scheme_memory_hi[which]))
01353       scheme_memory_hi[which] = s;
01354   }
01355 }
01356 
01357 static void trace_stack_count(void *p, int size)
01358 {
01359   /* Do nothing */
01360 }
01361 
01362 static void trace_path(void *p, unsigned long src, void *path_data)
01363 {
01364   if ((trace_path_type > -1)
01365       && ((int)SCHEME_TYPE((Scheme_Object *)p) == trace_path_type))
01366     GC_store_path(p, src, path_data);
01367 }
01368 
01369 static void trace_stack_path(void *p, unsigned long src, void *path_data)
01370 {
01371   if (trace_path_type == -2)
01372     GC_store_path(p, src, path_data);
01373 }
01374 
01375 static void trace_init(void)
01376 {
01377   /* do nothing */
01378 }
01379 
01380 static void trace_done(void)
01381 {
01382   /* do nothing */
01383 }
01384 
01385 static void finalize_object(void *p)
01386 {
01387   ((Scheme_Object *)p)->type = _scheme_values_types_;
01388 }
01389 
01390 static Scheme_Object *local_thread;
01391 static size_t local_thread_size;
01392 
01393 static int skip_foreign_thread(void *p, size_t size)
01394 {
01395   if (p == local_thread)
01396     local_thread_size = size;
01397   else if (size == local_thread_size) {
01398     if ((*(Scheme_Type *)p) == scheme_thread_type) {
01399       /* Has tag and right size; let's assume that it's really a thread. */
01400       Scheme_Custodian *local, *here;
01401       
01402       local = *((Scheme_Thread *)local_thread)->mref;
01403       here = *((Scheme_Thread *)p)->mref;
01404 
01405       /* If p belongs to the local thread's custodian, we'll see the
01406         local thread's custodian while walking up from here: */
01407       while (here) {
01408        if (here == local)
01409          return 0;
01410        if (here->parent)
01411          here = *here->parent;
01412        else
01413          here = NULL;
01414       }
01415 
01416       /* Must be a foreign thread: */
01417       return 1;
01418     }
01419   }
01420 
01421   return 0;
01422 }
01423 
01424 #endif
01425 
01426 void (*scheme_external_dump_info)(void);
01427 void (*scheme_external_dump_arg)(Scheme_Object *arg);
01428 char *(*scheme_external_dump_type)(void *v);
01429 
01430 #ifdef USE_TAGGED_ALLOCATION
01431 static void count_managed(Scheme_Custodian *m, int *c, int *a, int *u, int *t,
01432                        int *ipt, int *opt, int *th)
01433 {
01434   int i;
01435 
01436   *t += 1;
01437   *c += m->count;
01438   *a += m->alloc;
01439   for (i = m->count; i--; ) {
01440     if (m->boxes[i]) {
01441       Scheme_Object *o = (*(m->boxes[i]));
01442       (*u)++;
01443       if (SCHEME_THREADP(o))
01444        (*th)++;
01445       else if (SCHEME_INPORTP(o))
01446        (*ipt)++;
01447       else if (SCHEME_OUTPORTP(o))
01448        (*opt)++;
01449     }
01450   }
01451 
01452   if (*m->sibling)
01453     count_managed(*m->sibling, c, a, u, t, ipt, opt, th);
01454   if (*m->children)
01455     count_managed(*m->children, c, a, u, t, ipt, opt, th);
01456 }
01457 #endif
01458 
01459 #if defined(MZ_PRECISE_GC)
01460 # ifdef MZ_GC_BACKTRACE
01461 #  define MZ_PRECISE_GC_TRACE 1
01462 # else
01463 #  define MZ_PRECISE_GC_TRACE 0
01464 # endif
01465 #else
01466 # define MZ_PRECISE_GC_TRACE 0
01467 #endif
01468 
01469 #if MZ_PRECISE_GC_TRACE
01470 char *(*GC_get_xtagged_name)(void *p) = NULL;
01471 static Scheme_Object *cons_accum_result;
01472 static void cons_onto_list(void *p)
01473 {
01474   cons_accum_result = scheme_make_pair((Scheme_Object *)p, cons_accum_result);
01475 }
01476 #endif
01477 
01478 #if defined(USE_TAGGED_ALLOCATION) || MZ_PRECISE_GC_TRACE
01479 
01480 # ifdef MZ_PRECISE_GC
01481 START_XFORM_SKIP;
01482 #  ifdef DOS_FILE_SYSTEM
01483 extern void gc_fprintf(FILE *ignored, const char *c, ...);
01484 #   define object_console_printf gc_fprintf
01485 #  endif
01486 # endif
01487 
01488 #ifndef object_console_printf
01489 # define object_console_printf fprintf
01490 #endif
01491 
01492 extern int (*scheme_check_print_is_obj)(Scheme_Object *o);
01493 static int check_home(Scheme_Object *o)
01494 {
01495 #ifdef MZ_PRECISE_GC
01496   return (SCHEME_INTP(o) || GC_is_tagged(o) 
01497          || SAME_OBJ(o, scheme_true) 
01498          || SAME_OBJ(o, scheme_false)
01499          || SAME_OBJ(o, scheme_null)
01500          || SAME_OBJ(o, scheme_eof)
01501          || SAME_OBJ(o, scheme_void));
01502 #else
01503   /* GC_set(o) */
01504   return 1;
01505 #endif
01506 }
01507 
01508 static void print_tagged_value(const char *prefix, 
01509                             void *v, int xtagged, unsigned long diff, int max_w,
01510                             const char *suffix)
01511 {
01512   char buffer[256];
01513   char *type, *sep, diffstr[30];
01514   long len;
01515   
01516   sep = "";
01517   
01518   scheme_check_print_is_obj = check_home;
01519 
01520   if (!xtagged) {
01521     type = scheme_write_to_string_w_max((Scheme_Object *)v, &len, max_w);
01522     if (!scheme_strncmp(type, "#<thread", 8) 
01523        && ((type[8] == '>') || (type[8] == ':'))) {
01524       char *run, *sus, *kill, *clean, *deq, *all, *t2;
01525       int state = ((Scheme_Thread *)v)->running, len2;
01526            
01527       run = (state & MZTHREAD_RUNNING) ? "+run" : "";
01528       sus = (state & MZTHREAD_SUSPENDED) ? "+suspended" : "";
01529       kill = (state & MZTHREAD_KILLED) ? "+killed" : "";
01530       clean = (state & MZTHREAD_NEED_KILL_CLEANUP) ? "+cleanup" : "";
01531       deq = (((Scheme_Thread *)v)->next || ((Scheme_Thread *)v)->prev) ? "" : "+deq";
01532       all = !state ? "defunct" : "";
01533 
01534       sprintf(buffer, "[%d=%s%s%s%s%s%s]",
01535              state, run, sus, kill, clean, all, deq);
01536 
01537       len2 = strlen(buffer);
01538       t2 = (char *)scheme_malloc_atomic(len + len2 + 1);
01539       memcpy(t2, type, len);
01540       memcpy(t2 + len, buffer, len2 + 1);
01541       len += len2;
01542       type = t2;
01543     } else if (!scheme_strncmp(type, "#<continuation>", 15)) {
01544       char *t2;
01545       int len2;
01546            
01547       sprintf(buffer, "[%s%.100s]",
01548               (((Scheme_Cont *)v)->composable 
01549                ? "delim;"
01550                : ""),
01551               (((Scheme_Cont *)v)->prompt_tag
01552                ? (SCHEME_CDR(((Scheme_Cont *)v)->prompt_tag)
01553                   ? SCHEME_SYM_VAL(SCHEME_CDR(((Scheme_Cont *)v)->prompt_tag))
01554                   : "<anonymous>")
01555                : "NULL"));
01556       
01557       len2 = strlen(buffer);
01558       t2 = (char *)scheme_malloc_atomic(len + len2 + 1);
01559       memcpy(t2, type, len);
01560       memcpy(t2 + len, buffer, len2 + 1);
01561       len += len2;
01562       type = t2;
01563     } else if (!scheme_strncmp(type, "#<custodian>", 13)) {
01564       char *t2;
01565       int len2;
01566 
01567       sprintf(buffer, "[%d]",
01568               ((Scheme_Custodian *)v)->elems);
01569 
01570       len2 = strlen(buffer);
01571       t2 = (char *)scheme_malloc_atomic(len + len2 + 1);
01572       memcpy(t2, type, len);
01573       memcpy(t2 + len, buffer, len2 + 1);
01574       len += len2;
01575       type = t2;      
01576     } else if (!scheme_strncmp(type, "#<namespace", 11)) {
01577       char *t2;
01578       int len2;
01579            
01580       sprintf(buffer, "[%ld/%ld:%.100s]",
01581              ((Scheme_Env *)v)->phase,
01582               ((Scheme_Env *)v)->mod_phase,
01583              (((Scheme_Env *)v)->module
01584               ? scheme_write_to_string(((Scheme_Env *)v)->module->modname, NULL)
01585               : "(toplevel)"));
01586            
01587       len2 = strlen(buffer);
01588       t2 = (char *)scheme_malloc_atomic(len + len2 + 1);
01589       memcpy(t2, type, len);
01590       memcpy(t2 + len, buffer, len2 + 1);
01591       len += len2;
01592       type = t2;
01593     } else if (!scheme_strncmp(type, "#<global-variable-code", 22)) {
01594       Scheme_Bucket *b = (Scheme_Bucket *)v;
01595       Scheme_Object *bsym = (Scheme_Object *)b->key;
01596       char *t2;
01597       int len2;
01598 
01599       len2 = SCHEME_SYM_LEN(bsym);
01600       t2 = scheme_malloc_atomic(len + len2 + 3);
01601       memcpy(t2, type, len);
01602       memcpy(t2 + len + 1, SCHEME_SYM_VAL(bsym), len2);
01603       t2[len] = '[';
01604       t2[len + 1 + len2] = ']';
01605       t2[len + 1 + len2 + 1] = 0;
01606       len += len2;
01607       type = t2;
01608     } else if (!scheme_strncmp(type, "#<hash-table>", 13)
01609               || !scheme_strncmp(type, "#<hash-table:", 13)) {
01610       char *t2;
01611       int len2;
01612       int htype, size, count;
01613 
01614       if (SCHEME_HASHTP((Scheme_Object *)v)) {
01615        htype = 'n';
01616        size = ((Scheme_Hash_Table *)v)->size;
01617        count = ((Scheme_Hash_Table *)v)->count;
01618       } else {
01619        htype = 'b';
01620        size = ((Scheme_Bucket_Table *)v)->size;
01621        count = ((Scheme_Bucket_Table *)v)->count;
01622       }
01623       
01624       sprintf(buffer, "[%c:%d:%d]", htype, count, size);
01625 
01626       len2 = strlen(buffer);
01627       t2 = scheme_malloc_atomic(len + len2 + 1);
01628       memcpy(t2, type, len);
01629       memcpy(t2 + len, buffer, len2 + 1);
01630       len += len2;
01631       type = t2;
01632     } else if (!scheme_strncmp(type, "#<syntax-code", 13)) {
01633       char *t2, *t3;
01634       long len2, len3;
01635 
01636       t2 = scheme_write_to_string_w_max(SCHEME_IPTR_VAL(v), &len2, 32);
01637       
01638       len3 = len + len2 + 2 + 2;
01639       t3 = (char *)scheme_malloc_atomic(len3);
01640       memcpy(t3, type, len);
01641       t3[len] = (SCHEME_PINT_VAL(v) / 10) + '0';
01642       t3[len + 1] = (SCHEME_PINT_VAL(v) % 10) + '0';
01643       t3[len + 2] = '=';
01644       memcpy(t3 + len + 3, t2, len2);
01645       t3[len + len2 + 3] = 0;
01646       type = t3;
01647       len = len3;
01648 #ifdef MZTAG_REQUIRED
01649     } else if (SAME_TYPE(SCHEME_TYPE(v), scheme_rt_meta_cont)) {
01650       Scheme_Meta_Continuation *mc = (Scheme_Meta_Continuation *)v;
01651       Scheme_Object *pt;
01652       long len2, len3;
01653       char *t2, *t3;
01654 
01655       pt = mc->prompt_tag;
01656       if (pt) {
01657         t3 = scheme_write_to_string_w_max(pt, &len3, max_w);
01658       } else {
01659         t3 = "#f";
01660         len3 = 2;
01661       }
01662 
01663       len2 = 32 + len3;
01664       t2 = (char *)scheme_malloc_atomic(len2);
01665       sprintf(t2, "#<meta-continuation>[%d;%s]", mc->pseudo, t3);
01666       type = t2;
01667       len = strlen(t2);
01668     } else if (SAME_TYPE(SCHEME_TYPE(v), scheme_rt_compact_port)) {
01669       
01670 #endif
01671     } else if (!scheme_strncmp(type, "#<syntax", 8)) {
01672       char *t2, *t3;
01673       long len2, len3;
01674 
01675       t2 = scheme_write_to_string_w_max(SCHEME_STX_VAL(v), &len2, 32);
01676       
01677       len3 = len + len2 + 2;
01678       t3 = (char *)scheme_malloc_atomic(len3);
01679       memcpy(t3, type, len);
01680       t3[len] = '=';
01681       memcpy(t3 + len + 1, t2, len2);
01682       t3[len + len2 + 1] = 0;
01683       type = t3;
01684       len = len3;
01685     }
01686 
01687     sep = "=";
01688   } else if (scheme_external_dump_type) {
01689     type = scheme_external_dump_type(v);
01690     if (*type)
01691       sep = ":";
01692   } else
01693     type = "";
01694   
01695   if (diff)
01696     sprintf(diffstr, "%lx", diff);
01697   
01698   object_console_printf(stderr,
01699                      "%s%p%s%s%s%s%s", 
01700                      prefix,
01701                      v, 
01702                      sep,
01703                      type,
01704                      diff ? "+" : "",
01705                      diff ? diffstr : "",
01706                      suffix);
01707 
01708   scheme_check_print_is_obj = NULL;
01709 }
01710 # ifdef MZ_PRECISE_GC
01711 END_XFORM_SKIP;
01712 # endif
01713 #endif
01714 
01715 Scheme_Object *scheme_dump_gc_stats(int c, Scheme_Object *p[])
01716 {
01717   Scheme_Object *result = scheme_void;
01718 #ifdef USE_TAGGED_ALLOCATION
01719   void *initial_trace_root = NULL;
01720   int (*inital_root_skip)(void *, size_t) = NULL;
01721 #endif
01722 #if MZ_PRECISE_GC_TRACE
01723   int trace_for_tag = 0;
01724   int flags = 0;
01725   int path_length_limit = 1000;
01726   GC_for_each_found_proc for_each_found = NULL;
01727 #else
01728 # ifndef USE_TAGGED_ALLOCATION
01729 #  define flags 0
01730 #  define trace_for_tag 0
01731 #  define path_length_limit 1000
01732 #  define for_each_found NULL
01733 #  define GC_get_xtagged_name NULL
01734 #  define print_tagged_value NULL
01735 # endif
01736 #endif
01737 
01738 #if 0
01739   /* Syntax-object debugging support: */
01740   if ((c == 1) && SCHEME_STXP(p[0])) {
01741     return scheme_explode_syntax(p[0], scheme_make_hash_table(SCHEME_hash_ptr));
01742   }
01743 
01744   if (c && SAME_TYPE(SCHEME_TYPE(p[0]), scheme_compilation_top_type)) {
01745     Scheme_Hash_Table *ht;
01746     Scheme_Compilation_Top *top;
01747     Scheme_Object *vec, *v, *lst = scheme_null;
01748     Scheme_Module *m;
01749     Resolve_Prefix *prefix;
01750     int i, j;
01751 
01752     ht = scheme_make_hash_table(SCHEME_hash_ptr);
01753 
01754     top = (Scheme_Compilation_Top *)p[0];
01755 
01756     j = 0;
01757     while (1) {
01758       if (j)
01759         m = scheme_extract_compiled_module(p[0]);
01760       else
01761         m = NULL;
01762 
01763       if (m) {
01764         if (j == 1) {
01765           prefix = m->prefix;
01766         } else {
01767           int k = j - 2;
01768           if (k >= SCHEME_VEC_SIZE(m->et_body))
01769             break;
01770           v = SCHEME_VEC_ELS(m->et_body)[k];
01771           prefix = (Resolve_Prefix *)SCHEME_VEC_ELS(v)[3];
01772         }
01773       } else {
01774         if (j)
01775           break;
01776         prefix = top->prefix;
01777       }
01778       
01779       vec = scheme_make_vector(prefix->num_stxes, NULL);
01780       for (i = 0; i < prefix->num_stxes; i++) {
01781         v = scheme_explode_syntax(prefix->stxes[i], ht);
01782         SCHEME_VEC_ELS(vec)[i] = v;
01783       }
01784 
01785       lst = scheme_make_pair(vec, lst);
01786       j++;
01787     }
01788 
01789     return scheme_reverse(lst);
01790   }
01791 #endif
01792 
01793   scheme_start_atomic();
01794 
01795   if (scheme_external_dump_arg)
01796     scheme_external_dump_arg(c ? p[0] : NULL);
01797 
01798   scheme_console_printf("Begin Dump\n");
01799 
01800 #ifdef USE_TAGGED_ALLOCATION
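  /* A symbol argument names a type: paths to its instances are traced, or,
     with a second 'objects argument, the instances themselves are collected
     and returned as a list.  The symbol 'stack selects thread stacks, and
     'from plus a value roots the trace at that value, installing a skip
     callback when the value is a thread. */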
01801   trace_path_type = -1;
01802   obj_type = -1;
01803   if (c && SCHEME_SYMBOLP(p[0])) {
01804     Scheme_Object *sym;
01805     char *s;
01806     int i, maxpos, just_objects;
01807 
01808     sym = p[0];
01809     s = scheme_symbol_val(sym);
01810 
01811     maxpos = scheme_num_types();
01812     if (maxpos > NUM_TYPE_SLOTS-1)
01813       maxpos = NUM_TYPE_SLOTS-1;
01814 
01815     just_objects = ((c > 1)
01816                   && SCHEME_SYMBOLP(p[1])
01817                   && !strcmp(SCHEME_SYM_VAL(p[1]), "objects"));
01818 
01819     for (i = 0; i < maxpos; i++) {
01820       void *tn = scheme_get_type_name(i);
01821       if (tn && !strcmp(tn, s)) {
01822        if (just_objects)
01823          obj_type = i;
01824        else
01825          trace_path_type = i;
01826        break;
01827       }
01828     }
01829     if (SAME_OBJ(p[0], scheme_intern_symbol("stack"))) {
01830       trace_path_type = -2;
01831     }
01832 
01833     if ((c > 2)
01834        && SCHEME_SYMBOLP(p[1])
01835        && !strcmp(SCHEME_SYM_VAL(p[1]), "from")) {
01836       initial_trace_root = p[2];
01837       if (SCHEME_THREADP(p[2])) {
01838        local_thread = p[2];
01839        local_thread_size = 0;
01840        inital_root_skip = skip_foreign_thread;
01841       }
01842     }
01843   }
01844 
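  /* Reset the per-type counters, have the GC count what it can reach, then
     walk every tagged set with count_tagged and print a table of per-type
     counts and size estimates, followed by application-size statistics and
     a custodian/thread summary. */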
01845   {
01846     int i;
01847     int stack_c, roots_c, uncollectable_c, final_c;
01848     long total_count = 0, total_size = 0;
01849     long total_actual_count = 0, total_actual_size = 0;
01850     long traced;
01851     int no_walk = 0;
01852 
01853     no_walk = 1 /* (!c || !SAME_OBJ(p[0], scheme_true)) */;
01854     
01855     for (i = 0; i < NUM_TYPE_SLOTS; i++) {
01856       scheme_memory_count[i] = scheme_memory_size[i] = 0;
01857       scheme_memory_actual_size[i] = scheme_memory_actual_count[i] = 0;
01858       scheme_memory_hi[i] = scheme_memory_lo[i] = 0;
01859     }
01860     scheme_envunbox_count = scheme_envunbox_size = 0;
01861     bad_seeds = 0;
01862     for (i = 0; i <= NUM_RECORDED_APP_SIZES; i++) {
01863       app_sizes[i] = 0;
01864     }
01865     {
01866       int j, k;
01867       for (i = 0; i < NUM_RECORDED_APP_SIZES; i++) {
01868        for (j = 0; j <= i; j++) {
01869          for (k = 0; k <= 4; k++) {
01870            app_arg_kinds[i][j][k] = 0;
01871          }
01872        }
01873       }
01874     }
01875 
01876     traced = GC_trace_count(&stack_c, &roots_c, &uncollectable_c, &final_c);
01877     GC_dump();
01878 
01879     scheme_console_printf("\ntraced: %ld\n", traced);
01880 
01881     tagged = tagged_while_counting;
01882     
01883     if (!no_walk)
01884       smc_ht = scheme_make_hash_table(SCHEME_hash_ptr);
01885     
01886     if (tagged) 
01887       GC_for_each_element(real_tagged, count_tagged, NULL);
01888     if (tagged_eternal) 
01889       GC_for_each_element(tagged_eternal, count_tagged, NULL);
01890     if (tagged_uncollectable) 
01891       GC_for_each_element(tagged_uncollectable, count_tagged, NULL);
01892     if (tagged_atomic)
01893       GC_for_each_element(tagged_atomic, count_tagged, NULL);
01894     if (envunbox)
01895       GC_for_each_element(envunbox, count_envunbox, NULL);
01896 
01897     tagged = real_tagged;
01898 
01899     scheme_console_printf("Begin MzScheme\n");
01900     scheme_console_printf("%30.30s %10s %10s %10s %8s - %8s\n",
01901                        "TYPE", "COUNT", "ESTM-SIZE", "TRACE-SIZE", 
01902                        "LO-LOC", "HI-LOC");
01903     for (i = 0; i < NUM_TYPE_SLOTS; i++) {
01904       if (scheme_memory_count[i] || scheme_memory_actual_count[i]) {
01905        scheme_console_printf("%30.30s %10ld %10ld %10ld %8lx - %8lx\n",
01906                            (i < NUM_TYPE_SLOTS-1)
01907                            ? scheme_get_type_name(i)
01908                            : "other",
01909                            scheme_memory_actual_count[i],
01910                            scheme_memory_size[i],
01911                            scheme_memory_actual_size[i],
01912                            scheme_memory_lo[i],
01913                            scheme_memory_hi[i]);
01914        if (scheme_memory_actual_count[i] != scheme_memory_count[i]) {
01915          scheme_console_printf("%30.30s reach count: %10ld\n",
01916                             "", scheme_memory_count[i]);
01917        }
01918        total_count += scheme_memory_count[i];
01919        total_size += scheme_memory_size[i];
01920        total_actual_count += scheme_memory_actual_count[i];
01921        total_actual_size += scheme_memory_actual_size[i];
01922       }
01923     }
01924 
01925     scheme_console_printf("%30.30s %10ld %10ld          -\n",
01926                        "envunbox", scheme_envunbox_count, scheme_envunbox_size);
01927     total_count += scheme_envunbox_count;
01928     total_size += scheme_envunbox_size;
01929 
01930     scheme_console_printf("%30.30s          - %10ld          -\n",
01931                        "miscellaneous", 
01932                        scheme_misc_count + scheme_type_table_count);
01933     total_size += scheme_misc_count + scheme_type_table_count;
01934 
01935     scheme_console_printf("%30.30s          -          - %10ld\n",
01936                        "roots", roots_c);
01937     total_actual_size += roots_c;
01938 
01939     scheme_console_printf("%30.30s          -          - %10ld\n",
01940                        "stack", stack_c);
01941     total_actual_size += stack_c;
01942 
01943     scheme_console_printf("%30.30s          -          - %10ld\n",
01944                        "unreached-uncollectable", uncollectable_c);
01945     total_actual_size += uncollectable_c;
01946 
01947     scheme_console_printf("%30.30s          -          - %10ld\n",
01948                        "finalization", final_c);
01949     total_actual_size += final_c;
01950 
01951     scheme_console_printf("%30.30s %10ld %10ld %10ld\n",
01952                        "total", total_count, total_size, 
01953                        total_actual_size);
01954     scheme_console_printf("End MzScheme\n");
01955 
01956     scheme_console_printf("Begin Apps\n");
01957     for (i = 0; i < NUM_RECORDED_APP_SIZES; i++) {
01958       int j, k;
01959       scheme_console_printf("  %d%s: %d", i, 
01960                          (i == NUM_RECORDED_APP_SIZES ? "+" : ""), 
01961                          app_sizes[i]);
01962       for (j = 0; j <= i; j++) {
01963        scheme_console_printf(" (");
01964        for (k = 0; k <= 4; k++) {
01965          if (k)
01966            scheme_console_printf(",");
01967          scheme_console_printf("%d", app_arg_kinds[i][j][k]);
01968        }
01969        scheme_console_printf(")");
01970       }
01971       scheme_console_printf("\n");
01972     }
01973     scheme_console_printf("End Apps\n");
01974 
01975     {
01976       Scheme_Custodian *m = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);
01977       int c = 0, a = 0, u = 0, t = 0, ipt = 0, opt = 0, th = 0;
01978 
01979       while (*m->parent)
01980        m = *m->parent;
01981 
01982       count_managed(m, &c, &a, &u, &t, &ipt, &opt, &th);
01983 
01984       scheme_console_printf("custodians: %d  managed: actual: %d   breadth: %d   room: %d\n"
01985                          "                        input-ports: %d  output-ports: %d  threads: %d\n"
01986                          "stacks: %d\n", 
01987                          t, u, c, a, ipt, opt, th,
01988                          scheme_num_copied_stacks);
01989     }
01990 
01991     if (bad_seeds)
01992       scheme_console_printf("ERROR: %ld illegal tags found\n", bad_seeds);
01993 
01994     smc_ht = NULL;
01995   }
01996 
01997 #else
01998 
01999 # if MZ_PRECISE_GC_TRACE
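  /* Argument handling for the precise GC's trace support:
       - a symbol naming a type, or a fixnum tag, selects what to trace;
       - 'fnl adds not-yet-finalized objects to the dump;
       - 'peek, 'next and 'addr are low-level object probes that return
         immediately without dumping;
       - a thread argument dumps that thread's GC variable stack and returns;
       - a second argument that is a fixnum limits the printed path length,
         while 'cons collects the found objects into a list instead. */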
02000   cons_accum_result = scheme_void;
02001   if (c && SCHEME_SYMBOLP(p[0])) {
02002     Scheme_Object *sym;
02003     char *s;
02004     int i, maxpos;
02005 
02006     sym = p[0];
02007     s = scheme_symbol_val(sym);
02008 
02009     maxpos = scheme_num_types();
02010 
02011     for (i = 0; i < maxpos; i++) {
02012       void *tn;
02013       tn = scheme_get_type_name(i);
02014       if (tn && !strcmp(tn, s)) {
02015        trace_for_tag = i;
02016        flags |= GC_DUMP_SHOW_TRACE;
02017        break;
02018       }
02019     }
02020 
02021     if (!strcmp("fnl", s))
02022       flags |= GC_DUMP_SHOW_FINALS;
02023 
02024     if (!strcmp("peek", s) && (c == 3)) {
02025       long n;
02026       scheme_end_atomic();
02027       if (scheme_get_int_val(p[1], &n)) {
02028        if (GC_is_tagged_start((void *)n)) {
02029          return (Scheme_Object *)n;
02030        } else
02031          return p[2];
02032       }
02033     }
02034     
02035     if (!strcmp("next", s) && (c == 2)) {
02036       void *pt;
02037       scheme_end_atomic();
02038       if (SCHEME_FALSEP(p[1]))
02039        pt = GC_next_tagged_start(NULL);
02040       else
02041        pt = GC_next_tagged_start((void *)p[1]);
02042       if (pt)
02043        return (Scheme_Object *)pt;
02044       else
02045        return scheme_false;
02046     }
02047 
02048     if (!strcmp("addr", s) && (c == 2)) {
02049       scheme_end_atomic();      
02050       return scheme_make_integer_value((long)p[1]);
02051     }
02052   } else if (c && SCHEME_INTP(p[0])) {
02053     trace_for_tag = SCHEME_INT_VAL(p[0]);
02054     flags |= GC_DUMP_SHOW_TRACE;
02055   } else if (c && SCHEME_THREADP(p[0])) {
02056     Scheme_Thread *t = (Scheme_Thread *)p[0];
02057     void **var_stack, *limit;
02058     long delta;
02059 
02060     scheme_console_printf("Thread: %p\n", t);
02061     if (t->running) {
02062       if (scheme_current_thread == t) {
02063         scheme_console_printf(" swapped in\n");
02064         var_stack = GC_variable_stack;
02065         delta = 0;
02066         limit = (void *)scheme_get_current_thread_stack_start();
02067       } else {
02068         scheme_console_printf(" swapped out\n");
02069         var_stack = (void **)t->jmpup_buf.gc_var_stack;
02070         delta = (long)t->jmpup_buf.stack_copy - (long)t->jmpup_buf.stack_from;
02071         /* FIXME: stack direction */
02072         limit = (char *)t->jmpup_buf.stack_copy + t->jmpup_buf.stack_size;
02073       }
02074       GC_dump_variable_stack(var_stack, delta, limit, NULL,
02075                              scheme_get_type_name,
02076                              GC_get_xtagged_name,
02077                              print_tagged_value);
02078     } else {
02079       scheme_console_printf(" done\n");
02080     }
02081     scheme_end_atomic();
02082     return scheme_void;
02083   }
02084 
02085   if ((c > 1) && SCHEME_INTP(p[1]))
02086     path_length_limit = SCHEME_INT_VAL(p[1]);
02087   else if ((c > 1) && SCHEME_SYMBOLP(p[1]) && !strcmp("cons", SCHEME_SYM_VAL(p[1]))) {
02088     for_each_found = cons_onto_list;
02089     cons_accum_result = scheme_null;
02090     flags -= (flags & GC_DUMP_SHOW_TRACE);
02091   }
02092   scheme_console_printf("Begin Dump\n");
02093 #endif
02094 
02095 # ifdef MZ_PRECISE_GC
02096   GC_dump_with_traces(flags, 
02097                     scheme_get_type_name,
02098                     GC_get_xtagged_name,
02099                     for_each_found,
02100                     trace_for_tag,
02101                     print_tagged_value,
02102                     path_length_limit);
02103 # else
02104   GC_dump();
02105 # endif
02106 #endif
02107 
02108   if (scheme_external_dump_info)
02109     scheme_external_dump_info();
02110 
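  /* With tagged allocation, also print the reference paths recorded by the
     trace: each path starts at a root (printed as `$name') and every hop is
     shown with print_tagged_value, limited by the error-print-width
     parameter. */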
02111 #ifdef USE_TAGGED_ALLOCATION
02112   {
02113     void **ps = NULL;
02114     int l;
02115     int max_w;
02116     Scheme_Object *w;
02117 
02118     GC_inital_root_skip = inital_root_skip;
02119     GC_initial_trace_root = initial_trace_root;
02120     GC_trace_path();
02121     GC_inital_root_skip = NULL;
02122     GC_initial_trace_root = NULL;
02123     
02124     w = scheme_get_param(scheme_current_config(), MZCONFIG_ERROR_PRINT_WIDTH);
02125     if (SCHEME_INTP(w))
02126       max_w = SCHEME_INT_VAL(w);
02127     else
02128       max_w = 10000;
02129 
02130     scheme_console_printf("Begin Paths\n");
02131 
02132     while ((ps = GC_get_next_path(ps, &l))) {
02133       int i, j;
02134       if (l)
02135        scheme_console_printf("$%s", ps[0]);
02136       for (i = 1, j = 2; i < l; i++, j += 2) {
02137        void *v = ps[j];
02138        unsigned long diff = (unsigned long)ps[j + 1];
02139        struct GC_Set *home;
02140 
02141        home = GC_set(v);
02142        if (home
02143            && ((home == real_tagged)
02144               || (home == tagged_atomic)
02145               || (home == tagged_uncollectable)
02146               || (home == tagged_eternal))) {
02147          print_tagged_value("\n  ->", v, 0, diff, max_w, "");
02148        } else
02149          print_tagged_value("\n  ->", v, 1, diff, max_w, "");
02150       }
02151       scheme_console_printf("\n");
02152     }
02153 
02154     GC_clear_paths();
02155 
02156     scheme_console_printf("End Paths\n");
02157   }
02158 
02159   scheme_console_printf("Begin Help\n");
02160   scheme_console_printf(" (dump-memory-stats sym) - prints paths to instances of type named by sym.\n");
02161   scheme_console_printf("   Examples: (dump-memory-stats '<pair>), (dump-memory-stats 'frame).\n");
02162   scheme_console_printf("   If sym is 'stack, prints paths to thread stacks.\n");
02163   scheme_console_printf(" (dump-memory-stats sym 'objects) - prints all instances of type named by sym.\n");
02164   scheme_console_printf(" (dump-memory-stats sym 'from from-v) - prints paths, paths through from-v first.\n");
02165   scheme_console_printf("End Help\n");
02166 
02167   if (obj_type >= 0) {
02168     result = scheme_null;
02169     while (obj_buffer_pos--) {
02170       result = scheme_make_pair((Scheme_Object *)(obj_buffer[obj_buffer_pos]), result);
02171     }
02172   }
02173 #endif
02174 
02175 # if MZ_PRECISE_GC_TRACE
02176   scheme_console_printf("Begin Help\n");
02177   scheme_console_printf(" (dump-memory-stats sym) - prints paths to instances of type named by sym.\n");
02178   scheme_console_printf("   Example: (dump-memory-stats '<pair>)\n");
02179   scheme_console_printf(" (dump-memory-stats 'fnl) - prints not-yet-finalized objects.\n");
02180   scheme_console_printf(" (dump-memory-stats num) - prints paths to objects with tag num.\n");
02181   scheme_console_printf(" (dump-memory-stats -num) - prints paths to objects of size num.\n");
02182   scheme_console_printf(" (dump-memory-stats sym/num len) - limits path to size len.\n");
02183   scheme_console_printf(" (dump-memory-stats sym/num 'cons) - builds list instead of showing paths.\n");
02184   scheme_console_printf(" (dump-memory-stats 'peek num v) - returns value if num is address of object, v otherwise.\n");
02185   scheme_console_printf(" (dump-memory-stats 'next v) - next tagged object after v, #f if none; start with #f.\n");
02186   scheme_console_printf(" (dump-memory-stats 'addr v) - returns the address of v.\n");
02187   scheme_console_printf(" (dump-memory-stats thread) - shows information about the thread.\n");
02188   scheme_console_printf("End Help\n");
02189 
02190   result = cons_accum_result;
02191   cons_accum_result = scheme_void;
02192 # endif
02193 
02194   scheme_console_printf("End Dump\n");
02195 
02196   scheme_end_atomic();
02197 
02198   return result;
02199 }
02200 
02201 
02202 
02203 #ifdef MEMORY_COUNTING_ON
02204 
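/* Would estimate the memory reachable from a closure's environment, but the
   walk below is compiled out (#if 0), so it currently always returns 0. */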
02205 long scheme_count_closure(Scheme_Object **o, mzshort len, Scheme_Hash_Table *ht)
02206 {
02207 #if 0
02208   int i;
02209   int s = 0;
02210 
02211   for (i = 0; i < len; i++) {
02212     if (!scheme_lookup_in_table(ht, (const char *)o[i])) {
02213       scheme_hash_set(ht, o[i], scheme_true);
02214       if (GC_size(o[i]) == sizeof(Scheme_Object *)) {
02215        /* May be an environment box */
02216        Scheme_Object *d = *(Scheme_Object **)o[i];
02217        if (GC_size(d) >= sizeof(Scheme_Type)) {
02218          /* Ok - probably it is a box. */
02219          s += sizeof(Scheme_Object *);
02220          s += scheme_count_memory(d, ht);
02221        } else {
02222          /* Not an environment box. */
02223          s += scheme_count_memory(o[i], ht);
02224        }
02225       } else {
02226        s += scheme_count_memory(o[i], ht);
02227       }
02228     }
02229   }
02230 
02231   return s;
02232 #endif
02233   return 0;
02234 }
02235 
02236 
02237 #if 0
02238 void scheme_check_home(Scheme_Object *root)
02239 {
02240   struct GC_Set *home;
02241   home = GC_set(root);
02242   if ((home != real_tagged)
02243       && (home != tagged_atomic)
02244       && (home != tagged_uncollectable)
02245       && (home != tagged_eternal)) {
02246     scheme_console_printf("Check: bad Scheme object: %lx\n", (unsigned long)root);
02247   }
02248 }
02249 #endif
02250 
02251 #define FORCE_SUBPARTS 0
02252 #define FORCE_KNOWN_SUBPARTS 1
02253 #define CAN_TRACE_HOME 1
02254 
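/* Estimates the memory occupied by a Scheme value from its type tag.  For
   each object, s is the shallow size of the record itself and, for types
   with known sub-structure, e accumulates an estimate for referenced values
   via COUNT (which recurses only when a hash table is supplied, so that
   shared objects are counted once).  The shallow size and a per-type count
   are recorded in scheme_memory_size/scheme_memory_count for the dump code
   above. */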
02255 long scheme_count_memory(Scheme_Object *root, Scheme_Hash_Table *ht)
02256 {
02257   Scheme_Type type;
02258   long s = sizeof(Scheme_Simple_Object), e = 0;
02259   int need_align = 0;
02260   struct GC_Set *home;
02261 
02262   if (!root || SCHEME_INTP(root))
02263     return 0;
02264 
02265   type = SCHEME_TYPE(root);
02266 
02267   if (type >= _scheme_last_type_)
02268     return 0;
02269 
02270   if (ht && scheme_hash_get(ht, root))
02271     return 0;
02272 
02273   home = GC_set(root);
02274 #if CAN_TRACE_HOME
02275   if ((home != real_tagged)
02276       && (home != tagged_atomic)
02277       && (home != tagged_uncollectable)
02278       && (home != tagged_eternal)) {
02279     scheme_console_printf("Bad Scheme object: %lx\n", (unsigned long)root);
02280     return 0;
02281   }
02282 #endif
02283 
02284   if (ht)
02285     scheme_hash_set(ht, root, scheme_true);
02286 
02287 #define COUNT(x) (ht ? scheme_count_memory((Scheme_Object *)x, ht) : 0)
02288 
02289   switch (type) {
02290   case scheme_variable_type:
02291     s = sizeof(Scheme_Bucket);
02292 #if FORCE_SUBPARTS
02293     e = COUNT(((Scheme_Bucket *)root)->key)
02294       + COUNT(((Scheme_Bucket *)root)->val);
02295 #endif
02296     break;
02297   case scheme_local_type: 
02298   case scheme_local_unbox_type:
02299     s = sizeof(Scheme_Local);
02300     break;
02301   case scheme_syntax_type:
02302 #if FORCE_KNOWN_SUBPARTS
02303     e = COUNT(SCHEME_IPTR_VAL(root));
02304 #endif
02305     break;
02306   case scheme_application_type:
02307     {
02308       Scheme_App_Rec *app = (Scheme_App_Rec *)root;
02309       int i;
02310 
02311       s = sizeof(Scheme_App_Rec) + (app->num_args * sizeof(Scheme_Object *))
02312        + (app->num_args + 1);
02313       need_align = 1;
02314 #if FORCE_KNOWN_SUBPARTS
02315       e = COUNT(app->args[0]);
02316       for (i = 1; i <= app->num_args; i++) {
02317        e += COUNT(app->args[i]);
02318       }
02319 #endif
02320     }
02321     break;
02322   case scheme_sequence_type:
02323   case scheme_case_lambda_sequence_type:
02324   case scheme_begin0_sequence_type:
02325     {
02326       Scheme_Sequence *seq = (Scheme_Sequence *)root;
02327       int i;
02328 
02329       s = sizeof(Scheme_Sequence) + (seq->count - 1) * sizeof(Scheme_Object *);
02330 
02331 #if FORCE_KNOWN_SUBPARTS
02332       for (i = e = 0; i < seq->count; i++) {
02333        e += COUNT(seq->array[i]);
02334       }
02335 #endif
02336     }
02337     break;
02338   case scheme_branch_type:
02339     {
02340       Scheme_Branch_Rec *rec = (Scheme_Branch_Rec *)root;
02341       
02342       s = sizeof(Scheme_Branch_Rec);
02343 #if FORCE_KNOWN_SUBPARTS
02344       e = COUNT(rec->test) + COUNT(rec->tbranch) + COUNT(rec->fbranch);
02345 #endif
02346     }
02347     break;
02348   case scheme_unclosed_procedure_type:
02349   case scheme_compiled_unclosed_procedure_type:
02350     {
02351       Scheme_Closure_Data *data = 
02352        (Scheme_Closure_Data *)root;
02353 
02354       s = sizeof(Scheme_Closure_Data);
02355       s += data->closure_size * sizeof(mzshort);
02356 #if FORCE_KNOWN_SUBPARTS
02357       e = COUNT(data->code);
02358 #endif
02359     }
02360     break;
02361   case scheme_let_value_type:
02362     {
02363       Scheme_Let_Value *let = (Scheme_Let_Value *)root;
02364 
02365       s = sizeof(Scheme_Let_Value);
02366 #if FORCE_KNOWN_SUBPARTS
02367       e = COUNT(let->value) + COUNT(let->body);
02368 #endif
02369     }
02370     break;
02371   case scheme_compiled_let_value_type:
02372     {
02373       Scheme_Compiled_Let_Value *let = (Scheme_Compiled_Let_Value *)root;
02374 
02375       s = sizeof(Scheme_Compiled_Let_Value);
02376 #if FORCE_KNOWN_SUBPARTS
02377       e = COUNT(let->value) + COUNT(let->body);
02378 #endif
02379     }
02380     break;
02381   case scheme_let_void_type:
02382     {
02383       Scheme_Let_Void *let = (Scheme_Let_Void *)root;
02384 
02385       s = sizeof(Scheme_Let_Void);
02386 #if FORCE_KNOWN_SUBPARTS
02387       e = COUNT(let->body);
02388 #endif
02389     }
02390     break;
02391   case scheme_compiled_let_void_type:
02392     {
02393       Scheme_Let_Header *let = (Scheme_Let_Header *)root;
02394 
02395       s = sizeof(Scheme_Let_Header);
02396 #if FORCE_KNOWN_SUBPARTS
02397       e = COUNT(let->body);
02398 #endif
02399     }
02400     break;
02401   case scheme_letrec_type:
02402     {
02403       Scheme_Letrec *let = (Scheme_Letrec *)root;
02404       int i;
02405 
02406       s = sizeof(Scheme_Letrec);
02407       s += let->count * sizeof(Scheme_Object *);
02408 #if FORCE_KNOWN_SUBPARTS
02409       e = COUNT(let->body);
02410       for (i = 0; i < let->count; i++) {
02411        e += COUNT(let->procs[i]);
02412       }
02413 #endif
02414     }
02415     break;
02416   case scheme_char_type:
02417     s = sizeof(Scheme_Small_Object);
02418     break;
02419   case scheme_integer_type:
02420     s = 0;
02421     break;
02422   case scheme_double_type:
02423     s = sizeof(Scheme_Double);
02424     break;
02425   case scheme_float_type:
02426     break;
02427   case scheme_char_string_type:
02428     s += (SCHEME_CHAR_STRTAG_VAL(root) + 1) * sizeof(mzchar);
02429     need_align = 1;
02430     break;
02431   case scheme_byte_string_type:
02432     s += SCHEME_BYTE_STRTAG_VAL(root) + 1;
02433     need_align = 1;
02434     break;
02435   case scheme_symbol_type:
02436     s = sizeof(Scheme_Symbol) + SCHEME_SYM_LEN(root) - 1;
02437     need_align = 1;
02438     break;
02439   case scheme_null_type: 
02440     break;
02441   case scheme_pair_type:
02442 #if FORCE_KNOWN_SUBPARTS
02443     e = COUNT(SCHEME_CAR(root)) + COUNT(SCHEME_CDR(root));
02444 #endif
02445     break;
02446   case scheme_vector_type:
02447     {
02448       int count = SCHEME_VEC_SIZE(root), i;
02449       Scheme_Object **array = SCHEME_VEC_ELS(root);
02450 
02451       s += count * sizeof(Scheme_Object*);
02452 
02453 #if FORCE_KNOWN_SUBPARTS
02454       for (i = e = 0; i < count; i++) {
02455        e += COUNT(array[i]);
02456       }
02457 #endif
02458     }
02459     break;
02460   case scheme_prim_type:
02461     {
02462       if (((Scheme_Primitive_Proc *)root)->pp.flags & SCHEME_PRIM_IS_MULTI_RESULT)
02463        s = sizeof(Scheme_Prim_W_Result_Arity);
02464       else
02465        s = sizeof(Scheme_Primitive_Proc);
02466     }  
02467     break;
02468   case scheme_closure_type:
02469     {
02470       Scheme_Closure_Data *data;
02471       Scheme_Object **vals;
02472       
02473       data = SCHEME_COMPILED_CLOS_CODE(root);
02474       vals = SCHEME_COMPILED_CLOS_ENV(root);
02475 
02476       s += (data->closure_size * sizeof(Scheme_Object *));
02477 #if FORCE_KNOWN_SUBPARTS
02478       e = COUNT(data) + scheme_count_closure(vals, data->closure_size, ht);
02479 #endif
02480     }
02481     break;
02482   case scheme_closed_prim_type:
02483     {
02484       if (((Scheme_Closed_Primitive_Proc *)root)->pp.flags & SCHEME_PRIM_IS_MULTI_RESULT)
02485        s = sizeof(Scheme_Closed_Prim_W_Result_Arity);
02486       else
02487        s = sizeof(Scheme_Closed_Primitive_Proc);
02488     }  
02489     break;
02490   case scheme_cont_type:
02491     {
02492       Scheme_Cont *c = (Scheme_Cont *)root;
02493       Scheme_Saved_Stack *rs;
02494 
02495       s = sizeof(Scheme_Cont);
02496 
02497       for (rs = c->runstack_copied; rs; rs = rs->prev) {
02498        s += sizeof(Scheme_Saved_Stack);
02499       }
02500     }
02501     break;
02502   case scheme_input_port_type: 
02503     scheme_count_input_port(root, &s, &e, ht);
02504     break;
02505   case scheme_output_port_type:
02506     scheme_count_output_port(root, &s, &e, ht);
02507     break;
02508   case scheme_eof_type:
02509   case scheme_true_type: 
02510   case scheme_false_type:
02511   case scheme_void_type:
02512   case scheme_undefined_type:
02513     /* Only one */
02514     break;
02515   case scheme_syntax_compiler_type:
02516     break;
02517   case scheme_macro_type:
02518   case scheme_set_macro_type:
02519     s = sizeof(Scheme_Small_Object);
02520 #if FORCE_KNOWN_SUBPARTS
02521     e = COUNT(SCHEME_PTR_VAL(root));
02522 #endif
02523     break;
02524   case scheme_box_type:
02525     s = sizeof(Scheme_Small_Object);
02526 #if FORCE_KNOWN_SUBPARTS
02527     e = COUNT(SCHEME_BOX_VAL(root));
02528 #endif
02529     break;
02530   case scheme_will_executor_type:
02531     s = sizeof(Scheme_Simple_Object);
02532     break;
02533   case scheme_custodian_type: 
02534     {
02535       Scheme_Custodian *m = (Scheme_Custodian *)root;
02536 
02537       s = sizeof(Scheme_Custodian);
02538       e = m->alloc * (sizeof(Scheme_Object **)
02539                     + sizeof(Scheme_Custodian_Reference *)
02540                     + sizeof(void *)
02541                     + sizeof(void *));
02542     }
02543     break;
02544   case scheme_thread_type:
02545     {
02546       Scheme_Thread *p = (Scheme_Thread *)root;
02547       Scheme_Saved_Stack *saved;
02548 
02549       s = sizeof(Scheme_Thread)
02550        + ((p->runstack_size + p->tail_buffer_size) * sizeof(Scheme_Object *));
02551 
02552 #if FORCE_KNOWN_SUBPARTS
02553       e = COUNT(p->init_config);
02554 #endif
02555 
02556       /* Check stack: */
02557       for (saved = p->runstack_saved; saved; saved = saved->prev) {
02558        s += (saved->runstack_size * sizeof(Scheme_Object *));
02559       }
02560     }
02561     break;
02562   case scheme_namespace_type:
02563     {
02564       Scheme_Env *env = (Scheme_Env *)root;
02565 
02566       s = sizeof(Scheme_Env);
02567 #if FORCE_KNOWN_SUBPARTS
02568       e = COUNT(env->toplevel);
02569 #endif
02570     }
02571     break;
02572   case scheme_config_type:
02573     {
02574       s = sizeof(Scheme_Config) + (sizeof(Scheme_Object *) * __MZCONFIG_BUILTIN_COUNT__);
02575 #if FORCE_SUBPARTS
02576       {
02577        Scheme_Config *c = (Scheme_Config *)root;
02578        int i;
02579 
02580        e = COUNT(c->extensions) + COUNT(c->base);
02581 
02582        for (i = 0; i < __MZCONFIG_BUILTIN_COUNT__; i++) {
02583          e += COUNT(*c->configs[i]);
02584        }
02585       }
02586 #endif
02587     }
02588     break;
02589   case scheme_proc_struct_type:
02590   case scheme_structure_type:
02591     {
02592       Scheme_Object **slots = ((Scheme_Structure *)root)->slots;
02593       int i, count = SCHEME_STRUCT_NUM_SLOTS(root);
02594 
02595       s = sizeof(Scheme_Structure) + (count - 1) * sizeof(Scheme_Object *);
02596 #if FORCE_KNOWN_SUBPARTS
02597       for (i = e = 0; i < count; i++) {
02598        e += COUNT(slots[i]);
02599       }
02600       e += COUNT(((Scheme_Structure *)root)->stype);
02601 #endif
02602     }
02603     break;
02604   case scheme_bignum_type:
02605     {
02606       int count = SCHEME_BIGLEN(root);
02607 
02608       if (count < 0)
02609        count = -count;
02610 
02611       s = sizeof(Small_Bignum) + (count - 1) * sizeof(bigdig);
02612     }
02613     break;
02614   case scheme_escaping_cont_type:
02615     s = sizeof(Scheme_Escaping_Cont);
02616     break;
02617   case scheme_sema_type:
02618     s = sizeof(Scheme_Sema);
02619     break;
02620   case scheme_compilation_top_type:
02621     s = sizeof(Scheme_Compilation_Top);
02622     break;
02623   case scheme_hash_table_type:
02624     {
02625       Scheme_Hash_Table *ht = (Scheme_Hash_Table *)root;
02626 
02627       s = sizeof(Scheme_Hash_Table)
02628        + ht->size * sizeof(Scheme_Object *);
02629       
02630 #if FORCE_SUBPARTS
02631       {
02632        int i;
02633        for (i = e = 0; i < ht->size; i++) {
02634          if (ht->buckets[i]) {
02635            if (ht->by_address)
02636              e += COUNT(ht->buckets[i]);
02637            else
02638              e += COUNT(ht->buckets[i]->val);
02639          }
02640        }
02641       }
02642 #endif
02643     }
02644     break;
02645   case scheme_weak_box_type:
02646     s = sizeof(Scheme_Small_Object);
02647     e = COUNT(SCHEME_BOX_VAL(root));
02648     break;
02649   case scheme_complex_type:
02650     s = sizeof(Scheme_Complex);
02651     e = COUNT(((Scheme_Complex *)root)->r) + COUNT(((Scheme_Complex *)root)->i);
02652     break;
02653   case scheme_rational_type:
02654     s = sizeof(Scheme_Rational);
02655 #if FORCE_KNOWN_SUBPARTS
02656     e = COUNT(((Scheme_Rational *)root)->num) 
02657       + COUNT(((Scheme_Rational *)root)->denom);
02658 #endif
02659     break;
02660   case scheme_struct_type_type:
02661     {
02662       Scheme_Struct_Type *st = (Scheme_Struct_Type *)root;
02663       s = sizeof(Scheme_Struct_Type) + st->name_pos * sizeof(Scheme_Object*);
02664 #if FORCE_KNOWN_SUBPARTS
02665       e = COUNT(st->name);
02666       if (st->name_pos)
02667        e += COUNT(st->parent_types[st->name_pos - 1]);
02668 #endif
02669     }
02670     break;
02671   case scheme_listener_type:
02672     s = sizeof(Scheme_Small_Object);
02673     break;
02674   case scheme_random_state_type:
02675     s = 130; /* wild guess */
02676     break;
02677   case scheme_eval_waiting_type:
02678   case scheme_tail_call_waiting_type:
02679     /* Only one */
02680     break;
02681   case scheme_multiple_values_type:
02682     /* Only one */
02683     break;
02684   case scheme_placeholder_type:
02685     s = 0; /* Infrequent */
02686     break;
02687   default:
02688     s = 0;
02689     break;
02690   }
02691 
02692   if (need_align) {
02693     /* Round up to sizeof(void*) boundary: */
02694     if (s & (sizeof(void*) - 1))
02695       s += sizeof(void*) - (s & (sizeof(void*) - 1));
02696   }
02697 
02698   scheme_memory_count[type]++;
02699   scheme_memory_size[type] += s;
02700 
02701   return s;
02702 }
02703 
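/* Counts one environment box: with a hash table the boxed value is counted
   too, and in either case a fixed 4 bytes is charged, apparently for the
   box cell itself. */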
02704 long scheme_count_envbox(Scheme_Object *root, Scheme_Hash_Table *ht)
02705 {
02706 #if CAN_TRACE_HOME
02707   if (GC_set(root) != envunbox) {
02708     scheme_console_printf("Bad envunbox object: %lx\n", (unsigned long)root);
02709     return 0;
02710   }
02711 #endif
02712 
02713   if (ht)
02714     return scheme_count_memory(SCHEME_ENVBOX_VAL(root), ht) + 4;
02715   else
02716     return 4;
02717 }
02718 
02719 #endif