Back to index

plt-scheme  4.2.1
sgc.c
Go to the documentation of this file.
00001 /*
00002   SenoraGC, a relatively portable conservative GC for a slightly
00003     cooperative environment
00004   Copyright (c) 2004-2009 PLT Scheme Inc.
00005   Copyright (c) 1996-98 Matthew Flatt
00006   All rights reserved.
00007 
00008     This library is free software; you can redistribute it and/or
00009     modify it under the terms of the GNU Library General Public
00010     License as published by the Free Software Foundation; either
00011     version 2 of the License, or (at your option) any later version.
00012 
00013     This library is distributed in the hope that it will be useful,
00014     but WITHOUT ANY WARRANTY; without even the implied warranty of
00015     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00016     Library General Public License for more details.
00017 
00018     You should have received a copy of the GNU Library General Public
00019     License along with this library; if not, write to the Free
00020     Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
00021     Boston, MA 02110-1301 USA.
00022 
00023   After Boehm et al.
00024 
00025   Note: this implementation is probably still a little hardwired for
00026   32-bit addresses.
00027 
00028  */
00029 
00030 #include <stdlib.h>
00031 #include <setjmp.h>
00032 #include <stdio.h>
00033 #include <string.h>
00034 #include "../sconfig.h"
00035 #include "mzconfig.h"
00036 #include "sgc.h"
00037 
00038 #ifdef SIZEOF_LONG
00039 # if SIZEOF_LONG == 8
00040 #  define SIXTY_FOUR_BIT_INTEGERS
00041 # endif
00042 #endif
00043 
00044 /****************************************************************************/
00045 /* Option bundles                                                           */
00046 /****************************************************************************/
00047 
00048 #ifndef SGC_STD_DEBUGGING
00049 # define SGC_STD_DEBUGGING 0
00050 #endif
00051 
00052 #ifdef WIN32
00053 # define SGC_STD_DEBUGGING_UNIX 0
00054 # define SGC_STD_DEBUGGING_WINDOWS SGC_STD_DEBUGGING
00055 #else
00056 # define SGC_STD_DEBUGGING_UNIX SGC_STD_DEBUGGING
00057 # define SGC_STD_DEBUGGING_WINDOWS 0
00058 #endif
00059 
00060 #ifndef SGC_AUTO_ROOTS
00061 # define SGC_AUTO_ROOTS 0
00062 #endif
00063 
00064 /****************************************************************************/
00065 /* Options and debugging flags                                              */
00066 /****************************************************************************/
00067 
00068 #define NO_COLLECTIONS 0
00069 /* Disable all collections */
00070 
00071 #define NO_DISAPPEARING 0
00072 /* Never perform disappearing link */
00073 
00074 #define NO_FINALIZING 0
00075 /* Never invoke queued finalizers */
00076 
00077 #define NO_ATOMIC 0
00078 /* Never treat allocated data as atomic */
00079 
00080 #define KEEP_BLOCKS_FOREVER 0
00081 /* Number of block sector assignments to keep indefinitely, rather
00082    than recycling them immediately. This speeds up collection
00083    and allocation, but it also costs memory via fragmentation. */
00084 
00085 #define NO_STACK_OFFBYONE 0
00086 /* Don't notice a stack-based pointer just past the end of an object */
00087 
00088 #define WATCH_FOR_FINALIZATION_CYCLES 0
00089 /* Report cycles in finalizable chunks */
00090 
00091 #define USE_GC_FREE_SPACE_DIVISOR 1
00092 /* Grow stack in a way similar to Boehm's GC using the global
00093    GC_free_space_divisor. 0 => GC after allocating twice the
00094    active size from the last GC. */
00095 
00096 #define PROVIDE_GC_FREE 0
00097 /* Provide GC_free; automatically implies DISTINGUISH_FREE_FROM_UNMARKED */
00098 
00099 #define PROVIDE_CHUNK_GC_FREE 1
00100 /* Provide GC_free that only works on chunks (i.e., large allocation
00101    blocks); frees on small blacks are ignored */
00102 
00103 #define PROVIDE_MALLOC_AND_FREE 0
00104 /* Defines malloc(), realloc(), calloc(), and  free() in terms of 
00105    the collector. Platform-specific allocation routines (e.g., sbrk())
00106    must then be used for low-level allocations by the collector;
00107    turn on one of: GET_MEM_VIA_SBRK or GET_MEM_VIA_MMAP.
00108    This will not work if fprintf uses malloc of free. Turning on
00109    FPRINTF_USE_PRIM_STRINGOUT can solve this problem.
00110    Automatically implies PROVIDE_GC_FREE, but adds extra checks to
00111    CHECK_FREES if PROVIDE_GC_FREE was not otherwise on */
00112 
00113 #define GET_MEM_VIA_SBRK 0
00114 /* Instead of calling malloc() to get low-level memory, use
00115    sbrk() directly. (Unix) */
00116 
00117 #define GET_MEM_VIA_MMAP SGC_STD_DEBUGGING_UNIX
00118 /* Instead of calling malloc() to get low-level memory, use
00119    mmap() directly. (Unix) */
00120 
00121 #define GET_MEM_VIA_VIRTUAL_ALLOC SGC_STD_DEBUGGING_WINDOWS
00122 /* Instead of calling malloc() to get low-level memory, use
00123    VirtualAlloc() directly. (Win32) */
00124 
00125 #define RELEASE_UNUSED_SECTORS 1
00126 /* Instead of managing a list of unused sectors, they are
00127    given back to the OS. This only works with mmap(). */
00128 
00129 #define DISTINGUISH_FREE_FROM_UNMARKED 0
00130 /* Don't let conservatism resurrect a previously-collected block */
00131 
00132 #define TERSE_MEMORY_TRACING 0
00133 /* Automatically implies ALLOW_TRACE_COUNT, ALLOW_TRACE_PATH,
00134    ALLOW_SET_FINALIZER, CHECK_SKIP_MARK_AT_FIRST, and CHECK_WATCH_FOR_PTR_ALLOC */
00135 
00136 #define STD_MEMORY_TRACING SGC_STD_DEBUGGING
00137 /* Automatically implies TERSE_MEMORY_TRACING, DUMP_BLOCK_COUNTS,
00138    and DUMP_SECTOR_MAP */
00139 
00140 #define DETAIL_MEMORY_TRACING 0
00141 /* Automatically implies STD_MEMORY_TRACING, DUMP_BLOCK_MAPS, 
00142    STAMP_AND_REMEMBER_SOURCE, and KEEP_DETAIL_PATH */
00143 
00144 #define STAMP_AND_REMEMBER_SOURCE 0
00145 /* Keep timestamps and source of marking from collection */
00146 
00147 #define ALLOW_TRACE_COUNT 0
00148 /* Support collection-based trace count callbacks */
00149 
00150 #define ALLOW_TRACE_PATH 0
00151 /* Support collection-based trace path callbacks */
00152 
00153 #define KEEP_DETAIL_PATH SGC_STD_DEBUGGING
00154 /* Keep source offsets for path traces */
00155 
00156 #define CHECK_SKIP_MARK_AT_FIRST 0
00157 /* Enables skipping certain marks during collection from the inital
00158    root supplied as GC_initial_trace_root */
00159 
00160 #define ALLOW_SET_LOCKING 0
00161 /* Enables locking out collections for a specific set. */
00162 
00163 #define ALLOW_SET_FINALIZER 0
00164 /* Support the per-set custom "de-allocation" callback. */
00165 
00166 #define CHECK_WATCH_FOR_PTR_ALLOC SGC_STD_DEBUGGING
00167 /* Set GC_watch_for_ptr to be ~(ptr value);
00168    there are 3 places where the ptr is checked, 
00169    unless USE_WATCH_FOUND_FUNC is on */
00170 
00171 #define USE_WATCH_FOUND_FUNC SGC_STD_DEBUGGING
00172 /* Calls GC_found_watch when the watch-for ptr is found. */
00173 
00174 #define PAD_BOUNDARY_BYTES SGC_STD_DEBUGGING
00175 /* Put a known padding pattern around every allocated
00176    block to test for array overflow/underflow.
00177    Pad-testing is performed at the beginning of every GC.
00178    Automatically implies CHECK_SIMPLE_INTERIOR_POINTERS */
00179 
00180 #define CHECK_SIMPLE_INTERIOR_POINTERS 0
00181 /* Recognize pointers into the middle of an allocated
00182    block, (but do not recognize pointers just past the
00183    end of an allocated block, as is generally performed
00184    for stack-based pointers). */
00185 
00186 #define DUMP_BLOCK_COUNTS 1
00187 /* GC_dump prints detail information about block and
00188    set size contents. */
00189 
00190 #define DUMP_SECTOR_MAP 1
00191 /* GC_dump prints detail information about existing
00192    sectors. */
00193 #ifdef SIXTY_FOUR_BIT_INTEGERS
00194 # undef DUMP_SECTOR_MAP
00195 # define DUMP_SECTOR_MAP 0
00196 #endif
00197 
00198 #define DUMP_BLOCK_MAPS 1 /* 0 */
00199 /* GC_dump prints detail information about block and
00200    set address contents. Automatically implies
00201    DUMP_BLOCK_COUNTS. */
00202 
00203 #define CHECK_FREES SGC_STD_DEBUGGING
00204 /* Print an error for redundant frees by calling free_error */
00205 
00206 #define FPRINTF_USE_PRIM_STRINGOUT SGC_STD_DEBUGGING_WINDOWS
00207 /* Avoids using fprintf while the GC is running, printing
00208    messages instead as pure strings passed to
00209     void GC_prim_stringout(char *s, int len);
00210    which must be provided at link time, or one of
00211    PRIM_STRINGOUT_AS_FWRITE or PRIM_STRINGOUT_AS_WINDOWS_CONSOLE */
00212 
00213 #define PRIM_STRINGOUT_AS_FWRITE 0
00214 /* Implements GC_prim_stringout using fwrite. Not likely to
00215    solve any problems, but useful for debugging FPRINTF. */
00216 
00217 #define PRIM_STRINGOUT_AS_WINDOWS_CONSOLE SGC_STD_DEBUGGING_WINDOWS
00218 /* Implements GC_prim_stringout using Windows console
00219    functions. */
00220 
00221 #define AUTO_STATIC_ROOTS_IF_POSSIBLE SGC_AUTO_ROOTS
00222 /* Automatically registers static C variables as roots if
00223    platform-specific code is porvided */
00224 
00225 #define PRINT_INFO_PER_GC SGC_STD_DEBUGGING
00226 /* Writes to stderr before an after each GC, summarizing
00227    the state of memory and current system time at each point. */
00228 
00229 #define SHOW_SECTOR_MAPS_AT_GC 0
00230 /* Write a sector map before and after each GC. This is helpful for
00231    noticing unusual memory pattens, such as allocations of large
00232    blocks or unusually repetitive allocations. Only works with
00233    PRINT_INFO_PER_GC. */
00234 
00235 /****************************************************************************/
00236 /* Parameters and platform-specific settings                                */
00237 /****************************************************************************/
00238 
00239 /* GC frequency: MEM_USE_FACTOR is max factor between current
00240    allocated bytes and alocated bytes after last GC. */
00241 #ifdef SMALL_HASH_TABLES
00242 # define FIRST_GC_LIMIT 20000
00243 # define MEM_USE_FACTOR 1.40
00244 #else
00245 # define FIRST_GC_LIMIT 100000
00246 # define MEM_USE_FACTOR 3
00247 #endif
00248 
00249 #ifdef DOS_FAR_POINTERS
00250 # include <dos.h>
00251 # include <alloc.h>
00252 # define PTR_TO_INT(v) (((((long)FP_SEG(v)) & 0xFFFF) << 4) + FP_OFF(v))
00253 # define INT_TO_PTR(v) ((void *)((((v) >> 4) << 16) + ((v) & 0xF)))
00254 # if !PROVIDE_MALLOC_AND_FREE
00255 #  define MALLOC farmalloc
00256 #  define FREE farfree
00257 # endif
00258 #else
00259 # define PTR_TO_INT(v) ((unsigned long)(v))
00260 # define INT_TO_PTR(v) ((void *)(v))
00261 # if !PROVIDE_MALLOC_AND_FREE
00262 #  define MALLOC malloc
00263 #  define FREE free
00264 # endif
00265 #endif
00266 
00267 #if GET_MEM_VIA_SBRK
00268 # include <unistd.h>
00269 #endif
00270 #if GET_MEM_VIA_MMAP
00271 # include <unistd.h>
00272 # include <sys/mman.h>
00273 # include <sys/types.h>
00274 # include <sys/stat.h>
00275 # include <fcntl.h>
00276 #endif
00277 #ifdef WIN32
00278 # include <windows.h>
00279 #endif
00280 
00281 #if !GET_MEM_VIA_MMAP
00282 # undef RELEASE_UNUSED_SECTORS
00283 # define RELEASE_UNUSED_SECTORS 0
00284 #endif
00285 
00286 /* System-specific alignment of pointers. */
00287 #ifdef SIXTY_FOUR_BIT_INTEGERS
00288 # define PTR_ALIGNMENT 8
00289 # define LOG_PTR_SIZE 3
00290 # define LOW_32_BITS(x) (x & 0xFFFFFFFF)
00291 #else
00292 # define PTR_ALIGNMENT 4
00293 # define LOG_PTR_SIZE 2
00294 # define LOW_32_BITS(x) x
00295 #endif
00296 # define PTR_SIZE (1 << LOG_PTR_SIZE)
00297 
00298 #define DOUBLE_SIZE sizeof(double)
00299 
00300 /* SECTOR_SEGMENT_SIZE determines the alignment of collector blocks.
00301    Since it should be a power of 2, LOG_SECTOR_SEGMENT_SIZE is
00302    specified directly. A larger block size speeds up GC, but wastes
00303    more unallocated bytes in same-size buckets. */
00304 #define LOG_SECTOR_SEGMENT_SIZE 12
00305 #define SECTOR_SEGMENT_SIZE (1 << LOG_SECTOR_SEGMENT_SIZE)
00306 #define SECTOR_SEGMENT_MASK (~(SECTOR_SEGMENT_SIZE-1))
00307 
00308 /* MAX_COMMON_SIZE is maximum size of allocated blocks
00309    using relatively efficient memory layouts. */
00310 #define MAX_COMMON_SIZE (SECTOR_SEGMENT_SIZE >> 2)
00311 
00312 #define NUM_COMMON_SIZE ((2 * LOG_SECTOR_SEGMENT_SIZE) + 8)
00313 
00314 /* Number of sector segments to be allocated at once with
00315    malloc() to avoid waste when obtaining the proper alignment. */
00316 #define SECTOR_SEGMENT_GROUP_SIZE 32
00317 
00318 /* Number of bits used in 32-bit level table for checking existance of
00319    a sector. Creates a table of (1 << SECTOR_LOOKUP_SHIFT) pointers
00320    to individual page tables of size SECTOR_LOOKUP_PAGESIZE. */
00321 #define SECTOR_LOOKUP_PAGESETBITS 12
00322 
00323 #define LOG_MAP_PTR_SIZE 2
00324 #define MAP_PTR_SIZE 4
00325 
00326 #define SECTOR_LOOKUP_SHIFT ((MAP_PTR_SIZE*8) - SECTOR_LOOKUP_PAGESETBITS)
00327 #define LOG_SECTOR_LOOKUP_PAGESIZE ((MAP_PTR_SIZE*8) - SECTOR_LOOKUP_PAGESETBITS - LOG_SECTOR_SEGMENT_SIZE)
00328 #define SECTOR_LOOKUP_PAGESIZE (1 << LOG_SECTOR_LOOKUP_PAGESIZE)
00329 #define SECTOR_LOOKUP_PAGEMASK (SECTOR_LOOKUP_PAGESIZE - 1)
00330 
00331 #define SECTOR_LOOKUP_PAGETABLE(x) (LOW_32_BITS(x) >> SECTOR_LOOKUP_SHIFT)
00332 #define SECTOR_LOOKUP_PAGEPOS(x) ((LOW_32_BITS(x) >> LOG_SECTOR_SEGMENT_SIZE) & SECTOR_LOOKUP_PAGEMASK)
00333 
00334 #define LOG_SECTOR_PAGEREC_SIZE (LOG_PTR_SIZE + 1)
00335 
00336 /***************************************************************************/
00337 
00338 /* Implementation Terminology:
00339     "sector" - A low-level block of memory. Given an arbitrary
00340                pointer value, whether it is contained in a sector and
00341               the starting point of the sector can be determined in
00342               constant time.
00343     "segment" - A portion of a sector, aligned on SECTOR_SEGMENT_SIZE 
00344                boundaries.
00345     "page" - part of a table for a (partial) mapping from addresses to 
00346                segments
00347     "block" or "common block" - A block for small memory allocations. Blocks
00348                provide allocation by partitioning a sector.
00349     "chunk" - A block of memory too large to be a common block. Each chunk
00350                is allocated in its own sector.
00351     "set" - A collection of blocks & chunks asscoaited with a particular
00352                name, "deallocation" function, trace function, etc.
00353 */
00354 
00355 /***************************************************************************/
00356 
00357 /* Debugging the collector: */
00358 #define CHECK 0
00359 #define PRINT 0
00360 #define TIME 0
00361 #define ALWAYS_TRACE 0
00362 #define CHECK_COLLECTING 0
00363 #define MARK_STATS 0
00364 #define ALLOC_STATS 0
00365 #define FINISH_STATS 0
00366 
00367 #if DETAIL_MEMORY_TRACING
00368 # undef STD_MEMORY_TRACING
00369 # undef STAMP_AND_REMEMBER_SOURCE
00370 # undef DUMP_BLOCK_MAPS
00371 # undef KEEP_DETAIL_PATH
00372 # define STD_MEMORY_TRACING 1
00373 # define STAMP_AND_REMEMBER_SOURCE 1
00374 # define DUMP_BLOCK_MAPS 1
00375 # define KEEP_DETAIL_PATH 1
00376 #endif
00377 
00378 #if STD_MEMORY_TRACING
00379 # undef TERSE_MEMORY_TRACING
00380 # undef DUMP_BLOCK_COUNTS
00381 # define TERSE_MEMORY_TRACING 1
00382 # define DUMP_BLOCK_COUNTS 1
00383 #endif
00384 
00385 #if TERSE_MEMORY_TRACING
00386 # undef ALLOW_TRACE_COUNT
00387 # undef ALLOW_TRACE_PATH
00388 # undef ALLOW_SET_FINALIZER
00389 # undef CHECK_WATCH_FOR_PTR_ALLOC
00390 # undef CHECK_SKIP_MARK_AT_FIRST
00391 # define ALLOW_TRACE_COUNT 1
00392 # define ALLOW_TRACE_PATH 1
00393 # define ALLOW_SET_FINALIZER 1
00394 # define CHECK_WATCH_FOR_PTR_ALLOC 1
00395 # define CHECK_SKIP_MARK_AT_FIRST 1
00396 #endif
00397 
00398 #if PAD_BOUNDARY_BYTES
00399 # undef CHECK_SIMPLE_INTERIOR_POINTERS
00400 # define CHECK_SIMPLE_INTERIOR_POINTERS 1
00401 #endif
00402 
00403 #if DUMP_BLOCK_MAPS
00404 # undef DUMP_BLOCK_COUNTS
00405 # define DUMP_BLOCK_COUNTS 1
00406 #endif
00407 
00408 #if PROVIDE_MALLOC_AND_FREE
00409 # if !PROVIDE_GC_FREE
00410 #  define EXTRA_FREE_CHECKS 1
00411 # endif
00412 # undef PROVIDE_GC_FREE
00413 # define PROVIDE_GC_FREE 1
00414 #else
00415 # define EXTRA_FREE_CHECKS 0
00416 #endif
00417 
00418 #if PROVIDE_GC_FREE
00419 # undef DISTINGUISH_FREE_FROM_UNMARKED
00420 # define DISTINGUISH_FREE_FROM_UNMARKED 1
00421 # undef PROVIDE_CHUNK_GC_FREE
00422 # define PROVIDE_CHUNK_GC_FREE 1
00423 #endif
00424 
00425 #if ALLOW_TRACE_COUNT || ALLOW_TRACE_PATH || PROVIDE_GC_FREE
00426 # define KEEP_SET_NO 1
00427 # define KEEP_CHUNK_SET_NO 1
00428 #else
00429 # define KEEP_SET_NO 0
00430 # if PROVIDE_CHUNK_GC_FREE
00431 #  define KEEP_CHUNK_SET_NO 1
00432 # endif
00433 #endif
00434 
00435 #if PROVIDE_CHUNK_GC_FREE
00436 # define KEEP_PREV_PTR 1
00437 #else
00438 # define KEEP_PREV_PTR 0
00439 #endif
00440 
00441 #ifndef NULL
00442 # define NULL 0L
00443 #endif
00444 
00445 #if PAD_BOUNDARY_BYTES
00446 /* Pad start & end must be a multiple of DOUBLE_SIZE */
00447 # define PAD_START_SIZE (2 * sizeof(long))
00448 # define PAD_END_SIZE PAD_START_SIZE
00449 # define PAD_PATTERN 0x7c8c9cAc
00450 # define PAD_FILL_PATTERN 0xc7
00451 # define SET_PAD(p, s, os) set_pad(p, s, os)
00452 # define PAD_FORWARD(p) ((void *)(((char *)p) + PAD_START_SIZE))
00453 # define PAD_BACKWARD(p) ((void *)(((char *)p) - PAD_START_SIZE))
00454 #else
00455 # define PAD_FORWARD(p) (p)
00456 # define PAD_BACKWARD(p) (p)
00457 #endif
00458 
00459 /* A root to start with, when non-zero. Useful for tracing from a
00460    particular object. The skip function is used when
00461    CHECK_SKIP_MARK_AT_FIRST to skip certain objects when marking from
00462    this root (when the function return 1) */
00463 void *GC_initial_trace_root;
00464 int (*GC_inital_root_skip)(void *, size_t);
00465 
00466 void (*GC_out_of_memory)(void);
00467 
00468 /* Sector types: */
00469 enum {
00470   sector_kind_block,
00471   sector_kind_free,
00472 #if !RELEASE_UNUSED_SECTORS
00473   sector_kind_freed,
00474 #else
00475 # define sector_kind_freed sector_kind_free
00476 #endif
00477   sector_kind_chunk,
00478   sector_kind_managed,
00479   sector_kind_other
00480 };
00481 
00482 typedef struct BlockOfMemory {
00483   struct Finalizer *finalizers;
00484   unsigned long start;
00485   unsigned long end;
00486   unsigned long top;
00487   short size;
00488   short atomic;
00489   short elem_per_block;
00490   short free_search_start, free_search_bit, free_search_offset;
00491 #if KEEP_SET_NO
00492   short set_no;
00493 #endif
00494   int *positions; /* maps displacement in ptrs => position in objects */
00495   struct BlockOfMemory *next;
00496 #if STAMP_AND_REMEMBER_SOURCE
00497   long make_time;
00498   long use_time;
00499   unsigned long low_marker;
00500   unsigned long high_marker;
00501 #endif
00502   unsigned char free[1];
00503 } BlockOfMemory;
00504 
00505 #if DISTINGUISH_FREE_FROM_UNMARKED
00506 
00507 # define FREE_BIT_PER_ELEM 4
00508 # define LOG_FREE_BIT_PER_ELEM 2
00509 # define FREE_BIT_SIZE (8 >> LOG_FREE_BIT_PER_ELEM)
00510 # define FREE_BIT_START 0x2 
00511 # define UNMARK_BIT_START 0x1
00512 
00513 # define POS_TO_FREE_INDEX(p) (p >> LOG_FREE_BIT_PER_ELEM)
00514 # define POS_TO_UNMARK_INDEX(p) (p >> LOG_FREE_BIT_PER_ELEM)
00515 # define POS_TO_FREE_BIT(p) (FREE_BIT_START << ((p & (FREE_BIT_PER_ELEM - 1)) << 1))
00516 # define POS_TO_UNMARK_BIT(p) (UNMARK_BIT_START << ((p & (FREE_BIT_PER_ELEM - 1)) << 1))
00517 
00518 # define ALL_UNMARKED 0x55
00519 # define ALL_FREE 0xAA
00520 
00521 # define _NOT_FREE(x) NOT_FREE(x)
00522 
00523 # define SHIFT_UNMARK_TO_FREE(x) ((x & ALL_UNMARKED) << 1)
00524 # define SHIFT_COPY_FREE_TO_UNMARKED(x) ((x & ALL_FREE) | ((x & ALL_FREE) >> 1))
00525 
00526 #else /* !DISTINGUISH_FREE_FROM_UNMARKED */
00527 
00528 # define FREE_BIT_PER_ELEM 8
00529 # define LOG_FREE_BIT_PER_ELEM 3
00530 # define FREE_BIT_SIZE (8 >> LOG_FREE_BIT_PER_ELEM)
00531 # define FREE_BIT_START 0x1
00532 # define UNMARK_BIT_START 0x1
00533 
00534 # define POS_TO_FREE_INDEX(p) (p >> LOG_FREE_BIT_PER_ELEM)
00535 # define POS_TO_UNMARK_INDEX(p) (p >> LOG_FREE_BIT_PER_ELEM)
00536 # define POS_TO_FREE_BIT(p) (FREE_BIT_START << (p & (FREE_BIT_PER_ELEM - 1)))
00537 # define POS_TO_UNMARK_BIT(p) (UNMARK_BIT_START << (p & (FREE_BIT_PER_ELEM - 1)))
00538 
00539 # define ALL_UNMARKED 0xFF
00540 
00541 # define _NOT_FREE(x) 1
00542 
00543 #endif /* DISTINGUISH_FREE_FROM_UNMARKED */
00544 
00545 #define NOT_FREE(x) (!(x))
00546 #define IS_FREE(x) (x)
00547 #define NOT_MARKED(x) (x)
00548 #define IS_MARKED(x) (!(x))
00549 
00550 #define ELEM_PER_BLOCK(b) b->elem_per_block
00551 
00552 typedef struct MemoryChunk {
00553   struct Finalizer *finalizers;
00554   unsigned long start;
00555   unsigned long end;
00556   struct MemoryChunk *next;
00557 #if KEEP_PREV_PTR
00558   struct MemoryChunk **prev_ptr;
00559 #endif
00560   short atomic;
00561   short marked;
00562 #if STAMP_AND_REMEMBER_SOURCE
00563   long make_time;
00564   unsigned long marker;
00565 #endif
00566 #if KEEP_CHUNK_SET_NO
00567   int set_no;
00568 #endif
00569   char data[1];
00570 } MemoryChunk;
00571 
00572 /* If this changes size from 2 ptrs, change LOG_SECTOR_PAGEREC_SIZE */
00573 typedef struct {
00574   long kind;  /* sector_kind_other, etc. */
00575   unsigned long start; /* Sector start; may not be accurate if the segment
00576                           is deallocated, but 0 => not in any sector */
00577 } SectorPage;
00578 
00579 #if !RELEASE_UNUSED_SECTORS
00580 # include "../utils/splay.c"
00581 
00582 typedef struct SectorFreepage {
00583   long size; 
00584   unsigned long start; /* Sector start */
00585   unsigned long end; /* start of next */
00586   Tree by_start;
00587   Tree by_end;
00588   Tree by_start_per_size;
00589   Tree by_size;
00590   Tree *same_size;
00591 } SectorFreepage;
00592 
00593 static Tree *sector_freepage_by_start;
00594 static Tree *sector_freepage_by_end;
00595 static Tree *sector_freepage_by_size;
00596 
00597 #define TREE_FP(t) ((SectorFreepage *)(t->data))
00598 
00599 static Tree *next(Tree *node)
00600 {
00601   node = node->right;
00602   if (node) {
00603     while (node->left) {
00604       node = node->left;
00605     }
00606     return node;
00607   } else
00608     return NULL;
00609 }
00610 
00611 static void remove_freepage(SectorFreepage *fp)
00612 {
00613   /* Remove fp from freelists: */
00614   sector_freepage_by_start = splay_delete(fp->start, sector_freepage_by_start);
00615   sector_freepage_by_end = splay_delete(fp->end, sector_freepage_by_end);
00616   sector_freepage_by_size = splay(fp->size, sector_freepage_by_size);
00617   if (TREE_FP(sector_freepage_by_size) == fp) {
00618     /* This was the representative for its size; remove it. */
00619     sector_freepage_by_size = splay_delete(fp->size, sector_freepage_by_size);
00620     if (fp->same_size) {
00621       SectorFreepage *same;
00622       same = TREE_FP(fp->same_size);
00623       same->same_size = splay_delete(same->start, fp->same_size);
00624       sector_freepage_by_size = splay_insert(same->size, &same->by_size, sector_freepage_by_size);
00625     }
00626   } else {
00627     /* Not the top-level representative; remove it from the representative's
00628        same_size tree */
00629     SectorFreepage *same;
00630     same = TREE_FP(sector_freepage_by_size);
00631     same->same_size = splay_delete(fp->start, same->same_size);
00632   }
00633 }
00634 
00635 static void add_freepage(SectorFreepage *naya)
00636 {
00637   naya->by_start.data = (void *)naya;
00638   sector_freepage_by_start = splay_insert(naya->start, &naya->by_start, sector_freepage_by_start);
00639   naya->by_end.data = (void *)naya;
00640   sector_freepage_by_end = splay_insert(naya->end, &naya->by_end, sector_freepage_by_end);
00641   naya->by_size.data = (void *)naya;
00642   sector_freepage_by_size = splay_insert(naya->size, &naya->by_size, sector_freepage_by_size);
00643   if (TREE_FP(sector_freepage_by_size) != naya) {
00644     /* This size was already in the tree; add it to the next_size list, instead */
00645     SectorFreepage *already = TREE_FP(sector_freepage_by_size);
00646     naya->by_start_per_size.data = (void *)naya;
00647     already->same_size = splay_insert(naya->start, &naya->by_start_per_size, already->same_size);
00648   } else
00649     naya->same_size = NULL;
00650 }
00651 #endif /* !RELEASE_UNUSED_SECTORS */
00652 
00653 #define TABLE_HI_SHIFT LOG_SECTOR_SEGMENT_SIZE
00654 #define TABLE_LO_MASK (SECTOR_SEGMENT_SIZE-1)
00655 #define EACH_TABLE_COUNT (1 << (LOG_SECTOR_SEGMENT_SIZE - LOG_PTR_SIZE))
00656 
00657 typedef struct GC_Set {
00658   short atomic, uncollectable;
00659 #if ALLOW_SET_LOCKING
00660   short locked;
00661 #endif
00662   char *name;
00663   BlockOfMemory **blocks;
00664   BlockOfMemory **block_ends;
00665   MemoryChunk **othersptr;
00666 #if DUMP_BLOCK_COUNTS
00667   unsigned long total;
00668 #endif
00669 #if ALLOW_TRACE_COUNT
00670   GC_count_tracer count_tracer;
00671 #endif
00672 #if ALLOW_TRACE_PATH
00673   GC_path_tracer path_tracer;
00674 #endif
00675 #if ALLOW_TRACE_COUNT || ALLOW_TRACE_PATH
00676   GC_trace_init trace_init;
00677   GC_trace_done trace_done;
00678 #endif
00679 #if KEEP_SET_NO || KEEP_CHUNK_SET_NO
00680   int no;
00681 #endif
00682 #if ALLOW_SET_FINALIZER
00683   GC_set_elem_finalizer finalizer;
00684 #endif
00685 } GC_Set;
00686 
00687 typedef struct GC_SetWithOthers {
00688   GC_Set c;
00689   MemoryChunk *others;
00690 } GC_SetWithOthers;
00691 
00692 static GC_Set **common_sets;
00693 static int num_common_sets;
00694 
00695 static BlockOfMemory *common[2 * NUM_COMMON_SIZE]; /* second half is `ends' array */
00696 static BlockOfMemory *atomic_common[2 * NUM_COMMON_SIZE];
00697 static BlockOfMemory *uncollectable_common[2 * NUM_COMMON_SIZE];
00698 static BlockOfMemory *uncollectable_atomic_common[2 * NUM_COMMON_SIZE];
00699 static MemoryChunk *others, *atomic_others;
00700 static MemoryChunk *uncollectable_others, *uncollectable_atomic_others;
00701 
00702 static int *common_positionses[NUM_COMMON_SIZE];
00703 
00704 #if PROVIDE_MALLOC_AND_FREE
00705 static BlockOfMemory *sys_malloc[2 * NUM_COMMON_SIZE];
00706 static MemoryChunk *sys_malloc_others;
00707 #endif
00708 
00709 #define do_malloc_ATOMIC 0x1
00710 #define do_malloc_UNCOLLECTABLE 0x2
00711 #if NO_ATOMIC
00712 # define do_malloc_ATOMIC_UNLESS_DISABLED 0
00713 #else
00714 # define do_malloc_ATOMIC_UNLESS_DISABLED do_malloc_ATOMIC
00715 #endif
00716 
00717 int GC_dl_entries;
00718 int GC_fo_entries;
00719 
00720 void (*GC_push_last_roots)(void);
00721 void (*GC_push_last_roots_again)(void);
00722 
00723 enum {
00724   dl_normal,
00725   dl_restored,
00726   dl_late
00727 };
00728 
00729 typedef struct DisappearingLink {
00730   short kind;
00731   void *watch;
00732   void **disappear;
00733   void *saved_value;
00734   struct DisappearingLink *prev, *next;
00735 } DisappearingLink;
00736 
00737 typedef struct Finalizer {
00738   union {
00739     void *watch; /* pointer to finalized block; used after queued */
00740     int pos;     /* position within common block; used before queued */
00741   } u;
00742   short eager_level;
00743   short ignore_self;
00744   void *data;
00745   void (*f)(void *p, void *data);
00746   struct Finalizer *prev, *next; /* also not needed for chunks */
00747 } Finalizer;
00748 
00749 static DisappearingLink *disappearing, *late_disappearing;
00750 static Finalizer *queued_finalizers, *last_queued_finalizer;
00751 static int num_queued_finalizers;
00752 
00753 static unsigned long sector_low_plausible, sector_high_plausible;
00754 static unsigned long low_plausible, high_plausible;
00755 
00756 void *GC_stackbottom;
00757 
00758 static long mem_use, mem_limit = FIRST_GC_LIMIT;
00759 #if USE_GC_FREE_SPACE_DIVISOR
00760 int GC_free_space_divisor = 4;
00761 #endif
00762 
00763 static long mem_real_use, mem_uncollectable_use;
00764 
00765 static long sector_mem_use, sector_admin_mem_use, sector_free_mem_use;
00766 static long manage_mem_use, manage_real_mem_use;
00767 
00768 static long collect_mem_use;
00769 
00770 static long num_sector_allocs, num_sector_frees;
00771 
00772 #if ALLOW_TRACE_COUNT
00773 static long mem_traced;
00774 #endif
00775 
00776 static long num_chunks;
00777 static long num_blocks;
00778 
00779 GC_collect_start_callback_Proc GC_collect_start_callback;
00780 GC_collect_end_callback_Proc GC_collect_end_callback;
00781 void (*GC_custom_finalize)(void);
00782 
00783 GC_collect_start_callback_Proc GC_set_collect_start_callback(GC_collect_start_callback_Proc func) {
00784   GC_collect_start_callback_Proc old;
00785   old = GC_collect_start_callback;
00786   GC_collect_start_callback = func;
00787   return old;
00788 }
00789 GC_collect_end_callback_Proc GC_set_collect_end_callback(GC_collect_end_callback_Proc func) {
00790   GC_collect_end_callback_Proc old;
00791   old = GC_collect_end_callback;
00792   GC_collect_end_callback = func;
00793   return old;
00794 }
00795 
00796 
00797 static long roots_count;
00798 static long roots_size;
00799 static unsigned long *roots;
00800 
00801 #if STAMP_AND_REMEMBER_SOURCE
00802 static long stamp_clock = 0;
00803 #endif
00804 
00805 static long *size_index_map; /* (1/PTR_SIZE)th of requested size to alloc index */
00806 static long *size_map; /* alloc index to alloc size */
00807 
00808 #if CHECK_COLLECTING
00809 static int collecting_now;
00810 #endif
00811 
00812 #if FPRINTF_USE_PRIM_STRINGOUT
00813 static void sgc_fprintf(int, const char *, ...);
00814 # define FPRINTF sgc_fprintf
00815 # define STDERR 0
00816 #else
00817 # define FPRINTF fprintf
00818 # define STDERR stderr
00819 #endif
00820 
00821 #if CHECK_FREES
00822 static void free_error(const char *msg)
00823 {
00824   FPRINTF(STDERR, msg);
00825 }
00826 #endif
00827 
00828 /*************************************************************/
00829 /* Page mapping: */
00830 
00831 #ifdef SIXTY_FOUR_BIT_INTEGERS
00832 static SectorPage ***sector_pagetablesss[1 << 16];
00833 # define DECL_SECTOR_PAGETABLES SectorPage **sector_pagetables;
00834 # define GET_SECTOR_PAGETABLES(p) sector_pagetables = create_sector_pagetables(p)
00835 # define FIND_SECTOR_PAGETABLES(p) sector_pagetables = get_sector_pagetables(p)
00836 static void *malloc_plain_sector(int count);
00837 inline static SectorPage **create_sector_pagetables(unsigned long p) {
00838   unsigned long pos;
00839   SectorPage ***sector_pagetabless, **sector_pagetables;
00840   pos = ((unsigned long)p >> 48) & ((1 << 16) - 1);
00841   sector_pagetabless = sector_pagetablesss[pos];
00842   if (!sector_pagetabless) {
00843     int c = (sizeof(SectorPage **) << 16) >> LOG_SECTOR_SEGMENT_SIZE;
00844     sector_pagetabless = (SectorPage ***)malloc_plain_sector(c);
00845     memset(sector_pagetabless, 0, c << LOG_SECTOR_SEGMENT_SIZE);
00846     sector_pagetablesss[pos] = sector_pagetabless;
00847     sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
00848   }
00849   pos = ((unsigned long)p >> 32) & ((1 << 16) - 1);
00850   sector_pagetables = sector_pagetabless[pos];
00851   if (!sector_pagetables) {
00852     int c = (SECTOR_LOOKUP_PAGESETBITS + LOG_PTR_SIZE) - LOG_SECTOR_SEGMENT_SIZE;
00853     if (c < 0)
00854       c = 0;
00855     c = 1 << c;
00856     sector_pagetables = (SectorPage **)malloc_plain_sector(c);
00857     memset(sector_pagetables, 0, c << LOG_SECTOR_SEGMENT_SIZE);
00858     sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
00859     sector_pagetabless[pos] = sector_pagetables;
00860   }
00861   return sector_pagetables;
00862 }
00863 inline static SectorPage **get_sector_pagetables(unsigned long p) {
00864   unsigned long pos;
00865   SectorPage ***sector_pagetabless;
00866   pos = ((unsigned long)p >> 48) & ((1 << 16) - 1);
00867   sector_pagetabless = sector_pagetablesss[pos];
00868   if (!sector_pagetabless)
00869     return NULL;
00870   pos = ((unsigned long)p >> 32) & ((1 << 16) - 1);
00871   return sector_pagetabless[pos];
00872 }
00873 #else
00874 static SectorPage **sector_pagetables;
00875 # define DECL_SECTOR_PAGETABLES 
00876 # define GET_SECTOR_PAGETABLES(p) 
00877 # define FIND_SECTOR_PAGETABLES(p) 
00878 #endif
00879 
00880 /*************************************************************/
00881 
00882 /* 
00883    The kinds of allocation:
00884    
00885      malloc_sector = returns new SECTOR_SEGMENT_SIZE-aligned memory;
00886                      relies on nothing else; the memeory blocks must
00887                    be explicitly freed with free_sector; all GC
00888                    allocation is perfomed via sectors
00889      
00890      malloc_managed = malloc "atomic" block used by GC implementation
00891                       itself; no GCing should occur during the malloc;
00892                     the block is freed with free_managed
00893 
00894      realloc_collect_temp = temporary structures used during gc;
00895                             no other allocation can take place
00896                          during gc, and all memory will be freed
00897                             when GC is done with free_collect_temp
00898 */
00899 
00900 #if GET_MEM_VIA_SBRK
00901 static void *platform_plain_sector(int count)
00902 {
00903   caddr_t cur_brk = (caddr_t)sbrk(0);
00904   long lsbs = (unsigned long)cur_brk & TABLE_LO_MASK;
00905   void *result;
00906     
00907   if (lsbs != 0) {
00908     if ((caddr_t)sbrk(SECTOR_SEGMENT_SIZE - lsbs) == (caddr_t)(-1)) 
00909       return 0;
00910   }
00911 
00912   result = (caddr_t)sbrk((count << LOG_SECTOR_SEGMENT_SIZE));
00913 
00914   if (result == (caddr_t)(-1)) 
00915     return 0;
00916 
00917   return result;
00918 }
00919 #endif
00920 #if GET_MEM_VIA_MMAP
00921 static void *platform_plain_sector(int count)
00922 {
00923 #ifdef MAP_ANON
00924   return mmap(NULL, count << LOG_SECTOR_SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
00925 #else
00926   static int fd;
00927 
00928   if (!fd) {
00929     fd = open("/dev/zero", O_RDWR);
00930   }
00931   
00932   return mmap(0, count << LOG_SECTOR_SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
00933 #endif
00934 }
00935 
00936 static void free_plain_sector(void *p, int count)
00937 {
00938   munmap(p, count << LOG_SECTOR_SEGMENT_SIZE);
00939 }
00940 #endif
00941 #if GET_MEM_VIA_VIRTUAL_ALLOC
00942 static void *platform_plain_sector(int count)
00943 {
00944   /* Since 64k blocks are used up by each call to VirtualAlloc,
00945      use roughly the same trick as in the malloc-based alloc to
00946      avoid wasting ther address space. */
00947 
00948   static int prealloced;
00949   static void *preallocptr;
00950   
00951   if (!prealloced && (count < SECTOR_SEGMENT_GROUP_SIZE)) {
00952     prealloced = SECTOR_SEGMENT_GROUP_SIZE;
00953     preallocptr = VirtualAlloc(NULL, prealloced << LOG_SECTOR_SEGMENT_SIZE,
00954                             MEM_COMMIT | MEM_RESERVE,
00955                             PAGE_READWRITE);            
00956   }
00957   
00958   if (count <= prealloced) {
00959     void *result = preallocptr;
00960     preallocptr = ((char *)preallocptr) + (count << LOG_SECTOR_SEGMENT_SIZE);
00961     prealloced -= count;
00962     return result;
00963   }
00964   
00965   return VirtualAlloc(NULL, count << LOG_SECTOR_SEGMENT_SIZE,
00966                     MEM_COMMIT | MEM_RESERVE,
00967                     PAGE_READWRITE);
00968 }
00969 #endif
00970 
00971 #if !GET_MEM_VIA_SBRK && !GET_MEM_VIA_MMAP && !GET_MEM_VIA_VIRTUAL_ALLOC
00972 # if PROVIDE_MALLOC_AND_FREE
00973   >> 
00974   Error: you must pick a platform-specific allocation mechanism
00975   to support malloc() and free() 
00976   <<
00977 # endif
00978 
00979 static void *platform_plain_sector(int count)
00980 {
00981   static int prealloced;
00982   static void *preallocptr;
00983 
00984   if (!prealloced) {
00985     unsigned long d;
00986 
00987     if (count <= (SECTOR_SEGMENT_GROUP_SIZE-1))
00988       prealloced = SECTOR_SEGMENT_GROUP_SIZE-1;
00989     else
00990       prealloced = count;
00991 
00992     preallocptr = MALLOC((prealloced + 1) << LOG_SECTOR_SEGMENT_SIZE);
00993 
00994     d = ((unsigned long)preallocptr) & TABLE_LO_MASK;
00995     if (d)
00996       preallocptr = ((char *)preallocptr) + (SECTOR_SEGMENT_SIZE - d);
00997   }
00998 
00999   if (prealloced >= count) {
01000     void *r = preallocptr;
01001 
01002     prealloced -= count;
01003     preallocptr = ((char *)preallocptr) + (count << LOG_SECTOR_SEGMENT_SIZE);
01004     
01005     return r;
01006   }
01007 
01008   {
01009     unsigned long d;
01010     void *r;
01011 
01012     r = MALLOC((count + 1) << LOG_SECTOR_SEGMENT_SIZE);
01013 
01014     d = ((unsigned long)r) & TABLE_LO_MASK;
01015     if (d)
01016       r = ((char *)r) + (SECTOR_SEGMENT_SIZE - d);
01017 
01018     return r;
01019   }
01020 }
01021 #endif
01022 
01023 static void *malloc_plain_sector(int count)
01024 {
01025   void *m;
01026 
01027   m = platform_plain_sector(count);
01028 
01029   if (!m) {
01030     if (GC_out_of_memory)
01031       GC_out_of_memory();
01032     FPRINTF(STDERR, "out of memory\n");
01033     exit(-1);
01034   }
01035 
01036   return m;
01037 }
01038 
01039 static void register_sector(void *naya, int need, long kind)
01040 {
01041   unsigned long ns, orig_ns;
01042   int pagetableindex, pageindex, i;
01043   SectorPage *pagetable;
01044   DECL_SECTOR_PAGETABLES;
01045 
01046   orig_ns = ns = PTR_TO_INT(naya);
01047   if (!sector_low_plausible || (ns < sector_low_plausible))
01048     sector_low_plausible = ns;
01049   if (!sector_high_plausible 
01050       || (ns + (need << LOG_SECTOR_SEGMENT_SIZE) > sector_high_plausible))
01051     sector_high_plausible = ns + (need << LOG_SECTOR_SEGMENT_SIZE);
01052 
01053   /* Register pages as existing: */
01054   for (i = need; i--; ns += SECTOR_SEGMENT_SIZE) {
01055     GET_SECTOR_PAGETABLES(ns);
01056     pagetableindex = SECTOR_LOOKUP_PAGETABLE(ns);
01057     pagetable = sector_pagetables[pagetableindex];
01058     if (!pagetable) {
01059       int j, c = (LOG_SECTOR_LOOKUP_PAGESIZE + LOG_SECTOR_PAGEREC_SIZE) - LOG_SECTOR_SEGMENT_SIZE;
01060       if (c < 0)
01061         c = 0;
01062       c = 1 << c;
01063       pagetable = (SectorPage *)malloc_plain_sector(c);
01064       sector_pagetables[pagetableindex] = pagetable;
01065       sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
01066       for (j = 0; j < SECTOR_LOOKUP_PAGESIZE; j++) {
01067        pagetable[j].start = 0; 
01068        pagetable[j].kind = sector_kind_free;
01069       }
01070     }
01071 
01072     pageindex = SECTOR_LOOKUP_PAGEPOS(ns);
01073     pagetable[pageindex].kind = kind;
01074     pagetable[pageindex].start = orig_ns;
01075   }
01076 }
01077 
01078 static void *malloc_sector(long size, long kind, int no_new)
01079 {
01080   long need;
01081   void *naya;
01082 #if !RELEASE_UNUSED_SECTORS
01083   SectorFreepage *fp;
01084 #endif
01085 
01086 #if CHECK_COLLECTING
01087   if (collecting_now) {
01088     free_error("alloc while collecting\n");
01089     return NULL;
01090   }
01091 #endif
01092 
01093   num_sector_allocs++;
01094 
01095 #ifndef SIXTY_FOUR_BIT_INTEGERS
01096   if (!sector_pagetables) {
01097     int i, c = (SECTOR_LOOKUP_PAGESETBITS + LOG_PTR_SIZE) - LOG_SECTOR_SEGMENT_SIZE;
01098     if (c < 0)
01099       c = 0;
01100     c = 1 << c;
01101     sector_pagetables = (SectorPage **)malloc_plain_sector(c);
01102     sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
01103     for (i = 0; i < (1 << SECTOR_LOOKUP_PAGESETBITS); i++)
01104       sector_pagetables[i] = NULL;
01105   }
01106 #endif
01107 
01108   need = (size + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE;
01109 
01110 #if !RELEASE_UNUSED_SECTORS
01111   if (sector_freepage_by_size) {
01112     sector_freepage_by_size = splay(need, sector_freepage_by_size);
01113     if (TREE_FP(sector_freepage_by_size)->size < need) {
01114       /* No exact match, so find the next size up */
01115       Tree *node;
01116       node = next(sector_freepage_by_size);
01117       if (node)
01118        fp = TREE_FP(node);
01119       else
01120        fp = NULL;
01121     } else 
01122       fp = TREE_FP(sector_freepage_by_size);
01123   } else
01124     fp = NULL;
01125   
01126   if (fp) {
01127     remove_freepage(fp);
01128 
01129     naya = INT_TO_PTR(fp->start);
01130     register_sector(naya, need, kind);
01131     if (fp->size > need) {
01132       /* Move freepage info and shrink */
01133       SectorFreepage *naya;
01134       unsigned long nfp;
01135       nfp = fp->start + (need << LOG_SECTOR_SEGMENT_SIZE);
01136       naya = (SectorFreepage *)INT_TO_PTR(nfp);
01137       naya->size = fp->size - need;
01138       naya->start = nfp;
01139       naya->end = fp->end;
01140 
01141       add_freepage(naya);
01142     }
01143 
01144     sector_free_mem_use -= (need << LOG_SECTOR_SEGMENT_SIZE);
01145     return naya;
01146   }
01147 #endif
01148 
01149   if (no_new)
01150     return NULL;
01151 
01152   naya = malloc_plain_sector(need);
01153   sector_mem_use += (need << LOG_SECTOR_SEGMENT_SIZE);
01154   register_sector(naya, need, kind);
01155 
01156   return naya;
01157 }
01158 
01159 static void free_sector(void *p)
01160 {
01161   unsigned long s = PTR_TO_INT(p), t;
01162   int c = 0;
01163 #if !RELEASE_UNUSED_SECTORS
01164   SectorFreepage *fp, *ifp;
01165 #endif
01166 
01167   num_sector_frees++;
01168   
01169   /* Determine the size: */
01170   t = s;
01171   while(1) {
01172     DECL_SECTOR_PAGETABLES;
01173     long pagetableindex = SECTOR_LOOKUP_PAGETABLE(t);
01174     long pageindex = SECTOR_LOOKUP_PAGEPOS(t);
01175     GET_SECTOR_PAGETABLES(t);
01176     if (sector_pagetables[pagetableindex]
01177        && (sector_pagetables[pagetableindex][pageindex].start == s)) {
01178       sector_pagetables[pagetableindex][pageindex].kind = sector_kind_freed;
01179       sector_pagetables[pagetableindex][pageindex].start = 0;
01180       c++;
01181       t += SECTOR_SEGMENT_SIZE;
01182     } else
01183       break;
01184   }
01185 
01186 #if CHECK_FREES
01187   if (!c) {
01188     free_error("bad sector free!\n");
01189     return;
01190   }
01191 #endif
01192 
01193 #if RELEASE_UNUSED_SECTORS
01194   free_plain_sector(p, c);
01195   sector_mem_use -= (c << LOG_SECTOR_SEGMENT_SIZE);
01196 #else
01197   sector_free_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
01198   if (sector_freepage_by_end) {
01199     /* Try merge with a predecessor: */
01200     sector_freepage_by_end = splay(s, sector_freepage_by_end);
01201     ifp = TREE_FP(sector_freepage_by_end);
01202     if (ifp->end == s) {
01203       remove_freepage(ifp);
01204       c += ifp->size;
01205       s = ifp->start;
01206     }
01207     
01208     if (sector_freepage_by_start) {
01209       /* Try merge with a successor: */
01210       sector_freepage_by_start = splay(t, sector_freepage_by_start);
01211       ifp = TREE_FP(sector_freepage_by_start);
01212       if (ifp->start == t) {
01213        remove_freepage(ifp);
01214        c += ifp->size;
01215        t = ifp->end;
01216       }
01217     }
01218   }
01219   
01220   fp = (SectorFreepage *)p;
01221   fp->start = s;
01222   fp->end = t;
01223   fp->size = c;
01224   add_freepage(fp);
01225 #endif
01226 }
01227 
01228 #ifdef WIN32
01229 static int is_sector_segment(void *p)
01230 {
01231   DECL_SECTOR_PAGETABLES;
01232   unsigned long s = PTR_TO_INT(p);
01233   long pagetableindex = SECTOR_LOOKUP_PAGETABLE(s);
01234   long pageindex = SECTOR_LOOKUP_PAGEPOS(s);
01235 
01236   FIND_SECTOR_PAGETABLES(p);
01237   if (!sector_pagetables) return 0;
01238 
01239   return (sector_pagetables[pagetableindex]
01240           && sector_pagetables[pagetableindex][pageindex].start);
01241 }
01242 #endif
01243 
01244 #if GET_MEM_VIA_SBRK
01245 static int c_refcount;
01246 static char *save_brk;
01247 #endif
01248 
01249 static void prepare_collect_temp()
01250 {
01251 #if GET_MEM_VIA_SBRK
01252   save_brk = (char *)sbrk(0);
01253 #else
01254   collect_mem_use = 0;
01255 #endif
01256 }
01257 
01258 static void *realloc_collect_temp(void *v, long oldsize, long newsize)
01259 {
01260 #if GET_MEM_VIA_SBRK
01261   void *naya;
01262 
01263   naya = (void *)sbrk(newsize);
01264   memcpy(naya, v, oldsize);
01265   if (!v)
01266     c_refcount++;
01267   return naya;
01268 #elif GET_MEM_VIA_MMAP
01269   void *naya;
01270   
01271   naya = platform_plain_sector((newsize + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE);
01272   memcpy(naya, v, oldsize);
01273   if (v)
01274     munmap(v, (oldsize + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE);
01275 
01276   return naya;
01277 #elif GET_MEM_VIA_VIRTUAL_ALLOC
01278   void *naya;
01279 
01280   naya = VirtualAlloc(NULL, newsize, 
01281                     MEM_COMMIT | MEM_RESERVE,
01282                     PAGE_READWRITE);
01283   memcpy(naya, v, oldsize);
01284   if (v)
01285     VirtualFree(v, 0, MEM_RELEASE);
01286 
01287   return naya;
01288 #else
01289   void *naya;
01290 
01291   naya = MALLOC(newsize);
01292   memcpy(naya, v, oldsize);
01293   FREE(v);
01294   collect_mem_use += newsize;
01295   return naya;
01296 #endif
01297 }
01298 
01299 static void free_collect_temp(void *v, long oldsize)
01300 {
01301 #if GET_MEM_VIA_SBRK
01302   if (!(--c_refcount)) {
01303     collect_mem_use = (unsigned long)(sbrk(0)) - (unsigned long)save_brk;
01304     brk(save_brk);
01305   }
01306 #elif GET_MEM_VIA_MMAP
01307   munmap(v, (oldsize + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE);
01308 #elif GET_MEM_VIA_VIRTUAL_ALLOC
01309   VirtualFree(v, 0, MEM_RELEASE);
01310 #else
01311   FREE(v);
01312 #endif
01313 }
01314 
01315 typedef struct {
01316   struct ManagedBlock *next;
01317   struct ManagedBlock *prev;
01318   long count;
01319   long size; /* Use size to find bucket */
01320   unsigned long end;
01321 } ManagedBlockHeader;
01322 
01323 typedef struct ManagedBlock {
01324   ManagedBlockHeader head;
01325   char free[1];
01326 } ManagedBlock;
01327 
01328 typedef struct {
01329   long size;
01330   long perblock;
01331   long offset;
01332   ManagedBlock *block;
01333 } ManagedBucket;
01334 
01335 typedef struct {
01336   int num_buckets;
01337   ManagedBucket buckets[1];
01338 } Managed;
01339 
01340 static Managed *managed;
01341 
01342 static void *malloc_managed(long size)
01343 {
01344   /* A naive strategy is sufficient here.
01345      There will be many disappearing links, many
01346      finalizations, and very little of anything else. */
01347   int i, j;
01348   long perblock, offset;
01349   ManagedBlock *mb;
01350   
01351   if (size & PTR_SIZE)
01352     size += PTR_SIZE - (size & PTR_SIZE);
01353 
01354   if (!managed) {
01355     managed = (Managed *)malloc_sector(SECTOR_SEGMENT_SIZE, sector_kind_other, 0);
01356     managed->num_buckets = 0;
01357     manage_real_mem_use += SECTOR_SEGMENT_SIZE;
01358   }
01359 
01360   for (i = 0; i < managed->num_buckets; i++) {
01361     if (managed->buckets[i].size == size)
01362       break;
01363   }
01364 
01365   if (i >= managed->num_buckets) {
01366     managed->num_buckets++;
01367     managed->buckets[i].size = size;
01368     if (size < MAX_COMMON_SIZE) {
01369       int c;
01370 
01371       mb = (ManagedBlock *)malloc_sector(SECTOR_SEGMENT_SIZE, sector_kind_managed, 0);
01372       manage_real_mem_use += SECTOR_SEGMENT_SIZE;
01373       managed->buckets[i].block = mb;
01374 
01375       c = (SECTOR_SEGMENT_SIZE - sizeof(ManagedBlockHeader)) / size;
01376       if (c & (PTR_SIZE - 1))
01377        c += (PTR_SIZE - (c & (PTR_SIZE - 1)));
01378       managed->buckets[i].perblock = (SECTOR_SEGMENT_SIZE - sizeof(ManagedBlockHeader) - c) / size;
01379       managed->buckets[i].offset = c + sizeof(ManagedBlockHeader);
01380     } else {
01381       long l = size + sizeof(ManagedBlockHeader) + PTR_SIZE;
01382       mb = (ManagedBlock *)malloc_sector(l, sector_kind_managed, 0);
01383       manage_real_mem_use += l;
01384       managed->buckets[i].block = mb;
01385       managed->buckets[i].perblock = 1;
01386       managed->buckets[i].offset = sizeof(ManagedBlockHeader) + PTR_SIZE;
01387     }
01388     mb->head.count = 0;
01389     mb->head.size = size;
01390     mb->head.next = NULL;
01391     mb->head.prev = NULL;
01392     perblock = managed->buckets[i].perblock;
01393     for (j = perblock; j--; )
01394       mb->free[j] = 1;
01395     mb->head.end = PTR_TO_INT(mb) + managed->buckets[i].offset + size * perblock;
01396   }
01397 
01398   perblock = managed->buckets[i].perblock;
01399   offset = managed->buckets[i].offset;
01400   mb = managed->buckets[i].block;
01401   while ((mb->head.count == perblock) && mb->head.next)
01402     mb = mb->head.next;
01403   if (mb->head.count == perblock) {
01404     long l = offset + size * perblock;
01405     mb->head.next = (ManagedBlock *)malloc_sector(l, sector_kind_managed, 0);
01406     manage_real_mem_use += l;
01407     mb->head.next->head.prev = mb;
01408     mb = mb->head.next;
01409     mb->head.count = 0;
01410     mb->head.size = size;
01411     mb->head.next = NULL;
01412     for (j = perblock; j--; )
01413       mb->free[j] = 1;
01414     mb->head.end = PTR_TO_INT(mb) + offset + size * perblock;
01415   }
01416 
01417   manage_mem_use += size;
01418 
01419   mb->head.count++;
01420   for (j = perblock; j--; )
01421     if (mb->free[j]) {
01422       mb->free[j] = 0;
01423       return (((char *)mb) + offset) + size * j;
01424     }
01425 
01426   FPRINTF(STDERR, "error allocating managed\n");
01427   return NULL;
01428 }
01429 
01430 static void free_managed(void *s)
01431 {
01432   int i;
01433   unsigned long p;
01434   ManagedBucket *bucket;
01435   ManagedBlock *mb;
01436 
01437   p = PTR_TO_INT(s);
01438 
01439   /* Assume that s really is an allocated managed pointer: */
01440   mb = (ManagedBlock *)INT_TO_PTR((p & SECTOR_SEGMENT_MASK));
01441   
01442   for (i = 0; i < managed->num_buckets; i++) {
01443     bucket = managed->buckets + i;
01444     if (bucket->size == mb->head.size) {
01445       /* Found bucket */
01446       int which;
01447       which = (p - PTR_TO_INT(mb) - bucket->offset) / bucket->size;
01448       if ((which >= 0) && (which < bucket->perblock)) {
01449        if (mb->free[which]) {
01450          FPRINTF(STDERR, "error freeing managed\n");
01451          return;
01452        }
01453        mb->free[which] = 1;
01454        --mb->head.count;
01455        manage_mem_use -= bucket->size;
01456        if (!mb->head.count) {
01457          if (mb->head.prev) {
01458            if (mb->head.next)
01459              mb->head.next->head.prev = mb->head.prev;
01460            mb->head.prev->head.next = mb->head.next;
01461          } else {
01462            if (mb->head.next) {
01463              bucket->block = mb->head.next;
01464              bucket->block->head.prev = NULL;
01465            } else {
01466              /* Empty bucket */
01467              int j;
01468              --managed->num_buckets;
01469              for (j = i; j < managed->num_buckets; j++)
01470               memcpy(&(managed->buckets[j]), &(managed->buckets[j + 1]), sizeof(ManagedBucket));
01471            }
01472          }
01473 
01474          manage_real_mem_use -= (bucket->offset + bucket->size * bucket->perblock);
01475 
01476          free_sector(mb);
01477        }
01478        return;
01479       }
01480     }
01481   }
01482   
01483   FPRINTF(STDERR, "error freeing managed\n");
01484 }
01485 
01486 /*************************************************************/
01487 
01488 static void init_size_map()
01489 {
01490   int i, j, find_half;
01491   long k, next;
01492 
01493   size_index_map = (long *)malloc_sector(MAX_COMMON_SIZE, sector_kind_other, 0);
01494   size_map = (long *)malloc_sector(NUM_COMMON_SIZE * sizeof(long), sector_kind_other, 0);
01495 
01496   /* This is two loops instead of one to avoid a gcc 2.92.2 -O2 x86 bug: */
01497   for (i = 0; i < 8; i++) {
01498     size_index_map[i] = i;
01499   }
01500   for (i = 0; i < 8; i++) {
01501     size_map[i] = (i + 1) * PTR_SIZE;
01502   }
01503   /* i's final value is used below... */
01504 
01505   k = 8;
01506   next = 12;
01507   j = i;
01508   find_half = 1;
01509   while (j < (MAX_COMMON_SIZE >> 2)) {
01510     size_index_map[j] = i;
01511     if ((j + 1) == next) {
01512       size_map[i] = next * PTR_SIZE;
01513       i++;
01514       if (find_half) {
01515        next = 2 * k;
01516       } else {
01517        next = 3 * k;
01518        k = 2 * k;
01519       }
01520       find_half = !find_half;
01521     }
01522     j++;
01523   }
01524   if (i < NUM_COMMON_SIZE)
01525     size_map[i] = next * PTR_SIZE;
01526 
01527 #if 0
01528   FPRINTF(STDERR, "max: %d  num: %d\n", MAX_COMMON_SIZE, NUM_COMMON_SIZE);
01529   for (i = 0; i < (MAX_COMMON_SIZE >> 2); i++) {
01530     FPRINTF(STDERR, "%d->%d=%d;", i, 
01531            size_index_map[i], 
01532            size_map[size_index_map[i]]);
01533   }
01534   FPRINTF(STDERR, "\n");
01535 #endif
01536 }
01537 
01538 /*************************************************************/
01539 
01540 void GC_add_roots(void *start, void *end)
01541 {
01542   if (roots_count >= roots_size) {
01543     unsigned long *naya;
01544 
01545     mem_real_use -= (sizeof(unsigned long) * roots_size);
01546 
01547     roots_size = roots_size ? 2 * roots_size : 500;
01548     naya = (unsigned long *)malloc_managed(sizeof(unsigned long) * (roots_size + 1));
01549 
01550     mem_real_use += (sizeof(unsigned long) * roots_size);
01551 
01552     memcpy((void *)naya, (void *)roots, 
01553           sizeof(unsigned long) * roots_count);
01554 
01555     if (roots)
01556       free_managed(roots);
01557 
01558     roots = naya;
01559   }
01560 
01561   roots[roots_count++] = PTR_TO_INT(start);
01562   roots[roots_count++] = PTR_TO_INT(end) - PTR_ALIGNMENT;
01563 }
01564 
01565 #if AUTO_STATIC_ROOTS_IF_POSSIBLE
01566 # include "autostat.inc"
01567 #endif
01568 
01569 static int statics_setup = 0;
01570 
01571 static void init_static_variables(void)
01572 {
01573 #if AUTO_STATIC_ROOTS_IF_POSSIBLE
01574 # if USE_DATASTARTEND
01575   GC_add_roots(DATASTART, DATAEND);
01576 # endif
01577 # ifdef WIN32
01578   register_static_variables();
01579 # endif
01580 #endif
01581 
01582   statics_setup = 1;
01583 }
01584 
01585 static int initialized = 0;
01586 
01587 static void GC_initialize(void)
01588 {
01589   int i;
01590 
01591 #if PROVIDE_MALLOC_AND_FREE
01592   num_common_sets = 5;
01593 #else
01594   num_common_sets = 4;
01595 #endif
01596   common_sets = (GC_Set **)malloc_managed(sizeof(GC_Set*) * num_common_sets);
01597 
01598   common_sets[0] = (GC_Set *)malloc_managed(sizeof(GC_Set));
01599   common_sets[0]->atomic = 0;
01600   common_sets[0]->uncollectable = 0;
01601   common_sets[0]->blocks = common;
01602   common_sets[0]->block_ends = common + NUM_COMMON_SIZE;
01603   common_sets[0]->othersptr = &others;
01604 
01605   common_sets[1] = (GC_Set *)malloc_managed(sizeof(GC_Set));
01606   common_sets[1]->atomic = !NO_ATOMIC;
01607   common_sets[1]->uncollectable = 0;
01608   common_sets[1]->blocks = atomic_common;
01609   common_sets[1]->block_ends = atomic_common + NUM_COMMON_SIZE;
01610   common_sets[1]->othersptr = &atomic_others;
01611 
01612   common_sets[2] = (GC_Set *)malloc_managed(sizeof(GC_Set));
01613   common_sets[2]->atomic = 0;
01614   common_sets[2]->uncollectable = 1;
01615   common_sets[2]->blocks = uncollectable_common;
01616   common_sets[2]->block_ends = uncollectable_common + NUM_COMMON_SIZE;
01617   common_sets[2]->othersptr = &uncollectable_others;
01618 
01619   common_sets[3] = (GC_Set *)malloc_managed(sizeof(GC_Set));
01620   common_sets[3]->atomic = !NO_ATOMIC;
01621   common_sets[3]->uncollectable = 1;
01622   common_sets[3]->blocks = uncollectable_atomic_common;
01623   common_sets[3]->block_ends = uncollectable_atomic_common + NUM_COMMON_SIZE;
01624   common_sets[3]->othersptr = &uncollectable_atomic_others;
01625 
01626 #if PROVIDE_MALLOC_AND_FREE
01627   common_sets[4] = (GC_Set *)malloc_managed(sizeof(GC_Set));
01628   common_sets[4]->atomic = 1;
01629   common_sets[4]->uncollectable = 1;
01630   common_sets[4]->blocks = sys_malloc;
01631   common_sets[4]->block_ends = sys_malloc + NUM_COMMON_SIZE;
01632   common_sets[4]->othersptr = &sys_malloc_others;
01633 #endif
01634 
01635   for (i = 0; i < num_common_sets; i++) {
01636     common_sets[i]->name = "Basic";
01637 #if ALLOW_SET_LOCKING
01638     common_sets[i]->locked = 0;
01639 #endif
01640 #if KEEP_SET_NO || KEEP_CHUNK_SET_NO
01641     common_sets[i]->no = i;
01642 #endif
01643 #if ALLOW_TRACE_COUNT
01644     common_sets[i]->count_tracer = NULL;
01645 #endif
01646 #if ALLOW_TRACE_PATH
01647     common_sets[i]->path_tracer = NULL;
01648 #endif
01649 #if ALLOW_TRACE_COUNT || ALLOW_TRACE_PATH
01650     common_sets[i]->trace_init = NULL;
01651     common_sets[i]->trace_done = NULL;
01652 #endif
01653 #if ALLOW_SET_FINALIZER
01654     common_sets[i]->finalizer = NULL;
01655 #endif    
01656   }
01657 
01658 #if PROVIDE_MALLOC_AND_FREE
01659   common_sets[4]->name = "Sysmalloc";
01660 #endif
01661 
01662   initialized = 1;
01663 }
01664 
01665 void GC_set_stack_base(void *base)
01666 {
01667   GC_stackbottom = base;
01668 }
01669 
01670 void *GC_get_stack_base(void)
01671 {
01672   return GC_stackbottom;
01673 }
01674 
01675 static void *find_ptr(void *d, int *_size,
01676                     BlockOfMemory **_block, int *_pos,
01677                     MemoryChunk **_chunk,
01678                     int find_anyway)
01679 {
01680   unsigned long p = PTR_TO_INT(d);
01681   DECL_SECTOR_PAGETABLES;
01682 
01683   FIND_SECTOR_PAGETABLES(p);
01684 
01685   if (!sector_pagetables)
01686     return NULL;
01687 
01688   if (p >= low_plausible && p < high_plausible) {
01689     SectorPage *pagetable = sector_pagetables[SECTOR_LOOKUP_PAGETABLE(p)];
01690     if (pagetable) {
01691       SectorPage *page = pagetable + SECTOR_LOOKUP_PAGEPOS(p);
01692       long kind = page->kind;
01693 
01694       if (kind == sector_kind_block) {
01695        /* Found common block: */
01696        BlockOfMemory *block = (BlockOfMemory *)INT_TO_PTR(page->start);
01697        if (p >= block->start && p < block->top) {
01698          int size = block->size;
01699          int diff = p - block->start;
01700          int pos = (diff / size), apos;
01701          int bit;
01702          unsigned long result;
01703          
01704          apos = POS_TO_UNMARK_INDEX(pos);
01705          bit = POS_TO_UNMARK_BIT(pos);
01706          
01707          if (_size)
01708            *_size = size;
01709          
01710          if (NOT_MARKED(block->free[apos] & bit) && !find_anyway)
01711            return NULL;
01712          
01713          result = block->start + (pos * size);
01714          
01715          if (_block)
01716            *_block = block;
01717          if (_pos)
01718            *_pos = pos;
01719          
01720          return INT_TO_PTR(result);
01721        }
01722       } else if (kind == sector_kind_chunk) {
01723        MemoryChunk *c = (MemoryChunk *)INT_TO_PTR(page->start);
01724        if ((p >= c->start) && (p < c->end)) {
01725          if (_size)
01726            *_size = (c->end - c->start);
01727          if (c->marked || find_anyway) {
01728            if (_chunk)
01729              *_chunk = c;
01730            return INT_TO_PTR(c->start);
01731          } else
01732            return NULL;
01733        }
01734       }
01735     }
01736   }
01737 
01738   return NULL;
01739 }
01740 
01741 void *GC_base(void *d)
01742 {
01743   void *p;
01744 
01745   p = find_ptr(d, NULL, NULL, NULL, NULL, 0);
01746 
01747 #if PAD_BOUNDARY_BYTES
01748   if (p)
01749     p = PAD_FORWARD(p);
01750 #endif
01751   
01752   return p;
01753 }
01754 
01755 int GC_size(void *d);
01756 int GC_is_atomic(void *d);
01757 int GC_orig_size(void *d);
01758 void *GC_orig_base(void *d);
01759 
01760 int GC_size(void *d)
01761 {
01762   int size;
01763   
01764   if (find_ptr(d, &size, NULL, NULL, NULL, 0)) {
01765 #if PAD_BOUNDARY_BYTES
01766     size -= PAD_START_SIZE + PAD_END_SIZE;
01767 #endif
01768     return size;
01769   } else
01770     return 0;
01771 }
01772 
01773 int GC_is_atomic(void *d)
01774 {
01775   BlockOfMemory *block = NULL;
01776   MemoryChunk *chunk = NULL;
01777   
01778   if (find_ptr(d, NULL, &block, NULL, &chunk, 0)) {
01779     if (block)
01780       return block->atomic;
01781     else
01782       return chunk->atomic;
01783   } else
01784     return 0;
01785 }
01786 
01787 int GC_orig_size(void *d)
01788 {
01789   int size = 0;
01790   
01791   find_ptr(d, &size, NULL, NULL, NULL, 0);
01792   return size;
01793 }
01794 
01795 void *GC_orig_base(void *d)
01796 {
01797   return find_ptr(d, NULL, NULL, NULL, NULL, 1);
01798 }
01799 
01800 GC_Set *GC_set(void *d)
01801 {
01802 #if KEEP_SET_NO
01803   BlockOfMemory *block = NULL;
01804   MemoryChunk *chunk = NULL;
01805   
01806   if (!initialized)
01807     GC_initialize();
01808 
01809   if (find_ptr(d, NULL, &block, NULL, &chunk, 0)) {
01810     int set_no;
01811     if (block)
01812       set_no = block->set_no;
01813     else
01814       set_no = chunk->set_no;
01815 
01816     return common_sets[set_no];
01817   } else
01818     return NULL;
01819 #else
01820   return NULL;
01821 #endif
01822 }
01823 
01824 #if DUMP_BLOCK_MAPS
01825 static unsigned long trace_stack_start, trace_stack_end, trace_reg_start, trace_reg_end;
01826 #endif
01827 
01828 #if DUMP_SECTOR_MAP
01829 static void dump_sector_map(char *prefix)
01830 {
01831   FPRINTF(STDERR, "%sBegin Sectors\n"
01832          "%sO0:free; ,.:block; =-:chunk; mn:other; \"':other; %d each\n%s",
01833          prefix, prefix, SECTOR_SEGMENT_SIZE, prefix);
01834   {
01835     int i, j;
01836     int c = 0;
01837     unsigned long was_sec = 0;
01838     int was_kind = 0;
01839 
01840     for (i = 0; i < (1 << SECTOR_LOOKUP_PAGESETBITS); i++) {
01841       SectorPage *pagetable;
01842       pagetable = sector_pagetables[i];
01843       if (pagetable) {
01844        for (j = 0; j < SECTOR_LOOKUP_PAGESIZE; j++) {
01845          long kind;
01846          kind = pagetable[j].kind;
01847          if (kind != sector_kind_free) {
01848            char *same_sec, *diff_sec;
01849 
01850            if (c++ > 40) {
01851              FPRINTF(STDERR, "\n%s", prefix);
01852              c = 1;
01853            }
01854 
01855            switch(kind) {
01856 #if !RELEASE_UNUSED_SECTORS
01857            case sector_kind_freed:
01858              same_sec = "0";
01859              diff_sec = "O";
01860              break;
01861 #endif
01862            case sector_kind_block:
01863              same_sec = ".";
01864              diff_sec = ",";
01865              break;
01866            case sector_kind_chunk:
01867              same_sec = "-";
01868              diff_sec = "=";
01869              break;
01870            case sector_kind_managed:
01871              same_sec = "n";
01872              diff_sec = "m";
01873              break;
01874            case sector_kind_other:
01875              same_sec = "'";
01876              diff_sec = "\"";
01877              break;
01878            default:
01879              same_sec = "?";
01880              diff_sec = "?";
01881              break;
01882            }
01883 
01884            if ((was_kind != kind) || (was_sec != pagetable[j].start))
01885              same_sec = diff_sec;
01886 
01887            FPRINTF(STDERR, same_sec);
01888            
01889            was_kind = kind;
01890            was_sec = pagetable[j].start;
01891          }
01892        }
01893       }
01894     }
01895   }
01896   FPRINTF(STDERR, "\n%sEnd Sectors\n", prefix);
01897 }
01898 #endif
01899 
01900 void GC_dump(void)
01901 {
01902   FPRINTF(STDERR, "Begin Map\n");
01903 
01904   FPRINTF(STDERR,
01905          "allocated: %ld  collectable: %ld  uncollectable: %ld\n"
01906          "including known overhead: %ld  scheduled gc: %ld  last collect depth: %ld\n"
01907          "managed: %ld  managed including overhead: %ld\n"
01908          "sector used: %ld  sector free: %ld  sector total: %ld\n"
01909          "sector range: %ld  sector administration: %ld\n"
01910          "num sector allocs: %ld  num sector frees: %ld\n"
01911          "num disappearing links: %d  num finalizations: %d  queued: %d\n"
01912 #if STAMP_AND_REMEMBER_SOURCE
01913          "current clock: %ld\n"
01914 #endif
01915          , mem_use + mem_uncollectable_use, mem_use, mem_uncollectable_use, 
01916          mem_real_use, mem_limit, collect_mem_use,
01917          manage_mem_use, manage_real_mem_use,
01918          sector_mem_use - sector_free_mem_use, sector_free_mem_use, sector_mem_use,
01919          sector_high_plausible - sector_low_plausible,
01920          sector_admin_mem_use,
01921          num_sector_allocs, num_sector_frees,
01922          GC_dl_entries, GC_fo_entries, num_queued_finalizers
01923 #if STAMP_AND_REMEMBER_SOURCE
01924          , stamp_clock
01925 #endif
01926          );
01927 
01928 #if DUMP_SECTOR_MAP
01929   dump_sector_map("");
01930 #endif
01931 
01932 #if DUMP_BLOCK_COUNTS
01933   {
01934     int i, j;
01935     unsigned long total;
01936     
01937 #if DUMP_BLOCK_MAPS
01938     FPRINTF(STDERR, "roots: ======================================\n");
01939     for (i = 0; i < roots_count; i += 2)
01940       FPRINTF(STDERR, ">%lx-%lx", roots[i], roots[i + 1]);
01941     FPRINTF(STDERR, "\n");
01942 
01943     FPRINTF(STDERR, "stack: ======================================\n");
01944     FPRINTF(STDERR, ">%lx-%lx>%lx-%lx\n",
01945            trace_stack_start, trace_stack_end, trace_reg_start, trace_reg_end);
01946 #endif
01947 
01948     for (j = 0; j < num_common_sets; j++) {
01949       GC_Set *cs = common_sets[j];
01950 
01951       total = 0;
01952 
01953       FPRINTF(STDERR,
01954              "Set: %s [%s/%s]: ======================================\n", 
01955              cs->name,
01956              cs->atomic ? "atomic" : "pointerful",
01957              cs->uncollectable ? "eternal" : "collectable");
01958 
01959       for (i = 0; i < NUM_COMMON_SIZE; i++) {
01960        BlockOfMemory *block;
01961        int counter = 0;
01962 
01963        block = (cs)->blocks[i];
01964 
01965        if (block) {
01966          FPRINTF(STDERR, "%d:", block->size);
01967 
01968 #if DUMP_BLOCK_MAPS
01969          FPRINTF(STDERR, "[%lx]", block->start - (unsigned long)block);
01970 #endif
01971 
01972          while (block) {
01973            int k, size = block->size;
01974 
01975 #if DUMP_BLOCK_MAPS
01976            counter = 0;
01977 #endif
01978 
01979            for (k = (block->top - block->start) / block->size; k-- ; ) {
01980              int bit = POS_TO_UNMARK_BIT(k);
01981              int pos = POS_TO_UNMARK_INDEX(k);
01982              
01983              if (IS_MARKED(block->free[pos] & bit)) {
01984               total += size;
01985               counter++;
01986              }
01987            }
01988 
01989 #if DUMP_BLOCK_MAPS
01990            FPRINTF(STDERR,
01991                   ">%lxx%d"
01992 #if STAMP_AND_REMEMBER_SOURCE
01993                   "@%ld-%ld:%lx-%lx" 
01994 #endif
01995                   , (unsigned long)block, counter
01996 #if STAMP_AND_REMEMBER_SOURCE
01997                   , block->make_time, 
01998                   block->use_time,
01999                   block->low_marker,
02000                   block->high_marker
02001 #endif
02002                   );
02003 #endif
02004            block = block->next;
02005          }
02006 #if DUMP_BLOCK_MAPS
02007          FPRINTF(STDERR, "\n");
02008 #else
02009          FPRINTF(STDERR, "%d;", counter);
02010 #endif
02011        }
02012       }
02013 
02014       /* Print chunks, "sorting" so that same size are printed together: */
02015       {
02016        MemoryChunk *c, *cnext, *first = NULL, *last = NULL, *t, *next, *prev;
02017        int counter = 0;
02018        
02019        for (c = *(cs->othersptr); c; c = cnext) {
02020          unsigned long size = c->end - c->start;
02021          FPRINTF(STDERR, "%ld:", size);
02022 
02023 #if DUMP_BLOCK_MAPS
02024          FPRINTF(STDERR, "[%lx]", c->start - (unsigned long)c);
02025 #endif
02026          
02027          cnext = c->next;
02028          
02029          prev = NULL;
02030          for (t = c; t; t = next) {
02031            next = t->next;
02032            
02033            if (size == (t->end - t->start)) {
02034 #if DUMP_BLOCK_MAPS
02035              FPRINTF(STDERR,
02036                     ">%lx"
02037 #if STAMP_AND_REMEMBER_SOURCE
02038                     "@%ld:%lx" 
02039 #endif
02040                     , (unsigned long)t
02041 #if STAMP_AND_REMEMBER_SOURCE
02042                     , t->make_time,
02043                     t->marker
02044 #endif
02045                     );
02046 #endif
02047              
02048              counter++;
02049 
02050              if (last)
02051               last->next = t;
02052              else
02053               first = t;
02054              last = t;
02055              if (prev)
02056               prev->next = t->next;
02057              if (t == cnext)
02058               cnext = t->next;
02059 
02060              total += size;
02061            } else
02062              prev = t;
02063          }
02064 #if DUMP_BLOCK_MAPS
02065          FPRINTF(STDERR, "\n");
02066 #else
02067          FPRINTF(STDERR, "%d;", counter);
02068          counter = 0;
02069 #endif
02070        }
02071        
02072        if (last)
02073          last->next = NULL;
02074        *(cs->othersptr) = first;
02075       }
02076       cs->total = total;
02077 
02078 #if KEEP_PREV_PTR
02079       /* reset prev pointers: */
02080       {
02081        MemoryChunk *c, **prev_ptr = (cs->othersptr);
02082        for (c = *(cs->othersptr); c; c = c->next) {
02083          c->prev_ptr = prev_ptr;
02084          prev_ptr = &c->next;
02085        }
02086       }
02087 #endif
02088       
02089       FPRINTF(STDERR, "total size: %ld\n", total);
02090     }
02091 
02092     FPRINTF(STDERR, "summary: ======================================\n");
02093     total = 0;
02094     for (j = 0; j < num_common_sets; j++) {
02095       GC_Set *cs = common_sets[j];
02096       FPRINTF(STDERR,
02097              "%12s: %10ld  [%s/%s]\n",
02098              cs->name, cs->total,
02099              cs->atomic ? "atomic" : "pointerful",
02100              cs->uncollectable ? "eternal" : "collectable");
02101       total += cs->total;
02102     }
02103     FPRINTF(STDERR, "%12s: %10ld\n", "total", total);
02104   }
02105 #endif
02106   FPRINTF(STDERR, "End Map\n");
02107 }
02108 
02109 long GC_get_memory_use()
02110 {
02111   return mem_real_use;
02112 }
02113 
02114 void GC_end_stubborn_change(void *p)
02115 {
02116   /* stubborness is not exploited */
02117 }
02118 
02119 static void *zero_ptr;
02120 
02121 #if CHECK_WATCH_FOR_PTR_ALLOC
02122 void *GC_watch_for_ptr = NULL;
02123 #define UNHIDE_WATCH(p) ((void *)~((unsigned long)p))
02124 static int findings;
02125 
02126 #if USE_WATCH_FOUND_FUNC
02127 void GC_found_watch()
02128 {
02129   FPRINTF(STDERR, "found\n");
02130   findings++;
02131 }
02132 #endif
02133 #endif
02134 
02135 #if PAD_BOUNDARY_BYTES
02136 static void set_pad(void *p, long s, long os)
02137 {
02138   long diff;
02139 
02140   /* Set start & end pad: */
02141   *(long*)p = PAD_PATTERN;
02142   *(long*)(((char *)p) + s - PAD_END_SIZE) = PAD_PATTERN;
02143   *(long*)(((char *)p) + s - PAD_END_SIZE + sizeof(long)) = PAD_PATTERN;
02144   
02145   /* Keep difference of given - requested: */
02146   diff = (s - os - PAD_START_SIZE - PAD_END_SIZE);
02147   ((long *)p)[1] = diff;
02148   
02149   if (diff) {
02150     unsigned char *ps = ((unsigned char *)p) + os + PAD_START_SIZE;
02151     while (diff--)
02152       *(ps++) = PAD_FILL_PATTERN;
02153   }
02154 }
02155 #endif
02156 
02157 static void init_positions(int cpos, int size, int num_elems)
02158 {
02159   int num_positions = num_elems << LOG_FREE_BIT_PER_ELEM;
02160   int block_size = size * num_positions;
02161   int num_offsets = block_size >> LOG_PTR_SIZE;
02162   int size_in_ptrs = size >> LOG_PTR_SIZE;
02163   int i, j, pos;
02164   int *positions;
02165 
02166   positions = (int *)malloc_sector(num_offsets * sizeof(int), sector_kind_other, 0);
02167 
02168   for (i = 0, pos = 0, j = 0; i < num_offsets; ) {
02169     positions[i++] = pos;
02170     if (++j == size_in_ptrs) {
02171       pos++;
02172       j = 0;
02173     }
02174   }
02175 
02176   common_positionses[cpos] = positions;
02177 }
02178 
02179 #if ALLOC_STATS
02180 # define ALLOC_STATISTIC(x) x
02181 static int num_allocs_stat;
02182 static int num_nonzero_allocs_stat;
02183 static int num_common_allocs_stat;
02184 static int num_block_alloc_checks_stat;
02185 static int num_block_alloc_nexts_stat;
02186 static int num_block_alloc_second_checks_stat;
02187 static int num_chunk_allocs_stat;
02188 static int num_newblock_allocs_stat;
02189 #else
02190 # define ALLOC_STATISTIC(x) /* empty */
02191 #endif
02192 
02193 #if KEEP_SET_NO || KEEP_CHUNK_SET_NO
02194 #define SET_NO_BACKINFO int set_no,
02195 #define KEEP_SET_INFO_ARG(x) x, 
02196 #else
02197 #define SET_NO_BACKINFO /* empty */
02198 #define KEEP_SET_INFO_ARG(x) /* empty */
02199 #endif
02200 
02201 static void *do_malloc(SET_NO_BACKINFO
02202                      unsigned long size, 
02203                      BlockOfMemory **common,
02204                      MemoryChunk **othersptr,
02205                      int flags)
02206 {
02207   BlockOfMemory **find, *block;
02208   BlockOfMemory **common_ends;
02209   void *s;
02210   long c;
02211   unsigned long p;
02212   long sizeElemBit;
02213   int i, cpos, elem_per_block, extra_alignment;
02214 #if PAD_BOUNDARY_BYTES
02215   long origsize;
02216 #endif
02217 
02218 #if CHECK_COLLECTING
02219   if (collecting_now) {
02220     exit(-1);
02221   }
02222 #endif
02223 
02224   ALLOC_STATISTIC(num_allocs_stat++);
02225 
02226   if (!size)
02227     return (void *)&zero_ptr;
02228 
02229   ALLOC_STATISTIC(num_nonzero_allocs_stat++);
02230 
02231 #if PAD_BOUNDARY_BYTES
02232   origsize = size;
02233   size += PAD_START_SIZE + PAD_END_SIZE;
02234 #endif
02235 
02236   if (size < (MAX_COMMON_SIZE - PTR_SIZE + 1)) {
02237     ALLOC_STATISTIC(num_common_allocs_stat++);
02238 
02239     if (!size_map)
02240       init_size_map();
02241 
02242     cpos = size_index_map[((size + PTR_SIZE - 1) >> LOG_PTR_SIZE) - 1];
02243 #if 0
02244     if (size > size_map[cpos]) {
02245       FPRINTF(STDERR, "map error: %d < %d\n", size_map[cpos], size);
02246     }
02247 #endif
02248     size = size_map[cpos];
02249 
02250     block = common[cpos + NUM_COMMON_SIZE];
02251     find = NULL;
02252 
02253     while (block) {
02254       int search_bit, search_offset;
02255 
02256       if (block->top < block->end)
02257        goto block_top;
02258 
02259       ALLOC_STATISTIC(num_block_alloc_checks_stat++);
02260 
02261       search_bit = block->free_search_bit;
02262       search_offset = block->free_search_offset;
02263 
02264       for (i = block->free_search_start; i >= 0; i--)
02265        if (block->free[i]) {
02266          char *zp;
02267          int v = block->free[i];
02268          
02269          while (IS_MARKED(v & search_bit)) {
02270            search_bit = search_bit << FREE_BIT_SIZE;
02271            search_offset++;
02272          }
02273          block->free[i] -= search_bit;
02274          block->free_search_start = i;
02275          block->free_search_bit = search_bit << FREE_BIT_SIZE;
02276          block->free_search_offset = search_offset + 1;
02277 
02278          c = (i << LOG_FREE_BIT_PER_ELEM) + search_offset;
02279          
02280          if (flags & do_malloc_UNCOLLECTABLE)
02281            mem_uncollectable_use += size;
02282          else
02283            mem_use += size;
02284          
02285          p = block->start + c * size;
02286 
02287          zp = INT_TO_PTR(p);
02288 
02289          if (!(flags & do_malloc_ATOMIC)) {
02290            void **p = (void **)zp;
02291            unsigned long sz = size >> LOG_PTR_SIZE;
02292            for (; sz--; p++)
02293              *p = 0;
02294          }
02295 
02296 #if CHECK_WATCH_FOR_PTR_ALLOC
02297          if (zp == UNHIDE_WATCH(GC_watch_for_ptr)) {
02298 #if USE_WATCH_FOUND_FUNC
02299            GC_found_watch();
02300 #else
02301            findings++;
02302 #endif
02303          }
02304 #endif
02305 
02306 #if PAD_BOUNDARY_BYTES
02307          SET_PAD(zp, size, origsize);
02308          zp = PAD_FORWARD(zp);
02309 #endif
02310 
02311          return zp;
02312        } else {
02313          search_bit = (FREE_BIT_START | UNMARK_BIT_START);
02314          search_offset = 0;
02315        }
02316 
02317       find = &block->next;
02318 
02319       block = block->next;
02320       common[cpos + NUM_COMMON_SIZE] = block;
02321 
02322       ALLOC_STATISTIC(num_block_alloc_nexts_stat++);
02323       ALLOC_STATISTIC(if (block) num_block_alloc_second_checks_stat++);
02324     }
02325 
02326   } else {
02327     void *a;
02328     MemoryChunk *c;
02329 
02330     /* Round up to ptr-aligned size: */
02331     if (size & (PTR_SIZE-1))
02332       size += PTR_SIZE - (size & (PTR_SIZE-1));
02333 
02334     ALLOC_STATISTIC(num_chunk_allocs_stat++);
02335 
02336     cpos = 0;
02337 
02338     a = malloc_sector(size + sizeof(MemoryChunk), sector_kind_chunk, 1);
02339     if (!a) {
02340       if (mem_use >= mem_limit)
02341        GC_gcollect();
02342       
02343       a = malloc_sector(size + sizeof(MemoryChunk), sector_kind_chunk, 0);
02344     }
02345 
02346     c = (MemoryChunk *)a;
02347     
02348     c->finalizers = NULL;
02349     c->marked = 1;
02350 
02351 #if STAMP_AND_REMEMBER_SOURCE
02352     c->make_time = stamp_clock;
02353 #endif
02354 #if KEEP_CHUNK_SET_NO
02355     c->set_no = set_no;
02356 #endif
02357 
02358     c->next = *othersptr;
02359 #if CHECK_FREES
02360     if (PTR_TO_INT(c->next) & (SECTOR_SEGMENT_SIZE - 1))
02361       free_error("bad next\n");
02362 #endif
02363     *othersptr = c;
02364 #if KEEP_PREV_PTR
02365     c->prev_ptr = othersptr;
02366     if (c->next)
02367       c->next->prev_ptr = &c->next;
02368 #endif
02369     
02370     c->start = PTR_TO_INT(&c->data);
02371     c->end = c->start + size;
02372     c->atomic = flags & do_malloc_ATOMIC;
02373 
02374     if (flags & do_malloc_UNCOLLECTABLE)
02375       mem_uncollectable_use += size;
02376     else
02377       mem_use += size;
02378     mem_real_use += (size + sizeof(MemoryChunk));
02379     num_chunks++;
02380 
02381     if (!low_plausible || (c->start < low_plausible))
02382       low_plausible = c->start;
02383     if (!high_plausible || (c->end > high_plausible))
02384       high_plausible = c->end;     
02385 
02386     if (!(flags & do_malloc_ATOMIC)) {
02387       void **p = (void **)&c->data;
02388       unsigned long sz = size >> LOG_PTR_SIZE;
02389       for (; sz--; p++)
02390        *p = 0;
02391     }
02392 
02393 #if CHECK_WATCH_FOR_PTR_ALLOC
02394     if ((&c->data) == UNHIDE_WATCH(GC_watch_for_ptr)) {
02395 #if USE_WATCH_FOUND_FUNC
02396       GC_found_watch();
02397 #else
02398       findings++;
02399 #endif
02400     }
02401 #endif
02402 
02403     s = (void *)&c->data;
02404 
02405 #if PAD_BOUNDARY_BYTES
02406     SET_PAD(s, size, origsize);
02407     s = PAD_FORWARD(s);
02408 #endif
02409 
02410     return s;
02411   }
02412 
02413   ALLOC_STATISTIC(num_newblock_allocs_stat++);
02414 
02415   sizeElemBit = size << LOG_FREE_BIT_PER_ELEM;
02416   
02417 #if PAD_BOUNDARY_BYTES
02418   /* Assume alignment */
02419   extra_alignment = (DOUBLE_SIZE - PTR_SIZE);
02420 #else
02421   extra_alignment = (size & (DOUBLE_SIZE - 1)) ? 0 : (DOUBLE_SIZE - PTR_SIZE);
02422 #endif
02423 
02424   /* upper bound: */
02425   elem_per_block = (SECTOR_SEGMENT_SIZE - sizeof(BlockOfMemory)) / sizeElemBit;
02426   /*                ^- mem area size      ^- block record */
02427   /* use this one: */
02428   elem_per_block = ((SECTOR_SEGMENT_SIZE - sizeof(BlockOfMemory) - elem_per_block
02429   /*                ^- mem area size      ^- block record       ^- elems     */
02430                    - (extra_alignment + PTR_SIZE - 2)) / sizeElemBit);
02431   /*                     ^- possible elem padding, -2 since BlockOfMemory has free[1] */
02432   if (elem_per_block) {
02433     /* Small enough to fit into one segment */
02434     c = SECTOR_SEGMENT_SIZE;
02435   } else {
02436     elem_per_block = 1;
02437     /* Add (PTR_SIZE - 1) to ensure enough room after alignment: */
02438     c = sizeof(BlockOfMemory) + (PTR_SIZE - 1) + sizeElemBit;
02439   }
02440 
02441   block = (BlockOfMemory *)malloc_sector(c, sector_kind_block, 1);
02442   if (!block) {
02443     if (mem_use >= mem_limit) {
02444       GC_gcollect();
02445       return do_malloc(KEEP_SET_INFO_ARG(set_no)
02446                      size, common, othersptr, flags);
02447     } else
02448       block = (BlockOfMemory *)malloc_sector(c, sector_kind_block, 0);
02449   }
02450 
02451   
02452   block->elem_per_block = elem_per_block;
02453 
02454   block->finalizers = NULL;
02455 
02456 #if STAMP_AND_REMEMBER_SOURCE
02457   block->make_time = stamp_clock;
02458 #endif
02459 #if KEEP_SET_NO
02460   block->set_no = set_no;
02461 #endif
02462 
02463   /* offset for data (ptr aligned): */
02464   c = sizeof(BlockOfMemory) + (elem_per_block - 1);
02465   if (c & (PTR_SIZE - 1))
02466     c += (PTR_SIZE - (c & (PTR_SIZE - 1)));
02467 #if !PAD_BOUNDARY_BYTES
02468   if (!(size & (DOUBLE_SIZE - 1))) /* Even more alignment for doubles: */
02469 #else
02470     /* Assume alignment */
02471 #endif
02472     if (c & (DOUBLE_SIZE - 1))
02473       c += (DOUBLE_SIZE - (c & (DOUBLE_SIZE - 1)));
02474   p = PTR_TO_INT(block) + c;
02475 
02476   common_ends = common + NUM_COMMON_SIZE;
02477 
02478   if (common_ends[cpos] || (find && !common[cpos])) {
02479     /* hey! - GC happened and reset stuff. find may not be alive anymore,
02480        so find it again. */
02481     find = &common_ends[cpos];
02482     while (*find)
02483       find = &(*find)->next;
02484   }
02485 
02486   if (find)
02487     *find = block;
02488   else if (!common[cpos])
02489     common[cpos] = block;
02490 
02491   if (!common_ends[cpos])
02492     common_ends[cpos] = block;
02493 
02494   num_blocks++;
02495 
02496   for (i = ELEM_PER_BLOCK(block); i-- ; )
02497     block->free[i] = 0;
02498   block->free_search_start = -1; /* a free search will not yield results until a GC */
02499 
02500   block->start = block->top = p;
02501   block->end = block->start + (elem_per_block * sizeElemBit);
02502   block->size = (short)size;
02503   block->next = NULL;
02504   block->atomic = flags & do_malloc_ATOMIC;
02505   if (!common_positionses[cpos])
02506     init_positions(cpos, size, elem_per_block);
02507   block->positions = common_positionses[cpos];
02508 
02509   if (!low_plausible || (block->start < low_plausible))
02510     low_plausible = block->start;
02511   if (!high_plausible || (block->end > high_plausible))
02512     high_plausible = block->end;   
02513 
02514   mem_real_use += SECTOR_SEGMENT_SIZE;
02515 
02516  block_top:
02517 
02518 #if STAMP_AND_REMEMBER_SOURCE
02519   block->use_time = stamp_clock;
02520 #endif
02521 
02522 #if CHECK
02523   if (block->end < block->start
02524       || block->top < block->start
02525       || block->top > block->end)
02526     FPRINTF(STDERR,
02527            "bad block: %ld %ld %ld %ld\n",
02528            size, block->start, block->top, block->end);
02529 #endif      
02530 
02531   s = INT_TO_PTR(block->top);
02532   block->top = block->top + size;
02533 
02534   if (flags & do_malloc_UNCOLLECTABLE)
02535     mem_uncollectable_use += size;
02536   else
02537     mem_use += size;
02538 
02539   if (!(flags & do_malloc_ATOMIC)) {
02540     void **p = (void **)s;
02541     unsigned long sz = size >> LOG_PTR_SIZE;
02542     for (; sz--; p++)
02543       *p = 0;
02544   }
02545 
02546 #if CHECK_WATCH_FOR_PTR_ALLOC
02547   if (s == UNHIDE_WATCH(GC_watch_for_ptr)) {
02548 #if USE_WATCH_FOUND_FUNC
02549     GC_found_watch();
02550 #else
02551     findings++;
02552 #endif
02553   }
02554 #endif
02555 
02556 #if PAD_BOUNDARY_BYTES
02557     SET_PAD(s, size, origsize);
02558     s = PAD_FORWARD(s);
02559 #endif
02560 
02561   return s;
02562 }
02563 
02564 GC_Set *GC_new_set(char *name, 
02565                  GC_trace_init trace_init,
02566                  GC_trace_done trace_done,
02567                  GC_count_tracer count_tracer,
02568                  GC_path_tracer path_tracer,
02569                  GC_set_elem_finalizer final,
02570                  int flags)
02571 {
02572   GC_Set *c, **naya;
02573   int i;
02574 
02575   if (!initialized)
02576     GC_initialize();
02577 
02578   c = (GC_Set *)malloc_managed(sizeof(GC_SetWithOthers));
02579 
02580   naya = (GC_Set **)malloc_managed(sizeof(GC_Set *) * (num_common_sets + 1));
02581   for (i = 0; i < num_common_sets; i++)
02582     naya[i] = common_sets[i];
02583   
02584 #if KEEP_SET_NO || KEEP_CHUNK_SET_NO
02585   c->no = num_common_sets;
02586 #endif
02587 #if ALLOW_TRACE_COUNT
02588   c->count_tracer = count_tracer;
02589 #endif
02590 #if ALLOW_TRACE_PATH
02591   c->path_tracer = path_tracer;
02592 #endif
02593 #if ALLOW_TRACE_COUNT || ALLOW_TRACE_PATH
02594   c->trace_init = trace_init;
02595   c->trace_done = trace_done;
02596 #endif
02597 #if ALLOW_SET_FINALIZER
02598   c->finalizer = final;
02599 #endif    
02600 
02601   naya[num_common_sets++] = c;
02602   c->atomic = !!(flags & SGC_ATOMIC_SET);
02603   c->uncollectable = !!(flags & SGC_UNCOLLECTABLE_SET);
02604 #if ALLOW_SET_LOCKING
02605   c->locked = 0;
02606 #endif
02607   c->name = name;
02608   c->blocks = (BlockOfMemory **)malloc_managed(sizeof(BlockOfMemory*) * 2 * NUM_COMMON_SIZE);
02609   memset(c->blocks, 0, sizeof(BlockOfMemory*) * NUM_COMMON_SIZE);
02610   c->block_ends = c->blocks + NUM_COMMON_SIZE;
02611   memset(c->block_ends, 0, sizeof(BlockOfMemory*) * NUM_COMMON_SIZE);
02612 
02613   ((GC_SetWithOthers *)c)->others = NULL;
02614   c->othersptr = &((GC_SetWithOthers *)c)->others;
02615 
02616   free_managed(common_sets);
02617   common_sets = naya;
02618 
02619   return c;
02620 }
02621 
02622 void *GC_malloc(size_t size)
02623 {
02624   return do_malloc(KEEP_SET_INFO_ARG(0)
02625                  size, common, &others, 
02626                  0);
02627 }
02628 
02629 void *GC_malloc_atomic(size_t size)
02630 {
02631   return do_malloc(KEEP_SET_INFO_ARG(1)
02632                  size, atomic_common, 
02633                  &atomic_others, 
02634                  do_malloc_ATOMIC_UNLESS_DISABLED);
02635 }
02636 
02637 void *GC_malloc_uncollectable(size_t size)
02638 {
02639   return do_malloc(KEEP_SET_INFO_ARG(2)
02640                  size, uncollectable_common, 
02641                  &uncollectable_others, 
02642                  do_malloc_UNCOLLECTABLE);
02643 }
02644 
02645 void *GC_malloc_atomic_uncollectable(size_t size)
02646 {
02647   return do_malloc(KEEP_SET_INFO_ARG(3)
02648                  size, uncollectable_atomic_common, 
02649                  &uncollectable_atomic_others, 
02650                  do_malloc_ATOMIC_UNLESS_DISABLED | do_malloc_UNCOLLECTABLE);
02651 }
02652 
02653 void *GC_malloc_specific(size_t size, GC_Set *set)
02654 {
02655   return do_malloc(KEEP_SET_INFO_ARG(set->no)
02656                  size, set->blocks, set->othersptr,
02657                  ((set->atomic ? do_malloc_ATOMIC_UNLESS_DISABLED : 0)
02658                   | (set->uncollectable ? do_malloc_UNCOLLECTABLE : 0)));
02659 }
02660 
02661 void *GC_malloc_stubborn(size_t size)
02662 {
02663   return GC_malloc(size);
02664 }
02665 
02666 #if PROVIDE_MALLOC_AND_FREE
02667 void *malloc(size_t size)
02668 {
02669   return do_malloc(KEEP_SET_INFO_ARG(4)
02670                  size, sys_malloc, 
02671                  &sys_malloc_others, 
02672                  do_malloc_ATOMIC | do_malloc_UNCOLLECTABLE);
02673 }
02674 
02675 void *realloc(void *p, size_t size)
02676 {
02677   void *naya;
02678   size_t oldsize;
02679 
02680   if (p) {
02681     oldsize = (size_t)GC_size(p);
02682     if (!oldsize)
02683       FPRINTF(STDERR, "illegal realloc\n");
02684   } else
02685     oldsize = 0;
02686   naya = malloc(size);
02687   if (oldsize > size)
02688     oldsize = size;
02689   memcpy(naya, p, oldsize);
02690   if (p)
02691     free(p);
02692   return naya;
02693 }
02694 
02695 void *calloc(size_t n, size_t size)
02696 {
02697   void *p;
02698   long c;
02699 
02700   c = n * size;
02701   p = malloc(c);
02702   memset(p, 0, c);
02703 
02704   return p;
02705 }
02706 
02707 void free(void *p)
02708 {
02709   if (p)
02710     GC_free(p);
02711 }
02712 
02713 # ifdef WIN32
02714 size_t _msize(void *p)
02715 {
02716   return GC_size(p);
02717 }
02718 # endif
02719 #endif
02720 
02721 static void register_disappearing_link(void **p, void *a, int late)
02722 {
02723   DisappearingLink *dl;
02724     
02725   dl = (DisappearingLink *)malloc_managed(sizeof(DisappearingLink));
02726   dl->kind = late ? dl_late : (a ? dl_normal : dl_restored);
02727   dl->watch = a;
02728   dl->disappear = p;
02729   dl->saved_value = NULL;
02730   dl->prev = NULL;
02731   dl->next = late ? late_disappearing : disappearing;
02732   if (dl->next)
02733     dl->next->prev = dl;
02734   if (late)
02735     late_disappearing = dl;
02736   else
02737     disappearing = dl;
02738 
02739   GC_dl_entries++;
02740 
02741   mem_real_use += sizeof(DisappearingLink);
02742 }
02743 
02744 void GC_general_register_disappearing_link(void **p, void *a)
02745 {
02746   register_disappearing_link(p, a, 0);
02747 }
02748 
02749 void GC_register_late_disappearing_link(void **p, void *a)
02750 {
02751   register_disappearing_link(p, a, 1);
02752 }
02753 
02754 void GC_unregister_disappearing_link(void **p)
02755 {
02756   /* We'll do it later */
02757 }
02758 
02759 #if 0
02760 DisappearingLink *GC_find_dl(void *p)
02761 {
02762   DisappearingLink *dl;
02763 
02764   for (dl = disappearing; dl; dl = dl->next)
02765     if ((dl->watch == p) || (!dl->watch && (*dl->disappear == p)))
02766       return dl;
02767 
02768   for (dl = late_disappearing; dl; dl = dl->next)
02769     if ((dl->watch == p) || (!dl->watch && (*dl->disappear == p)))
02770       return dl;
02771 
02772   return NULL;
02773 }
02774 #endif
02775 
02776 static void register_finalizer(void *p, void (*f)(void *p, void *data), 
02777                             void *data, void (**oldf)(void *p, void *data), 
02778                             void **olddata, int eager_level, int ignore_self)
02779 {
02780   BlockOfMemory *block = NULL;
02781   MemoryChunk *chunk = NULL;
02782   int pos;
02783 
02784   if ((p = find_ptr(p, NULL, &block, &pos, &chunk, 0))) {
02785     Finalizer *fn;
02786 
02787     if (block) {
02788       fn = block->finalizers;
02789       while (fn && (fn->u.pos != pos))
02790        fn = fn->next;
02791       if (fn && !f) {
02792        if (fn->prev)
02793          fn->prev->next = fn->next;
02794        else
02795          block->finalizers = fn->next;
02796        if (fn->next)
02797          fn->next->prev = fn->prev;
02798       }
02799     } else {
02800       fn = chunk->finalizers;
02801       if (fn && !f)
02802        chunk->finalizers = NULL;
02803     }
02804 
02805     if (oldf)
02806       *oldf = (fn ? fn->f : NULL);
02807     if (olddata)
02808       *olddata = (fn ? fn->data : NULL);
02809     
02810     if (f) {
02811       int isnaya = !fn;
02812 
02813       if (!fn) {
02814        fn = (Finalizer *)malloc_managed(sizeof(Finalizer));
02815        mem_real_use += sizeof(Finalizer);
02816        GC_fo_entries++;
02817       }
02818 
02819       fn->u.pos = pos;
02820       fn->f = f;
02821       fn->data = data;
02822       fn->eager_level = eager_level;
02823       fn->ignore_self = ignore_self;
02824       
02825       if (isnaya) {
02826        fn->prev = NULL;
02827        if (block) {
02828          fn->next = block->finalizers;
02829          if (fn->next)
02830            fn->next->prev = fn;
02831          block->finalizers = fn;
02832        } else {
02833          chunk->finalizers = fn;
02834          fn->next = NULL;
02835        }
02836       }
02837     } else if (fn) {
02838       mem_real_use -= sizeof(Finalizer);
02839       free_managed(fn);
02840       --GC_fo_entries;
02841     }
02842   }
02843 }
02844 
02845 void GC_register_finalizer(void *p, void (*f)(void *p, void *data), 
02846                         void *data, void (**oldf)(void *p, void *data), 
02847                         void **olddata)
02848 {
02849   register_finalizer(PAD_BACKWARD(p), f, data, oldf, olddata, 0, 0);
02850 }
02851 
02852 void GC_register_eager_finalizer(void *p, int level, void (*f)(void *p, void *data), 
02853                              void *data, void (**oldf)(void *p, void *data), 
02854                              void **olddata)
02855 {
02856   register_finalizer(PAD_BACKWARD(p), f, data, oldf, olddata, level, 0);
02857 }
02858 
02859 void GC_register_finalizer_ignore_self(void *p, void (*f)(void *p, void *data), 
02860                                    void *data, void (**oldf)(void *p, void *data), 
02861                                    void **olddata)
02862 {
02863   register_finalizer(PAD_BACKWARD(p), f, data, oldf, olddata, 0, 1);
02864 }
02865 
02866 /******************************************************************/
02867 
02868 void GC_for_each_element(GC_Set *set,
02869                       void (*f)(void *p, int size, void *data),
02870                       void *data)
02871 {
02872   int i;
02873   BlockOfMemory **blocks = set->blocks;
02874   MemoryChunk *c = *(set->othersptr);
02875 
02876 #if ALLOW_SET_LOCKING
02877   if (!set->uncollectable)
02878     set->locked++;
02879 #endif
02880 
02881   for (i = 0; i < NUM_COMMON_SIZE; i++) {
02882     BlockOfMemory **prev = &blocks[i];
02883     BlockOfMemory *block = *prev;
02884 
02885     while (block) {
02886       int j;
02887 
02888       j = (block->top - block->start) / block->size;
02889       
02890       while (j--) {
02891        int bit = POS_TO_FREE_BIT(j);
02892        int pos = POS_TO_FREE_INDEX(j);
02893        
02894        if (IS_MARKED(block->free[pos] & bit)) {
02895          unsigned long p;
02896          void *s;
02897          
02898          p = block->start + (block->size * j);
02899          s = INT_TO_PTR(p);
02900          
02901 #if PAD_BOUNDARY_BYTES
02902          s = PAD_FORWARD(s);
02903 #endif
02904          
02905          f(s, block->size, data);
02906        }
02907       }
02908       block = block->next;
02909     }
02910   }
02911 
02912   for (; c; c = c->next) {
02913     void *s;
02914 
02915     s = INT_TO_PTR(c->start);
02916 
02917 #if PAD_BOUNDARY_BYTES
02918     s = PAD_FORWARD(s);
02919 #endif
02920 
02921     f(s, c->end - c->start, data);
02922   }
02923 
02924 #if ALLOW_SET_LOCKING
02925   if (!set->uncollectable)
02926     --set->locked;
02927 #endif
02928 }
02929 
02930 /******************************************************************/
02931 
02932 static void free_chunk(MemoryChunk *k, MemoryChunk **prev, GC_Set *set)
02933 {
02934   MemoryChunk *next;
02935   
02936 #if ALLOW_SET_FINALIZER
02937   if (set->finalizer) {
02938     void *s = INT_TO_PTR(k->start);
02939 #if PAD_BOUNDARY_BYTES
02940     s = PAD_FORWARD(s);
02941 #endif
02942     set->finalizer(s);
02943   }
02944 #endif
02945   
02946   mem_real_use -= (k->end - k->start + sizeof(MemoryChunk));
02947   
02948 #if PRINT && 0
02949   FPRINTF(STDERR, "free chunk: %ld (%ld) %d %d\n", 
02950          (unsigned long)k, k->end - k->start,
02951          set->atomic, set->uncollectable);
02952 #endif
02953   
02954   next = k->next;
02955 
02956 #if KEEP_PREV_PTR
02957   if (next)
02958     next->prev_ptr = k->prev_ptr;
02959 #endif
02960 
02961 #if CHECK_FREES
02962   if (PTR_TO_INT(next) & (SECTOR_SEGMENT_SIZE - 1))
02963     free_error("bad next\n");
02964 #endif
02965 
02966   *prev = next;
02967 
02968   free_sector(k);
02969   --num_chunks;
02970 }
02971 
02972 void GC_free(void *p) 
02973 {
02974 #if PROVIDE_GC_FREE || PROVIDE_CHUNK_GC_FREE
02975   BlockOfMemory *block = NULL;
02976   MemoryChunk *chunk = NULL;
02977   int fpos;
02978   void *found;
02979   GC_Set *set;
02980 
02981 # if CHECK_COLLECTING && CHECK_FREES
02982   if (collecting_now)
02983     free_error("GC_free during collection\n");
02984 # endif
02985 
02986   found = find_ptr(p, NULL, &block, &fpos, &chunk, 1);
02987   if (!found) {
02988 # if CHECK_FREES
02989     char b[256];
02990     sprintf(b, "GC_free failed! %lx\n", (long)p);
02991     free_error(b);
02992 # endif
02993     return;
02994   }
02995 
02996   if (PAD_FORWARD(found) == p) {
02997     if (block) {
02998 # if PROVIDE_GC_FREE
02999       int i;
03000       int pos = POS_TO_FREE_INDEX(fpos);
03001       int fbit = POS_TO_FREE_BIT(fpos);
03002       int ubit = POS_TO_UNMARK_BIT(fpos);
03003 
03004 # if CHECK_FREES
03005       if (block->free[pos] & fbit) {
03006        char b[256];
03007        sprintf(b, "Block element already free! %lx\n", (long)p);
03008        return;
03009       }
03010 #   if EXTRA_FREE_CHECKS
03011       if (block->set_no != 4) {
03012        char b[256];
03013        sprintf(b, "GC_free on ptr from wrong block! %lx\n", (long)p);
03014        free_error(b);
03015        return;
03016       }
03017 #   endif
03018 #  endif
03019 
03020       block->free[pos] |= (fbit | ubit);
03021       if (block->free_search_start <= pos) {
03022        block->free_search_start = pos;
03023        block->free_search_bit = (FREE_BIT_START | UNMARK_BIT_START);
03024        block->free_search_offset = 0;
03025       }
03026 
03027       if (!initialized)
03028        GC_initialize();
03029 
03030       set = common_sets[block->set_no];
03031       
03032 #  if ALLOW_SET_FINALIZER
03033       if (set->finalizer)
03034        set->finalizer(p);
03035 #  endif
03036 
03037       {
03038        int size;
03039 #  if PAD_BOUNDARY_BYTES
03040        size = block->size - PAD_START_SIZE - PAD_END_SIZE;
03041        ((long *)found)[1] = 0; /* 0 extra */
03042 #  else
03043        size = block->size;
03044 #  endif
03045 
03046        /* Clear, since collection scans whole block. */
03047        memset(p, 0, size);
03048       }
03049 
03050       /* Make sure this block is reachable from block_ends: */
03051       i = size_index_map[(block->size >> LOG_PTR_SIZE) - 1];
03052       if (set->block_ends[i] != block)
03053        set->block_ends[i] = set->blocks[i];
03054 
03055       if (set->uncollectable)
03056        mem_uncollectable_use -= block->size;
03057 # endif
03058     } else {
03059       if (!initialized)
03060        GC_initialize();
03061 
03062 # if CHECK_FREES && EXTRA_FREE_CHECKS
03063       if (chunk->set_no != 4) {
03064        char b[256];
03065        sprintf(b, "GC_free on ptr from wrong block! %lx\n", (long)p);
03066        free_error(b);
03067        return;
03068       }
03069 # endif
03070       set = common_sets[chunk->set_no];
03071       if (set->uncollectable)
03072        mem_uncollectable_use -= (chunk->end - chunk->start);
03073       free_chunk(chunk, chunk->prev_ptr, set);
03074     }
03075   }
03076 # if CHECK_FREES
03077   else {
03078     char b[256];
03079     sprintf(b, "GC_free on block interior! %lx != %lx\n", 
03080            (long)p, (long)PAD_FORWARD(found));
03081     free_error(b);
03082   }
03083 # endif
03084 #endif
03085 }
03086 
03087 /******************************************************************/
03088 
03089 #if CHECK
03090 static long cmn_count, chk_count;
03091 #endif
03092 
03093 #if ALLOW_TRACE_COUNT
03094 static int collecting_with_trace_count;
03095 
03096 #define TRACE_COLLECT_SWITCH !collecting_with_trace_count
03097 #else
03098 #define TRACE_COLLECT_SWITCH 1
03099 #endif
03100 
03101 #if ALLOW_TRACE_PATH
03102 static int collecting_with_trace_path;
03103 static char *current_trace_source;
03104 
03105 /* Buffer used to store paths, since allocation is not allowed: */
03106 # define TRACE_PATH_BUFFER_SIZE 1048576
03107 static void *trace_path_buffer[TRACE_PATH_BUFFER_SIZE];
03108 static int trace_path_buffer_pos;
03109 #endif
03110 
03111 #if PAD_BOUNDARY_BYTES
03112 static void bad_pad(char *where, void *s, int type, long sz, long diff, long offset, 
03113                   long pd, long expect)
03114 {
03115   FPRINTF(STDERR,
03116          "pad %s violation at %lx <%d>, len %ld (diff %ld+%ld): %lx != %lx\n", 
03117          where, (unsigned long)s, type, sz, diff, offset, pd, expect);
03118 }
03119 #endif
03120 
03121 static void collect_init_chunk(MemoryChunk *c, int uncollectable, int ty)
03122 {
03123   for (; c; c = c->next) {
03124     if (uncollectable && TRACE_COLLECT_SWITCH)
03125       c->marked = 1;
03126     else
03127       c->marked = 0;
03128 
03129 #if PAD_BOUNDARY_BYTES
03130     /* Test padding: */
03131     {
03132       void *s = INT_TO_PTR(c->start);
03133       long pd, sz, diff;
03134       sz = c->end - c->start;
03135       diff = ((long *)s)[1];
03136       pd = *(long *)s;
03137       if (pd != PAD_PATTERN)
03138        bad_pad("start", s, ty, sz, diff, 0, pd, PAD_PATTERN);
03139       pd = *(long *)INT_TO_PTR(c->end - PAD_END_SIZE);
03140       if (pd != PAD_PATTERN)
03141        bad_pad("end1", s, ty, sz, diff, 0, pd, PAD_PATTERN);
03142       pd = *(long *)INT_TO_PTR(c->end - PAD_END_SIZE + sizeof(long));
03143       if (pd != PAD_PATTERN)
03144        bad_pad("end2", s, ty, sz, diff, 0, pd, PAD_PATTERN);
03145       if (diff) {
03146        /* Given was bigger than requested; check extra bytes: */
03147        unsigned char *ps = ((unsigned char *)s) + sz - PAD_END_SIZE - diff;
03148        long d = 0;
03149        while (d < diff) {
03150          if (*ps != PAD_FILL_PATTERN) {
03151            bad_pad("extra", s, ty, sz, diff, d, *ps, PAD_FILL_PATTERN);
03152          }
03153          ps++;
03154          d++;
03155        }
03156       }
03157     }
03158 #endif
03159 
03160 #if CHECK
03161     chk_count++;
03162     if ((!low_plausible || (c->start < low_plausible))
03163        || (!high_plausible || (c->end > high_plausible)))
03164       FPRINTF(STDERR, "implausible chunk!\n");
03165 #endif
03166   }
03167 }
03168 
03169 #if FINISH_STATS
03170 # define FINISH_STATISTIC(x) x
03171 static int num_finish_chunk_stat;
03172 static int num_finish_chunkkeep_stat;
03173 static int num_finish_chunkfree_stat;
03174 static int num_finish_block_stat;
03175 static int num_finish_blockkeep_stat;
03176 static int num_finish_blockfree_stat;
03177 static int num_finish_blockadjust_stat;
03178 static int num_finish_blockfiltercycles_stat;
03179 static int num_finishes_stat;
03180 #else
03181 # define FINISH_STATISTIC(x)
03182 #endif
03183 
03184 static void collect_finish_chunk(MemoryChunk **c, GC_Set *set)
03185 {
03186   unsigned long local_low_plausible;
03187   unsigned long local_high_plausible;
03188 
03189   local_low_plausible = low_plausible;
03190   local_high_plausible = high_plausible;
03191 
03192   while (*c) {
03193     MemoryChunk *k = *c;
03194 
03195     FINISH_STATISTIC(num_finish_chunk_stat++);
03196 
03197     if (k->marked) {
03198       c = &k->next;
03199 
03200       FINISH_STATISTIC(num_finish_chunkkeep_stat++);
03201 
03202       if (!local_low_plausible || (k->start < local_low_plausible))
03203        local_low_plausible = k->start;
03204       if (!local_high_plausible || (k->end > local_high_plausible))
03205        local_high_plausible = k->end;     
03206     } else {
03207       FINISH_STATISTIC(num_finish_chunkfree_stat++);
03208 
03209       free_chunk(k, c, set);
03210     }
03211   }
03212 
03213   low_plausible = local_low_plausible;
03214   high_plausible = local_high_plausible;
03215 }
03216 
03217 static void collect_init_common(BlockOfMemory **blocks, int uncollectable, int ty)
03218 {
03219   int i, j;
03220   int boundary, boundary_val = 0;
03221 
03222   for (i = 0; i < NUM_COMMON_SIZE; i++) {
03223     BlockOfMemory *block = blocks[i];
03224 
03225     while (block) {
03226 #if CHECK
03227       cmn_count++;
03228       if ((!low_plausible || (block->start < low_plausible))
03229          || (!high_plausible || (block->end > high_plausible)))
03230        FPRINTF(STDERR, "implausible block!\n");
03231 #endif
03232 
03233 #if STAMP_AND_REMEMBER_SOURCE
03234       block->low_marker = block->high_marker = 0;
03235 #endif
03236 
03237 #if PAD_BOUNDARY_BYTES
03238       /* Test padding: */
03239       {
03240        unsigned long p;
03241        long size = size_map[i];
03242        
03243        for (p = block->start; p < block->top; p += size) {
03244          void *s = INT_TO_PTR(p);
03245          long pd, diff;
03246          pd = *(long *)s;
03247          diff = ((long *)s)[1];
03248          if (pd != PAD_PATTERN)
03249            bad_pad("start", s, ty, size, diff, 0, pd, PAD_PATTERN);
03250          pd = *(long *)INT_TO_PTR(p + size - PAD_END_SIZE);
03251          if (pd != PAD_PATTERN)
03252            bad_pad("end1", s, ty, size, diff, 0, pd, PAD_PATTERN);
03253          pd = *(long *)INT_TO_PTR(p + size - PAD_END_SIZE + sizeof(long));
03254          if (pd != PAD_PATTERN)
03255            bad_pad("end2", s, ty, size, diff, 0, pd, PAD_PATTERN);
03256          if (diff) {
03257            /* Given was bigger than requested; check extra bytes: */
03258            unsigned char *ps = ((unsigned char *)s) + size - PAD_END_SIZE - diff;
03259            long d = 0;
03260            while (d < diff) {
03261              if (*ps != PAD_FILL_PATTERN) {
03262               bad_pad("extra", s, ty, size, diff, d, *ps, PAD_FILL_PATTERN);
03263              }
03264              ps++;
03265              d++;
03266            }
03267          }
03268        }
03269       }
03270 #endif
03271 
03272       if (uncollectable && TRACE_COLLECT_SWITCH) {
03273        for (j = ELEM_PER_BLOCK(block); j-- ; ) {
03274 #if DISTINGUISH_FREE_FROM_UNMARKED
03275          block->free[j] = SHIFT_COPY_FREE_TO_UNMARKED(block->free[j]);
03276 #else
03277          block->free[j] = 0;
03278 #endif
03279        }
03280       } else {
03281        if (block->top < block->end) {
03282          int pos = block->positions[(block->top - block->start) >> LOG_PTR_SIZE];
03283          boundary = POS_TO_UNMARK_INDEX(pos);
03284          boundary_val = (POS_TO_UNMARK_BIT(pos) - 1) & ALL_UNMARKED;
03285        } else {
03286          boundary = ELEM_PER_BLOCK(block);
03287        }
03288 
03289        for (j = ELEM_PER_BLOCK(block); j-- ; ) {
03290          if (j < boundary)
03291            block->free[j] |= ALL_UNMARKED;
03292          else if (j == boundary)
03293            block->free[j] = boundary_val;
03294          else
03295            block->free[j] = 0;
03296        }
03297       }
03298 
03299       block = block->next;
03300     }
03301   }
03302 }
03303 
03304 static void collect_finish_common(BlockOfMemory **blocks, 
03305                               BlockOfMemory **block_ends, 
03306                               GC_Set *set)
03307 {
03308   int i;
03309 #if KEEP_BLOCKS_FOREVER
03310   int kept;
03311 #endif
03312   unsigned long local_low_plausible;
03313   unsigned long local_high_plausible;
03314 
03315   local_low_plausible = low_plausible;
03316   local_high_plausible = high_plausible;
03317 
03318   for (i = 0; i < NUM_COMMON_SIZE; i++) {
03319     BlockOfMemory **prev = &blocks[i];
03320     BlockOfMemory *block = *prev;
03321 #if CHECK
03322     long size = size_map[i];
03323 #endif
03324 
03325 #if KEEP_BLOCKS_FOREVER
03326     kept = 0;
03327 #endif
03328 
03329     while (block) {
03330       int unfree;
03331 
03332       FINISH_STATISTIC(num_finish_block_stat++);
03333       
03334 #if CHECK
03335       if (block->end < block->start
03336          || block->top < block->start
03337          || block->top > block->end)
03338        FPRINTF(STDERR,
03339               "bad block: %ld %ld %ld %ld\n",
03340               size, block->start, block->top, block->end);
03341 #endif
03342 
03343 #if ALLOW_SET_FINALIZER
03344       if (set->finalizer) {
03345        unsigned long s;
03346        int j;
03347        for (j = 0, s = block->start; s < block->top; s += block->size, j++) {
03348          int pos = POS_TO_UNMARK_INDEX(j);
03349          int bit = POS_TO_UNMARK_BIT(j);
03350 
03351          if (NOT_MARKED(block->free[pos] & bit)) {
03352            void *p = INT_TO_PTR(s);
03353 #if PAD_BOUNDARY_BYTES
03354            p = PAD_FORWARD(p);
03355 #endif
03356            set->finalizer(p);
03357          }
03358        }
03359       }
03360 #endif
03361 
03362       unfree = 0;
03363       {
03364        int j;
03365        for (j = ELEM_PER_BLOCK(block); j-- ; ) {
03366          FINISH_STATISTIC(num_finish_blockfiltercycles_stat++);
03367          if ((block->free[j] & ALL_UNMARKED) != ALL_UNMARKED) {
03368            unfree = j + 1;
03369            break;
03370          }
03371        }
03372       }
03373 
03374 #if KEEP_BLOCKS_FOREVER
03375       if (!unfree && (kept < KEEP_BLOCKS_FOREVER)) {
03376        int j;
03377        block->top = block->start;
03378        for (j = ELEM_PER_BLOCK(block); j-- ; )
03379          block->free[j] = 0;
03380        kept++;
03381        unfree = 1;
03382       }
03383 #endif
03384 
03385       if (!unfree) {
03386        FINISH_STATISTIC(num_finish_blockfree_stat++);
03387 
03388        --num_blocks;
03389 
03390        *prev = block->next;
03391        free_sector(block);
03392        mem_real_use -= SECTOR_SEGMENT_SIZE;
03393        block = *prev;
03394       } else {
03395 #if DISTINGUISH_FREE_FROM_UNMARKED
03396        /* If it's unmarked, free it: */
03397        int j;
03398 
03399        for (j = ELEM_PER_BLOCK(block); j-- ; )
03400          block->free[j] |= SHIFT_UNMARK_TO_FREE(block->free[j]);
03401 #endif
03402 
03403        /* Push down block->top if it's easy */
03404        {
03405          unsigned long dt = (unfree << LOG_FREE_BIT_PER_ELEM) * (unsigned long)block->size;
03406          if (block->top > block->start + dt) {
03407            int k;
03408            FINISH_STATISTIC(num_finish_blockadjust_stat++);
03409            block->top = block->start + dt;
03410            for (k = ELEM_PER_BLOCK(block); --k >= unfree; ) {
03411              block->free[k] = 0;
03412            }
03413          }
03414        }
03415        
03416        block->free_search_start = unfree - 1;
03417        block->free_search_bit = (FREE_BIT_START | UNMARK_BIT_START);
03418        block->free_search_offset = 0;
03419 
03420        FINISH_STATISTIC(num_finish_blockkeep_stat++);
03421 
03422        if (!local_low_plausible || (block->start < local_low_plausible))
03423          local_low_plausible = block->start;
03424        if (!local_high_plausible || (block->end > local_high_plausible))
03425          local_high_plausible = block->end;
03426 
03427        prev = &block->next;
03428        block = block->next;
03429       }
03430     }
03431 
03432     block_ends[i] = blocks[i];
03433   }
03434 
03435   low_plausible = local_low_plausible;
03436   high_plausible = local_high_plausible;
03437 }
03438 
03439 static int collect_stack_count;
03440 static int collect_stack_size;
03441 static unsigned long *collect_stack;
03442 
03443 #define INITIAL_COLLECT_STACK_SIZE 8192
03444 
03445 #if KEEP_DETAIL_PATH
03446 # define PUSH_SRC(src) collect_stack[collect_stack_count++] = src;
03447 # define LOCAL_PUSH_SRC(src) local_collect_stack[local_collect_stack_count++] = src;
03448 # define COLLECT_STACK_FRAME_SIZE 3
03449 #else
03450 # define PUSH_SRC(src) /*empty*/
03451 # define LOCAL_PUSH_SRC(src) /*empty*/
03452 # define COLLECT_STACK_FRAME_SIZE 2
03453 #endif
03454 
03455 static void push_collect(unsigned long start, unsigned long end, unsigned long src)
03456 {
03457   if (collect_stack_count >= collect_stack_size) {
03458     long oldsize;
03459 
03460     if (collect_stack)
03461       oldsize = sizeof(unsigned long) * (collect_stack_size + (COLLECT_STACK_FRAME_SIZE - 1));
03462     else
03463       oldsize = 0;
03464 
03465     collect_stack_size = collect_stack_size ? 2 * collect_stack_size : 500;
03466     collect_stack = (unsigned long *)realloc_collect_temp(collect_stack, 
03467                                                    oldsize, 
03468                                                    sizeof(unsigned long) 
03469                                                    * (collect_stack_size + (COLLECT_STACK_FRAME_SIZE - 1)));
03470     /* fprintf(stderr, "grow push stack: %d\n", collect_stack_size); */
03471   }
03472 
03473   collect_stack[collect_stack_count++] = start;
03474   collect_stack[collect_stack_count++] = end;
03475   PUSH_SRC(src)
03476 }
03477 
03478 #define PUSH_COLLECT(s, e, src) \
03479   if (collect_stack_count < collect_stack_size) { \
03480     collect_stack[collect_stack_count++] = s; \
03481     collect_stack[collect_stack_count++] = e + 1 - PTR_ALIGNMENT; \
03482     PUSH_SRC(src) \
03483   } else \
03484     push_collect(s, e + 1 - PTR_ALIGNMENT, src);
03485 
03486 #define LOCAL_PUSH_COLLECT(s, e, src) \
03487   if (local_collect_stack_count < local_collect_stack_size) { \
03488     local_collect_stack[local_collect_stack_count++] = s; \
03489     local_collect_stack[local_collect_stack_count++] = e + 1 - PTR_ALIGNMENT; \
03490     LOCAL_PUSH_SRC(src) \
03491   } else { \
03492     collect_stack_count = local_collect_stack_count; \
03493     push_collect(s, e + 1 - PTR_ALIGNMENT, src); \
03494     local_collect_stack = collect_stack; \
03495     local_collect_stack_count = collect_stack_count; \
03496     local_collect_stack_size = collect_stack_size; \
03497   }
03498 
03499 #if ALLOW_TRACE_COUNT || ALLOW_TRACE_PATH
03500 
03501 typedef struct {
03502   int count, size;
03503   unsigned long *stack;
03504 } TraceStack;
03505 
03506 static void init_trace_stack(TraceStack *s)
03507 {
03508   s->size = s->count = 0;
03509   s->stack = NULL;
03510 }
03511 
03512 static void done_trace_stack(TraceStack *s)
03513 {
03514   if (s->stack)
03515     free_collect_temp(s->stack, sizeof(unsigned long)*(s->size + 1));
03516 }
03517 
03518 static void push_trace_stack(unsigned long v, TraceStack *s)
03519 {
03520   if (s->count >= s->size) {
03521     long oldsize;
03522 
03523     if (s->stack)
03524       oldsize = sizeof(unsigned long)*(s->size + 1);
03525     else
03526       oldsize = 0;
03527 
03528     s->size = s->size ? 2 * s->size : 500;
03529     s->stack = (unsigned long *)realloc_collect_temp(s->stack,
03530                                                oldsize,
03531                                                sizeof(unsigned long)*(s->size + 1));
03532   }
03533 
03534   s->stack[s->count++] = v;
03535 }
03536 
03537 #define PUSH_TS(v, s) \
03538   if (s.count < s.size) { \
03539     s.stack[s.count++] = (unsigned long)(v); \
03540   } else \
03541     push_trace_stack((unsigned long)(v), &s);
03542 
03543 #define POP_TS(s) (s.stack[--s.count])
03544 
03545 #endif
03546 
03547 #if ALLOW_TRACE_COUNT
03548 
03549 TraceStack collect_trace_stack, collect_wait_trace_stack;
03550 
03551 static int collect_start_tracing;
03552 static int collect_end_tracing;
03553 static int collect_trace_count;
03554 
03555 #define PUSH_TRACE(v) PUSH_TS(v, collect_trace_stack)
03556 #define PUSH_WAIT_TRACE(v) PUSH_TS(v, collect_wait_trace_stack)
03557 
03558 #define POP_TRACE() POP_TS(collect_trace_stack)
03559 #define POP_WAIT_TRACE() POP_TS(collect_wait_trace_stack)
03560 
03561 #endif
03562 
03563 #if ALLOW_TRACE_PATH
03564 
03565 TraceStack collect_trace_path_stack;
03566 
03567 static int collect_end_path_elem;
03568 
03569 #define PUSH_PATH_ELEM(v) PUSH_TS(v, collect_trace_path_stack)
03570 
03571 #define POP_PATH_ELEM() POP_TS(collect_trace_path_stack)
03572 
03573 #define PATH_ELEM_STACK_NONEMPTY() (collect_trace_path_stack.count)
03574 
03575 #endif
03576 
03577 #if CHECK_SKIP_MARK_AT_FIRST
03578 static int collect_start_disable_mark_skip;
03579 int (*skip_mark_at_first)(void *, size_t);
03580 #endif
03581 
03582 #if MARK_STATS
03583 static int num_pairs_stat;
03584 static int num_checks_stat;
03585 static int num_interior_checks_stat;
03586 static int num_plausibles_stat;
03587 static int num_pages_stat;
03588 static int num_blocks_stat;
03589 static int num_blockallocs_stat;
03590 static int num_blockaligns_stat;
03591 static int num_blockmarks_stat;
03592 static int num_blockpushes_stat;
03593 static int num_blockpushes_tail_stat;
03594 static int num_chunks_stat;
03595 static int num_chunkmarks_stat;
03596 #endif
03597 
03598 #define COLLECT semi_collect_stack
03599 #define STACK_TRACE
03600 #include "collect.inc"
03601 
03602 static void prepare_stack_collect()
03603 {
03604   unsigned long s, e;
03605   unsigned long source;
03606 #if KEEP_DETAIL_PATH
03607   source = collect_stack[--collect_stack_count];
03608 #else
03609   source = 0;
03610 #endif
03611   e = collect_stack[--collect_stack_count];
03612   s = collect_stack[--collect_stack_count];
03613   e += PTR_ALIGNMENT - 1;
03614 
03615   PUSH_COLLECT(s, e, source);
03616   semi_collect_stack(0);
03617 
03618 #if !NO_STACK_OFFBYONE
03619   PUSH_COLLECT(s, e, source);
03620   semi_collect_stack(-PTR_ALIGNMENT);
03621   /* Note: this nested-semi preparation can create trace paths of
03622      the form X->X->Y->Z->... */
03623 #endif
03624 }
03625 
03626 #define COLLECT collect
03627 #if CHECK_SIMPLE_INTERIOR_POINTERS
03628 # define FOLLOW_INTERIOR
03629 #endif
03630 # include "collect.inc"
03631 
03632 static jmp_buf buf;
03633 
03634 /* Sparc fix borrowed from SCM, so here's the copyright:  */
03635 /* Scheme implementation intended for JACAL.
03636    Copyright (C) 1990, 1991, 1992, 1993, 1994 Aubrey Jaffer. */
03637 /* James Clark came up with this neat one instruction fix for
03638    continuations on the SPARC.  It flushes the register windows so
03639    that all the state of the process is contained in the stack. */
03640 #ifdef sparc
03641 #define FLUSH_REGISTER_WINDOWS asm("ta 3")
03642 #else
03643 #define FLUSH_REGISTER_WINDOWS /* empty */
03644 #endif
03645 
03646 static void push_stack(void *stack_now)
03647 {
03648   unsigned long start, end;
03649 
03650   start = PTR_TO_INT(GC_stackbottom);
03651   end = PTR_TO_INT(stack_now);
03652 
03653 #if PRINT && STAMP_AND_REMEMBER_SOURCE
03654   FPRINTF(STDERR, "stack in [%lx, %lx]\n", start, end);
03655 #endif
03656 
03657   if (start < end) {
03658     PUSH_COLLECT(start, end, 0);
03659   } else {
03660     PUSH_COLLECT(end, start, 0);
03661   }
03662 
03663 #if DUMP_BLOCK_MAPS
03664   trace_stack_start = collect_stack[collect_stack_count - COLLECT_STACK_FRAME_SIZE];
03665   trace_stack_end = collect_stack[collect_stack_count - (COLLECT_STACK_FRAME_SIZE - 1)];
03666 #endif
03667 
03668   prepare_stack_collect();
03669 
03670   start = PTR_TO_INT((void *)&buf);
03671   end = start + sizeof(buf);
03672   PUSH_COLLECT(start, end, 0);
03673 
03674 #if DUMP_BLOCK_MAPS
03675   trace_reg_start = collect_stack[collect_stack_count - COLLECT_STACK_FRAME_SIZE];
03676   trace_reg_end = collect_stack[collect_stack_count - (COLLECT_STACK_FRAME_SIZE - 1)];
03677 #endif
03678 
03679   prepare_stack_collect();
03680 
03681 #if PRINT && STAMP_AND_REMEMBER_SOURCE
03682   FPRINTF(STDERR, "jmpbuf in [%lx, %lx]\n", start, end);
03683 #endif
03684 }
03685 
03686 #if ALLOW_SET_LOCKING
03687 static void push_locked_chunk(MemoryChunk *c, int atomic)
03688 {
03689   for (; c; c = c->next) {
03690     unsigned long size = (c->end - c->start);
03691     mem_use += size;
03692     collect_trace_count += size;
03693     if (!atomic) {
03694       PUSH_COLLECT(c->start, c->end, 0);
03695     }
03696   }
03697 }
03698 
03699 static void push_locked_common(BlockOfMemory **blocks, int atomic)
03700 {
03701   int i;
03702 
03703   for (i = 0; i < NUM_COMMON_SIZE; i++) {
03704     BlockOfMemory *block = blocks[i];
03705     
03706     for (; block; block = block->next) {
03707       unsigned long size = block->size;
03708       unsigned long start = block->start;
03709       unsigned long top = block->top;
03710       int j;
03711       
03712       for (j = 0; start < top; start += size, j++) {
03713        int bit = POS_TO_UNMARK_BIT(j);
03714        int pos = POS_TO_UNMARK_INDEX(j);
03715        if (IS_MARKED(block->free[pos] & bit)) {
03716          if (!atomic) {
03717            PUSH_COLLECT(start, start + size, 0);
03718          }
03719          mem_use += size;
03720          collect_trace_count += size;
03721        }
03722       }
03723     }
03724   }
03725 }
03726 
03727 #endif
03728 
03729 static void push_uncollectable_chunk(MemoryChunk *c, GC_Set *set)
03730 {
03731 #if ALLOW_TRACE_COUNT
03732   if (!collecting_with_trace_count
03733       || !c
03734       || !set->count_tracer) {
03735 #endif
03736     for (; c; c = c->next) {
03737 #if ALLOW_TRACE_COUNT
03738       if (!c->marked) {
03739        if (collecting_with_trace_count) {
03740          c->marked = 1;
03741          collect_trace_count += (c->end - c->start);
03742        }
03743        if (!set->atomic) {
03744 #endif      
03745          PUSH_COLLECT(c->start, c->end, 0);
03746 #if ALLOW_TRACE_COUNT
03747        }
03748       } else {
03749        /* It got marked the normal way; deduct the size. */
03750        mem_use -= (c->end - c->start);
03751       }
03752 #endif
03753     }
03754 #if ALLOW_TRACE_COUNT
03755   } else {
03756     int save_count = collect_trace_count;
03757     for (; c; c = c->next) {
03758       if (!c->marked) {
03759        void *s;
03760        c->marked = 1;
03761        collect_trace_count = 0;
03762        if (!c->atomic) {
03763          PUSH_COLLECT(c->start, c->end, 0);
03764          collect();
03765        }
03766        collect_trace_count += (c->end - c->start);
03767 
03768        s = INT_TO_PTR(c->start);
03769 #if PAD_BOUNDARY_BYTES
03770        s = PAD_FORWARD(s);
03771 #endif
03772        set->count_tracer(s, collect_trace_count);
03773        mem_traced += collect_trace_count;
03774       } else {
03775        /* It got marked the normal way; deduct the size. */
03776        mem_use -= (c->end - c->start);
03777       }
03778     }
03779     collect_trace_count = save_count;
03780   }
03781 #endif
03782 }
03783 
03784 static void push_uncollectable_common(BlockOfMemory **blocks, GC_Set *set)
03785 {
03786   int i;
03787 
03788 #if ALLOW_TRACE_COUNT
03789   if (!collecting_with_trace_count) {
03790 #endif
03791     for (i = 0; i < NUM_COMMON_SIZE; i++) {
03792       BlockOfMemory *block = blocks[i];
03793       
03794       while (block) {
03795        PUSH_COLLECT(block->start, block->top, 0);
03796        block = block->next;
03797       }
03798     }
03799 #if ALLOW_TRACE_COUNT
03800   } else {
03801     int save_count = collect_trace_count;
03802 
03803     for (i = 0; i < NUM_COMMON_SIZE; i++) {
03804       BlockOfMemory *block = blocks[i];
03805       
03806       while (block) {
03807        unsigned long size = block->size;
03808        unsigned long start = block->start;
03809        unsigned long top = block->top;
03810        int j;
03811        
03812        for (j = 0; start < top; start += size, j++) {
03813          int bit;
03814          int pos;
03815          int fbit;
03816 
03817          pos = POS_TO_UNMARK_INDEX(j);
03818          bit = POS_TO_UNMARK_BIT(j);
03819          fbit = POS_TO_FREE_BIT(j);
03820 
03821          if (NOT_MARKED(block->free[pos] & bit)
03822              && _NOT_FREE(block->free[pos] & fbit)) {
03823            block->free[pos] -= bit;
03824            if (set->count_tracer)
03825              collect_trace_count = 0;
03826            else
03827              collect_trace_count += size;
03828            if (!block->atomic) {
03829              PUSH_COLLECT(start, start + size, 0);
03830              collect();
03831            }
03832            if (set->count_tracer) {
03833              void *s;
03834              collect_trace_count += size;
03835              s = INT_TO_PTR(start);
03836 #if PAD_BOUNDARY_BYTES
03837              s = PAD_FORWARD(s);
03838 #endif
03839              set->count_tracer(s, collect_trace_count);
03840              mem_traced += collect_trace_count;
03841            }
03842          } else {
03843            /* It got marked the normal way; deduct the size. */
03844            mem_use -= size;
03845          }
03846        }
03847 
03848        block = block->next;
03849       }
03850     }
03851 
03852     if (set->count_tracer)
03853       collect_trace_count = save_count;
03854   }
03855 #endif
03856 }
03857 
03858 
03859 static void push_collect_ignore(unsigned long s, unsigned long e, 
03860                             unsigned long a)
03861 /* Like PUSH_COLLECT, but immediate references to `a' are avoided */
03862 {
03863   unsigned long push_from = s;
03864 
03865 #if PAD_BOUNDARY_BYTES
03866   a = PTR_TO_INT(PAD_FORWARD(INT_TO_PTR(a)));
03867 #endif
03868 
03869   for (; s < e; s += PTR_ALIGNMENT) {
03870     void *d = *(void **)INT_TO_PTR(s);
03871     unsigned long p = PTR_TO_INT(d);
03872 
03873     if (p == a) {
03874       if (push_from != s) {
03875        PUSH_COLLECT(push_from, s, a);
03876       }
03877       push_from = s + PTR_ALIGNMENT;
03878     }
03879   }
03880 
03881   if (push_from != s) {
03882     PUSH_COLLECT(push_from, s, a);
03883   }
03884 }
03885 
03886 static void mark_chunks_for_finalizations(MemoryChunk *c)
03887 {
03888   for (; c; c = c->next) {
03889     Finalizer *fn = c->finalizers;
03890 
03891     if (fn) {
03892       /* Always mark data associated with finalization: */
03893       unsigned long p = PTR_TO_INT(&fn->data);
03894       PUSH_COLLECT(p, p + PTR_SIZE, 0);
03895 
03896       /* If not eager, mark data reachable from finalized block: */
03897       if (!fn->eager_level && !c->marked && !c->atomic) {
03898        if (fn->ignore_self)
03899          push_collect_ignore(c->start, c->end, c->start);
03900        else {
03901          PUSH_COLLECT(c->start, c->end, 0);
03902        }
03903       }
03904     }
03905   }
03906 
03907   collect();
03908 }
03909 
03910 static void mark_common_for_finalizations(BlockOfMemory **blocks, int atomic)
03911 {
03912   int i;
03913 
03914   for (i = 0; i < NUM_COMMON_SIZE; i++) {
03915     BlockOfMemory *block = blocks[i];
03916     for (; block; block = block->next) {
03917       Finalizer *fn = block->finalizers;
03918       for (; fn ; fn = fn->next) {
03919        unsigned long p;
03920          
03921        /* Always mark data associated with finalization: */
03922        p = PTR_TO_INT(&fn->data);
03923        PUSH_COLLECT(p, p + PTR_SIZE, 0);
03924 
03925        /* If not eager, mark data reachable from finalized block: */
03926        if (!fn->eager_level) {
03927          int pos, apos;
03928          int bit, fbit;
03929 
03930          pos = fn->u.pos;
03931          apos = POS_TO_UNMARK_INDEX(pos);
03932          bit = POS_TO_UNMARK_BIT(pos);
03933          fbit = POS_TO_FREE_BIT(pos);
03934          
03935          if (NOT_MARKED(block->free[apos] & bit)
03936              && _NOT_FREE(block->free[apos] & fbit)) {
03937            int size = block->size;
03938            
03939            if (!atomic) {
03940              p = block->start + (pos * size);
03941              if (fn->ignore_self)
03942               push_collect_ignore(p, p + size, p);
03943              else {
03944               PUSH_COLLECT(p, p + size, 0);
03945              }
03946 
03947 #if WATCH_FOR_FINALIZATION_CYCLES
03948              collect();
03949              if (IS_MARKED(block->free[apos] & bit))
03950               FPRINTF(STDERR, "cycle: %lx\n", p);
03951 #endif
03952            }
03953          }
03954        }
03955       }
03956     }
03957   }
03958 
03959   collect();
03960 }
03961 
03962 static void enqueue_fn(Finalizer *fn)
03963 {
03964   /* DO NOT COLLECT FROM collect_stack DURING THIS PROCEDURE */
03965 
03966   unsigned long p;
03967 
03968   num_queued_finalizers++;
03969 
03970   if (last_queued_finalizer) {
03971     fn->prev = last_queued_finalizer;
03972     fn->prev->next = fn;
03973     fn->next = NULL;
03974   } else {
03975     fn->next = queued_finalizers;
03976     if (fn->next)
03977       fn->next->prev = fn;
03978     queued_finalizers = fn;
03979   }
03980   last_queued_finalizer = fn;
03981 
03982   /* Need to mark watched as in-use, now: */
03983   /* (if this finalizer is eager, block contents are now marked too) */
03984   p = PTR_TO_INT(&fn->u.watch);
03985   PUSH_COLLECT(p, p + PTR_SIZE, 0);
03986 }
03987 
03988 static void queue_chunk_finalizeable(MemoryChunk *c, int eager_level)
03989 {
03990   /* DO NOT COLLECT FROM collect_stack DURING THIS PROCEDURE */
03991 
03992   for (; c; c = c->next) {
03993     if (c->finalizers && !c->marked) {
03994       Finalizer *fn = c->finalizers;
03995 
03996       if (fn->eager_level == eager_level) {
03997        c->finalizers = NULL;
03998 
03999        fn->u.watch = INT_TO_PTR(c->start);
04000        enqueue_fn(fn);
04001 
04002        if (eager_level) {
04003          /* Always mark data associated with finalization: */
04004          unsigned long p = PTR_TO_INT(&fn->data);
04005          PUSH_COLLECT(p, p + PTR_SIZE, 0);
04006        }
04007       }
04008     }
04009   }
04010 }
04011 
04012 static void queue_common_finalizeable(BlockOfMemory **blocks, int eager_level)
04013 {
04014   /* DO NOT COLLECT FROM collect_stack DURING THIS PROCEDURE */
04015 
04016   int i;
04017   
04018   for (i = 0; i < NUM_COMMON_SIZE; i++) {
04019     BlockOfMemory *block = blocks[i];
04020     for (; block; block = block->next) {
04021       Finalizer *fn = block->finalizers, *next;
04022       
04023       for (; fn; fn = next) {
04024        int pos, apos;
04025        int bit;
04026          
04027        next = fn->next;
04028 
04029        pos = fn->u.pos;
04030        apos = POS_TO_UNMARK_INDEX(pos);
04031        bit = POS_TO_UNMARK_BIT(pos);
04032        
04033        if (NOT_MARKED(block->free[apos] & bit)) {
04034          unsigned long p;
04035        
04036          if (fn->eager_level == eager_level) {
04037            if (fn->prev)
04038              fn->prev->next = fn->next;
04039            else
04040              block->finalizers = fn->next;
04041            if (fn->next)
04042              fn->next->prev = fn->prev;
04043            
04044            p = block->start + (pos * block->size);
04045            fn->u.watch = INT_TO_PTR(p);
04046            enqueue_fn(fn);
04047 
04048            if (eager_level) {
04049              /* Always mark data associated with finalization: */
04050              p = PTR_TO_INT(&fn->data);
04051              PUSH_COLLECT(p, p + PTR_SIZE, 0);
04052            }
04053          }
04054        }
04055       }
04056     }
04057   }
04058 }
04059 
04060 static void do_disappearing(DisappearingLink **disappearing_ptr)
04061 {
04062   DisappearingLink *dl, *next, *disappearing;
04063   void *watch;
04064   int size;
04065 
04066   disappearing = *disappearing_ptr;
04067 
04068   for (dl = disappearing; dl; dl = next) {
04069     next = dl->next;
04070       
04071     watch = (dl->watch ? dl->watch : *dl->disappear);
04072     
04073     size = 0;
04074     if (watch && !find_ptr(watch, &size, NULL, NULL, NULL, 0)) {
04075       /* was the pointer allocated at all? */
04076       if (size) {
04077        /* It was allocated, and now it's gone: */
04078        if (dl->kind != dl_restored) {
04079          *dl->disappear = NULL;
04080          /* disappear is done */
04081          if (dl->prev)
04082            dl->prev->next = dl->next;
04083          else
04084            disappearing = dl->next;
04085          if (dl->next)
04086            dl->next->prev = dl->prev;
04087          
04088          mem_real_use -= sizeof(DisappearingLink);
04089          free_managed(dl);
04090          --GC_dl_entries;
04091        } else {
04092          /* We'll need to restore this one: */
04093          dl->saved_value = *dl->disappear;
04094          *dl->disappear = NULL;
04095        }
04096       }
04097     }
04098   }
04099 
04100   *disappearing_ptr = disappearing;
04101 }
04102 
04103 static void trim_disappearing(DisappearingLink **disappearing_ptr)
04104 {
04105   DisappearingLink *dl, *next, *disappearing;
04106 
04107   disappearing = *disappearing_ptr;
04108 
04109   for (dl = disappearing; dl; dl = next) {
04110     int size;
04111 
04112     next = dl->next;
04113     
04114     size = 0;
04115     if (!find_ptr(dl->disappear, &size, NULL, NULL, NULL, 0) && size) {
04116       /* Found it, but it was unmarked. Deregister disappearing. */
04117       if (dl->prev)
04118        dl->prev->next = dl->next;
04119       else
04120        disappearing = dl->next;
04121       if (dl->next)
04122        dl->next->prev = dl->prev;
04123 
04124       mem_real_use -= sizeof(DisappearingLink);
04125       free_managed(dl);
04126       --GC_dl_entries;
04127     }
04128   }
04129 
04130   *disappearing_ptr = disappearing;
04131 }
04132 
04133 static void do_disappear_and_finals()
04134 {
04135   DisappearingLink *dl, *next;
04136   Finalizer *fn;
04137   int j;
04138 
04139   /* Mark data in (not-yet-finalized) queued finalizable */
04140   for (fn = queued_finalizers; fn; fn = fn->next) {
04141     unsigned long p;
04142 
04143     p = PTR_TO_INT(&fn->u.watch);
04144     PUSH_COLLECT(p, p + PTR_SIZE, 0);
04145 
04146     p = PTR_TO_INT(&fn->data);
04147     PUSH_COLLECT(p, p + PTR_SIZE, 0);
04148   }
04149   collect();
04150   if (GC_push_last_roots_again) { GC_push_last_roots_again(); collect(); }
04151 
04152 #if !NO_DISAPPEARING
04153   /* Do disappearing: */
04154   do_disappearing(&disappearing);
04155 #endif
04156 
04157   /* Queue unreachable eager finalizable, level 1: */  
04158   /* DO NOT COLLECT FROM collect_stack UNTIL AFTER THIS LOOP */
04159   /* (Otherwise, some ready eager finalizations may not be queued.) */
04160   for (j = 0; j < num_common_sets; j++) {
04161     queue_chunk_finalizeable(*(common_sets[j]->othersptr), 1);
04162     queue_common_finalizeable(common_sets[j]->blocks, 1);
04163   }
04164   collect();
04165   if (GC_push_last_roots_again) { GC_push_last_roots_again(); collect(); }
04166 
04167   /* Queue unreachable eager finalizable, level 2: */  
04168   /* DO NOT COLLECT FROM collect_stack UNTIL AFTER THIS LOOP */
04169   for (j = 0; j < num_common_sets; j++) {
04170     queue_chunk_finalizeable(*(common_sets[j]->othersptr), 2);
04171     queue_common_finalizeable(common_sets[j]->blocks, 2);
04172   }
04173   collect();
04174   if (GC_push_last_roots_again) { GC_push_last_roots_again(); collect(); }
04175 
04176   /* Mark reachable from (non-eager) finalized blocks: */
04177   for (j = 0; j < num_common_sets; j++) {
04178     mark_chunks_for_finalizations(*(common_sets[j]->othersptr));
04179     mark_common_for_finalizations(common_sets[j]->blocks, common_sets[j]->atomic);
04180   }
04181 
04182   /* Queue unreachable (non-eager) finalizable: */  
04183   for (j = 0; j < num_common_sets; j++) {
04184     queue_chunk_finalizeable(*(common_sets[j]->othersptr), 0);
04185     queue_common_finalizeable(common_sets[j]->blocks, 0);
04186   }
04187   collect();
04188 
04189   /* Restore disappeared links where watch value is NULL: */
04190   for (dl = disappearing; dl; dl = next) {
04191     next = dl->next;
04192     if ((dl->kind == dl_restored) && dl->saved_value) {
04193       /* Restore disappearing value and deregister */
04194       *dl->disappear = dl->saved_value;
04195       dl->saved_value = NULL;
04196     }
04197   }
04198 
04199   if (GC_push_last_roots_again) { GC_push_last_roots_again(); collect(); }
04200 
04201   /* Deregister dangling disappearings: */
04202   trim_disappearing(&disappearing);
04203   trim_disappearing(&late_disappearing);
04204 
04205 #if !NO_DISAPPEARING
04206   /* Do late disappearing: */
04207   do_disappearing(&late_disappearing);
04208 #endif
04209 
04210   if (GC_custom_finalize)
04211     GC_custom_finalize();
04212 }
04213 
04214 static int compare_roots(const void *a, const void *b)
04215 {
04216   if (*(unsigned long *)a < *(unsigned long *)b)
04217     return -1;
04218   else
04219     return 1;
04220 }
04221 
04222 static void sort_and_merge_roots()
04223 {
04224   static int counter = 0;
04225   int i, offset, top;
04226 
04227   if (roots_count < 4)
04228     return;
04229 
04230   /* Only try this every 5 collections or so: */
04231   if (counter--)
04232     return;
04233   counter = 5;
04234 
04235   qsort(roots, roots_count >> 1, 2 * sizeof(unsigned long), compare_roots);
04236   offset = 0;
04237   top = roots_count;
04238   for (i = 2; i < top; i += 2) {
04239     if ((roots[i - 2 - offset] <= roots[i])
04240        && ((roots[i - 1 - offset] + (PTR_ALIGNMENT - 1)) >= roots[i])) {
04241       /* merge: */
04242       if (roots[i + 1] > roots[i - 1 - offset])
04243        roots[i - 1 - offset] = roots[i + 1];
04244       offset += 2;
04245       roots_count -= 2;
04246     } else if (offset) {
04247       /* compact: */
04248       roots[i - offset] = roots[i];
04249       roots[i + 1 - offset] = roots[i + 1];
04250     }
04251   }
04252 }
04253 
04254 static void run_finalizers(void)
04255 {
04256   static int doing = 0;
04257   Finalizer *fn;
04258   void *s;
04259 
04260   /* don't allow nested finalizations */
04261   if (doing)
04262     return;
04263   doing++;
04264 
04265 #if !NO_FINALIZING
04266   while (queued_finalizers) {
04267     fn = queued_finalizers;
04268     queued_finalizers = fn->next;
04269     if (!fn->next)
04270       last_queued_finalizer = NULL;
04271 
04272     --num_queued_finalizers;
04273 
04274     s = fn->u.watch;
04275     
04276 #if PAD_BOUNDARY_BYTES
04277     s = PAD_FORWARD(s);
04278 #endif
04279 
04280     fn->f(s, fn->data);
04281 
04282     mem_real_use -= sizeof(Finalizer);
04283     free_managed(fn);
04284     --GC_fo_entries;
04285   }
04286 #endif
04287 
04288   doing--;
04289 }
04290 
04291 #if ALLOW_TRACE_COUNT
04292 static int traced_from_roots, traced_from_stack, traced_from_uncollectable, traced_from_finals;
04293 #endif
04294 
04295 #if 0
04296 extern long scheme_get_milliseconds(void);
04297 # define GETTIME() scheme_get_milliseconds()
04298 #else
04299 extern long scheme_get_process_milliseconds(void);
04300 # define GETTIME() scheme_get_process_milliseconds()
04301 #endif
04302 
04303 #if TIME
04304 # define PRINTTIME(x) FPRINTF x
04305 static long started, rightnow, old;
04306 # define INITTIME() (started = GETTIME())
04307 # define GETTIMEREL() (rightnow = GETTIME(), old = started, started = rightnow, rightnow - old)
04308 #else
04309 # define INITTIME() /* empty */
04310 # define PRINTTIME(x) /* empty */
04311 #endif
04312 
04313 /* Immitate Boehm's private GC call; used by MzScheme */
04314 void GC_push_all_stack(void *sp, void *ep)
04315 {
04316   unsigned long s, e;
04317 
04318   s = PTR_TO_INT(sp);
04319   e = PTR_TO_INT(ep);
04320 
04321   PUSH_COLLECT(s, e, 0);
04322 
04323   prepare_stack_collect();
04324 }
04325 
04326 void GC_flush_mark_stack()
04327 {
04328   collect();  
04329 }
04330 
04331 #if PRINT_INFO_PER_GC
04332 static long last_gc_end;
04333 #endif
04334 
04335 static void do_GC_gcollect(void *stack_now)
04336 {
04337   long root_marked;
04338   int j;
04339 
04340 #if PRINT_INFO_PER_GC
04341   long orig_mem_use = mem_use;
04342   long start_time;
04343   start_time = GETTIME();
04344   FPRINTF(STDERR, "gc at %ld (%ld): %ld after %ld msecs\n",
04345          mem_use, sector_mem_use, 
04346 # if GET_MEM_VIA_SBRK
04347          (long)sbrk(0),
04348 # elif defined(WIN32) && AUTO_STATIC_ROOTS_IF_POSSIBLE
04349          total_memory_use(),
04350 # else
04351          (long)0,
04352 # endif
04353          start_time - last_gc_end);
04354 # if SHOW_SECTOR_MAPS_AT_GC
04355   dump_sector_map("");
04356 # endif
04357 #endif
04358 
04359   if (!GC_stackbottom) {
04360     /* Stack position not yet initialized; delay collection */
04361     if (mem_use)
04362       mem_limit = MEM_USE_FACTOR * mem_use;
04363     return;
04364   }
04365 
04366   if (!initialized)
04367     GC_initialize();
04368 
04369   if (!statics_setup)
04370     init_static_variables();
04371 
04372   if (GC_collect_start_callback)
04373     GC_collect_start_callback();
04374 
04375 #if CHECK_COLLECTING
04376   collecting_now = 1;
04377 #endif
04378 
04379 #if !NO_COLLECTIONS
04380 
04381 # if ALWAYS_TRACE && ALLOW_TRACE_COUNT
04382   collecting_with_trace_count = 1;
04383 # endif
04384 
04385 # if CHECK
04386   cmn_count = chk_count = 0;
04387 # endif
04388 
04389   INITTIME();
04390   PRINTTIME((STDERR, "gc: init start: %ld\n", GETTIMEREL()));
04391 
04392   for (j = 0; j < num_common_sets; j++) {
04393 # if ALLOW_SET_LOCKING
04394     if (!common_sets[j]->locked) {
04395 # endif
04396       collect_init_chunk(*(common_sets[j]->othersptr),
04397                       common_sets[j]->uncollectable,
04398                       j);
04399       collect_init_common(common_sets[j]->blocks,
04400                        common_sets[j]->uncollectable,
04401                        j);
04402 # if ALLOW_SET_LOCKING
04403     }
04404 # endif
04405   }
04406 
04407 # if CHECK
04408   if (num_chunks != chk_count) {
04409     FPRINTF(STDERR, "bad chunk count: %ld != %ld\n", num_chunks, chk_count);
04410   }
04411 
04412   if (num_blocks != cmn_count) {
04413     FPRINTF(STDERR, "bad block count: %ld != %ld\n", num_blocks, cmn_count);
04414   }
04415 # endif
04416 
04417 # if PRINT
04418   FPRINTF(STDERR, "gc at %ld (%ld)\n", mem_use, mem_real_use);
04419   FPRINTF(STDERR,
04420          "low: %lx hi: %lx blocks: %ld chunks: %ld\n", 
04421          low_plausible, high_plausible, 
04422          num_blocks, num_chunks);
04423 # endif
04424 
04425   mem_use = 0;
04426 
04427   sort_and_merge_roots();
04428 
04429 # if ALLOW_TRACE_COUNT
04430   init_trace_stack(&collect_trace_stack);
04431   init_trace_stack(&collect_wait_trace_stack);
04432   collect_start_tracing = 0;
04433   collect_end_tracing = -1;
04434 # endif
04435 # if ALLOW_TRACE_PATH
04436   init_trace_stack(&collect_trace_path_stack);
04437 # endif
04438 
04439   prepare_collect_temp();
04440 
04441   /*** Mark from roots ***/
04442   collect_stack_size = roots_count ? COLLECT_STACK_FRAME_SIZE * roots_count : 10;
04443   if (collect_stack_size < INITIAL_COLLECT_STACK_SIZE)
04444     collect_stack_size = INITIAL_COLLECT_STACK_SIZE;
04445   collect_stack_count = 0;
04446   collect_stack = (unsigned long *)realloc_collect_temp(NULL,
04447                                                  0,
04448                                                  sizeof(unsigned long) 
04449                                                  * (collect_stack_size + 2));
04450 
04451   for (j = 0; j < roots_count; j += 2) {
04452     collect_stack[collect_stack_count++] = roots[j];
04453     collect_stack[collect_stack_count++] = roots[j + 1];
04454 # if KEEP_DETAIL_PATH
04455     collect_stack[collect_stack_count++] = 0;
04456 # endif
04457   }
04458 
04459   if (GC_initial_trace_root) {
04460 # if CHECK_SKIP_MARK_AT_FIRST
04461     collect_start_disable_mark_skip = collect_stack_count;
04462     skip_mark_at_first = GC_inital_root_skip;
04463 # endif
04464     collect_stack[collect_stack_count++] = (unsigned long)&GC_initial_trace_root;
04465     collect_stack[collect_stack_count++] = ((unsigned long)&GC_initial_trace_root) + 1;
04466 # if KEEP_DETAIL_PATH
04467     collect_stack[collect_stack_count++] = 0;
04468 # endif
04469   }
04470 
04471   PRINTTIME((STDERR, "gc: root collect start: %ld\n", GETTIMEREL()));
04472 
04473 # if ALLOW_TRACE_COUNT
04474   collect_trace_count = 0;
04475   mem_traced = 0;
04476 # endif
04477 
04478 # if ALLOW_TRACE_PATH
04479   current_trace_source = "root";
04480 # endif
04481 
04482   collect();
04483 
04484 # if ALLOW_SET_LOCKING
04485   for (j = 0; j < num_common_sets; j++) {
04486     if (common_sets[j]->locked) {
04487       int a = common_sets[j]->atomic;
04488       push_locked_chunk(*(common_sets[j]->othersptr), a);
04489       push_locked_common(common_sets[j]->blocks, a);
04490     }
04491   }
04492 
04493   collect();
04494 # endif
04495 
04496 # if ALLOW_TRACE_COUNT
04497   traced_from_roots = collect_trace_count;
04498   collect_trace_count = 0;
04499 # endif
04500 
04501   root_marked = mem_use;
04502 
04503   PRINTTIME((STDERR, "gc: stack push start: %ld\n", GETTIMEREL()));
04504 
04505   /*** Mark from stack ***/
04506   push_stack(stack_now);
04507   
04508 # if PRINT && 0
04509   FPRINTF(STDERR, "stack until: %ld\n", collect_end_stackbased);
04510 # endif
04511 
04512 # if ALLOW_TRACE_PATH
04513   current_trace_source = "stack";
04514 # endif
04515 
04516   PRINTTIME((STDERR, "gc: stack collect start: %ld\n", GETTIMEREL()));
04517 
04518   collect();
04519 
04520 # if ALLOW_TRACE_COUNT
04521   traced_from_stack = collect_trace_count;
04522   collect_trace_count = 0;
04523 # endif
04524 
04525   PRINTTIME((STDERR, "gc: uncollectable start: %ld\n", GETTIMEREL()));
04526 
04527   /*** Uncollectable and pointerful ***/
04528   for (j = 0; j < num_common_sets; j++)
04529     if (common_sets[j]->uncollectable)
04530       if (!common_sets[j]->atomic
04531 # if ALLOW_TRACE_COUNT
04532          || collecting_with_trace_count
04533 # endif
04534          ) {
04535        push_uncollectable_chunk(*(common_sets[j]->othersptr), common_sets[j]);
04536        push_uncollectable_common(common_sets[j]->blocks, common_sets[j]);
04537       }
04538 
04539 # if ALLOW_TRACE_PATH
04540   current_trace_source = "uncollectable";
04541 # endif
04542 
04543   collect();
04544 
04545 # if ALLOW_TRACE_COUNT
04546   traced_from_uncollectable = collect_trace_count;
04547   collect_trace_count = 0;
04548 # endif
04549 
04550 # if ALLOW_TRACE_PATH
04551   /* External stacks may collect eagerly: */
04552   current_trace_source = "xstack";
04553 # endif
04554 
04555   if (GC_push_last_roots) {
04556     PRINTTIME((STDERR, "gc: last roots push start: %ld\n", GETTIMEREL()));
04557     /*** ``Last'' roots external hook ***/
04558     GC_push_last_roots();
04559     PRINTTIME((STDERR, "gc: last roots start: %ld\n", GETTIMEREL()));
04560   }
04561 
04562 # if ALLOW_TRACE_PATH
04563   current_trace_source = "xstack";
04564 # endif
04565 
04566   collect();
04567   
04568 # if ALLOW_TRACE_COUNT
04569   /* Count this as stack tracing */
04570   traced_from_stack += collect_trace_count;
04571   collect_trace_count = 0;
04572 # endif
04573   
04574   PRINTTIME((STDERR, "gc: queue finalize start: %ld\n", GETTIMEREL()));
04575 
04576 # if ALLOW_TRACE_PATH
04577   current_trace_source = "finalization";
04578 # endif
04579 
04580   /*** Disappearing Links and Finalization ***/
04581   do_disappear_and_finals();
04582 
04583 # if ALLOW_TRACE_COUNT
04584   traced_from_finals = collect_trace_count;
04585 # endif
04586 
04587   PRINTTIME((STDERR, "gc: finish start: %ld\n", GETTIMEREL()));
04588 
04589   low_plausible = high_plausible = 0;
04590 
04591   for (j = 0; j < num_common_sets; j++) {
04592     FINISH_STATISTIC(num_finishes_stat++);
04593     collect_finish_chunk(common_sets[j]->othersptr, common_sets[j]);
04594     collect_finish_common(common_sets[j]->blocks, 
04595                        common_sets[j]->block_ends,
04596                        common_sets[j]);
04597   }
04598 
04599   PRINTTIME((STDERR, "gc: all done: %ld\n", GETTIMEREL()));
04600 
04601 # if PRINT
04602   FPRINTF(STDERR,
04603          "done %ld (%ld), %ld from stack\n", mem_use, mem_real_use,
04604          mem_use - root_marked);
04605 # endif
04606 
04607   if (mem_use) {
04608 # if USE_GC_FREE_SPACE_DIVISOR
04609     long root_size;
04610 
04611     if (roots_count)
04612       root_size = roots[1] - roots[0];
04613     else
04614       root_size = 0;
04615 
04616     mem_limit = mem_use + ((sector_mem_use + root_size) / GC_free_space_divisor);
04617 # else
04618     mem_limit = MEM_USE_FACTOR * mem_use;
04619 # endif
04620   }
04621 
04622   free_collect_temp(collect_stack, sizeof(unsigned long) * (collect_stack_size + 1));
04623 
04624 # if ALLOW_TRACE_COUNT
04625   done_trace_stack(&collect_trace_stack);
04626   done_trace_stack(&collect_wait_trace_stack);
04627 # endif
04628 # if ALLOW_TRACE_PATH
04629   done_trace_stack(&collect_trace_path_stack);
04630 # endif
04631 
04632 #else
04633   if (mem_use)
04634     mem_limit = MEM_USE_FACTOR * mem_use;
04635 #endif
04636 
04637 #if PRINT_INFO_PER_GC
04638   FPRINTF(STDERR, "done  %ld (%ld); recovered %ld in %ld msecs\n",
04639          mem_use, sector_mem_use, orig_mem_use - mem_use,
04640          (long)GETTIME() - start_time);
04641 # if SHOW_SECTOR_MAPS_AT_GC
04642   dump_sector_map("                            ");
04643 # endif
04644   last_gc_end = GETTIME();
04645 #endif
04646 
04647 #if STAMP_AND_REMEMBER_SOURCE
04648   stamp_clock++;
04649 #endif
04650 
04651 #if CHECK_COLLECTING
04652   collecting_now = 0;
04653 #endif
04654 
04655   if (GC_collect_end_callback)
04656     GC_collect_end_callback();
04657 
04658   /* Run queued finalizers. Garbage collections may happen: */
04659   PRINTTIME((STDERR, "gc: finalize start: %ld\n", GETTIMEREL()));
04660   run_finalizers();
04661   PRINTTIME((STDERR, "gc: finalize end: %ld\n", GETTIMEREL()));
04662 
04663 #if MARK_STATS
04664   fprintf(STDERR, 
04665          "mark stats:\n"
04666          " %d pairs\n"
04667          " %d lookups\n"
04668          "   %d interior\n"
04669          "   %d plausible\n"
04670          "     %d paged\n"
04671          "       %d block page\n"
04672          "         %d block\n"
04673          "           %d block aligned\n"
04674          "             %d block mark\n"
04675          "               %d block pushes\n"
04676          "                 %d block tail pushes\n"
04677          "       %d chunk page\n"
04678          "         %d chunk mark\n",
04679          num_pairs_stat,
04680          num_checks_stat,
04681          num_interior_checks_stat,
04682          num_plausibles_stat,
04683          num_pages_stat,
04684          num_blocks_stat,
04685          num_blockallocs_stat,
04686          num_blockaligns_stat,
04687          num_blockmarks_stat,
04688          num_blockpushes_stat,
04689          num_blockpushes_tail_stat,
04690          num_chunks_stat,
04691          num_chunkmarks_stat);
04692 #endif
04693 #if ALLOC_STATS
04694   fprintf(STDERR, 
04695          "alloc stats:\n"
04696          " %d allocs\n"
04697          "   %d nonzero allocs\n"
04698          "   %d common allocs\n"
04699          "     %d common tries\n"
04700          "     %d common fails\n"
04701          "     %d common second tries\n"
04702          "     %d common newblocks\n"
04703          "   %d chunk allocs\n",
04704          num_allocs_stat,
04705          num_nonzero_allocs_stat,
04706          num_common_allocs_stat,
04707          num_block_alloc_checks_stat,
04708          num_block_alloc_nexts_stat,
04709          num_block_alloc_second_checks_stat,
04710          num_newblock_allocs_stat,
04711          num_chunk_allocs_stat);
04712 #endif
04713 #if FINISH_STATS
04714   fprintf(STDERR,
04715          "finish stats:\n"
04716          " %d finishes\n"
04717          "  %d chunk finishes\n"
04718          "   %d chunk keep finishes\n"
04719          "   %d chunk free finishes\n"
04720          "  %d block finishes\n"
04721          "   %d block filter steps\n"
04722          "   %d block keep finishes\n"
04723          "   %d block free finishes\n"
04724          "   %d block adjust finishes\n",
04725          num_finishes_stat,
04726          num_finish_chunk_stat,
04727          num_finish_chunkkeep_stat,
04728          num_finish_chunkfree_stat,
04729          num_finish_block_stat,
04730          num_finish_blockfiltercycles_stat,
04731          num_finish_blockkeep_stat,
04732          num_finish_blockfree_stat,
04733          num_finish_blockadjust_stat);
04734 #endif
04735 }
04736 
04737 void GC_gcollect(void)
04738 {
04739   long dummy;
04740 
04741   if (!sector_mem_use)
04742     return;
04743 
04744   FLUSH_REGISTER_WINDOWS;
04745   if (!setjmp(buf))
04746     do_GC_gcollect((void *)&dummy);
04747 }
04748 
04749 int GC_trace_count(int *stack, int *roots, int *uncollectable, int *final)
04750 {
04751 #if ALLOW_TRACE_COUNT
04752   int j;
04753 
04754   if (!sector_mem_use)
04755     return 0;
04756 
04757   for (j = 0; j < num_common_sets; j++) {
04758     if (common_sets[j]->trace_init)
04759       common_sets[j]->trace_init();
04760   }
04761 
04762   collecting_with_trace_count = 1;
04763   GC_gcollect();
04764   collecting_with_trace_count = 0;
04765 
04766   if (stack)
04767     *stack = traced_from_stack;
04768   if (roots)
04769     *roots = traced_from_roots;
04770   if (uncollectable)
04771     *uncollectable = traced_from_uncollectable;
04772   if (final)
04773     *final = traced_from_finals;
04774 
04775   for (j = 0; j < num_common_sets; j++) {
04776     if (common_sets[j]->trace_done)
04777       common_sets[j]->trace_done();
04778   }
04779 
04780   return mem_traced;
04781 #else
04782   return 0;
04783 #endif
04784 }
04785 
04786 void GC_trace_path(void)
04787 {
04788 #if ALLOW_TRACE_PATH
04789   int j;
04790 
04791   if (!sector_mem_use)
04792     return;
04793 
04794   for (j = 0; j < num_common_sets; j++) {
04795     if (common_sets[j]->trace_init)
04796       common_sets[j]->trace_init();
04797   }
04798 
04799   trace_path_buffer_pos = 0;
04800 
04801   collecting_with_trace_path = 1;
04802   GC_gcollect();
04803   collecting_with_trace_path = 0;
04804 
04805   for (j = 0; j < num_common_sets; j++) {
04806     if (common_sets[j]->trace_done)
04807       common_sets[j]->trace_done();
04808   }
04809 #endif
04810 }
04811 
04812 void GC_store_path(void *v, unsigned long src, void *path_data)
04813 {
04814   /* Note: a trace path of the form X->X->Y->Z->... (with two Xs)
04815      indicates an off-by-one stack source. */
04816 #if ALLOW_TRACE_PATH
04817   TraceStack *s = (TraceStack *)path_data;
04818   int len, i;
04819 
04820   if (trace_path_buffer_pos < 0)
04821     return;
04822 
04823   len = s->count / 3;
04824   if (len * 2 + 3 > (TRACE_PATH_BUFFER_SIZE - trace_path_buffer_pos - 7)) {
04825     trace_path_buffer[trace_path_buffer_pos++] = (void *)2;
04826     trace_path_buffer[trace_path_buffer_pos++] = "truncated";
04827     trace_path_buffer[trace_path_buffer_pos++] = 0;
04828     trace_path_buffer[trace_path_buffer_pos++] = v; /* already padded */
04829     trace_path_buffer[trace_path_buffer_pos++] = 0;
04830     trace_path_buffer[trace_path_buffer_pos] = 0;
04831     trace_path_buffer_pos = -1;
04832     return;
04833   }
04834 
04835   if (len) {
04836     unsigned long prev = 0;
04837 
04838     trace_path_buffer[trace_path_buffer_pos++] = (void *)(len + 2);
04839     trace_path_buffer[trace_path_buffer_pos++] = current_trace_source;
04840     trace_path_buffer[trace_path_buffer_pos++] = 0;
04841     for (i = 1; len--; i += 3) {
04842       trace_path_buffer[trace_path_buffer_pos++] = (void *)PAD_FORWARD(s->stack[i]);
04843       trace_path_buffer[trace_path_buffer_pos++] = 0; /* reset on next iteration */
04844 
04845       if (i > 1) {
04846        /* See if we have offset information in the original trace info.
04847           (It might be missing because KEEP_DETAIL might be turned off, or
04848             PUSH_COLLECT had 0 for its third argument.) */
04849        unsigned long diff;
04850        if (s->stack[i + 1])
04851          diff = ((unsigned long)s->stack[i + 1]) - prev;
04852        else
04853          diff = 0;
04854        trace_path_buffer[trace_path_buffer_pos - 3] = (void *)diff;
04855       }
04856       prev = (unsigned long)s->stack[i];
04857     }
04858 
04859     trace_path_buffer[trace_path_buffer_pos - 1] = (void *)(src - prev);
04860 
04861     trace_path_buffer[trace_path_buffer_pos++] = v; /* already padded */
04862     trace_path_buffer[trace_path_buffer_pos++] = 0;
04863     trace_path_buffer[trace_path_buffer_pos] = 0;
04864   }
04865 #endif
04866 }
04867 
04868 void **GC_get_next_path(void **prev, int *len)
04869 {
04870 #if ALLOW_TRACE_PATH
04871   void **p;
04872 
04873   if (!prev)
04874     p = trace_path_buffer;
04875   else
04876     p = prev + (2 * (((long *)prev)[-1]));
04877     
04878   *len = *(long *)p;
04879   if (!*len)
04880     return NULL;
04881 
04882   return p + 1;
04883 #else
04884   return NULL;
04885 #endif
04886 }
04887 
04888 void GC_clear_paths(void)
04889 {
04890 #if ALLOW_TRACE_PATH
04891   int i;
04892 
04893   for (i = 0; i < TRACE_PATH_BUFFER_SIZE; i++)
04894     trace_path_buffer[i] = NULL;
04895 #endif
04896 }
04897 
04898 /**********************************************************************/
04899 
04900 #if FPRINTF_USE_PRIM_STRINGOUT
04901 
04902 #if PRIM_STRINGOUT_AS_FWRITE
04903 void GC_prim_stringout(char *s, int len)
04904 {
04905   fwrite(s, len, 1, stderr);
04906 }
04907 #else
04908 # if PRIM_STRINGOUT_AS_WINDOWS_CONSOLE
04909 void GC_prim_stringout(char *s, int len)
04910 {
04911   static HANDLE console;
04912   DWORD wrote;
04913 
04914   if (!console) {
04915        COORD size;
04916     AllocConsole();
04917     console = GetStdHandle(STD_OUTPUT_HANDLE);
04918        size.X = 90;
04919        size.Y = 500;
04920        SetConsoleScreenBufferSize(console, size);
04921   }
04922 
04923   WriteConsole(console, s, len, &wrote, NULL);
04924 }
04925 # else
04926 extern void GC_prim_stringout(char *s, int len);
04927 # endif
04928 #endif
04929 
04930 #include <stdarg.h>
04931 #include <ctype.h>
04932 
04933 #define NP_BUFSIZE 512
04934 
04935 /* Non-allocating printf. */
04936 static void sgc_fprintf(int ignored, const char *c, ...)
04937 {
04938   char buffer[NP_BUFSIZE];
04939   int pos;
04940   va_list args;
04941 
04942   va_start(args, c);
04943 
04944   pos = 0;
04945   while (*c) {
04946     if (*c == '%') {
04947       int len = -1, slen;
04948       int islong = 0;
04949       char *s;
04950 
04951       if (pos) {
04952        GC_prim_stringout(buffer, pos);
04953        pos = 0;
04954       }
04955 
04956       c++;
04957       if (isdigit(*c)) {
04958        len = 0;
04959        while (isdigit(*c)) {
04960          len = (len * 10) + (*c - '0');
04961          c++;
04962        }
04963       }
04964 
04965       if (*c == 'l') {
04966        islong = 1;
04967        c++;
04968       }
04969       
04970       switch (*c) {
04971       case 'd':
04972       case 'x':
04973        {
04974          long v;
04975          int d, i;
04976 
04977          if (islong) {
04978            v = va_arg(args, long);
04979          } else {
04980            v = va_arg(args, int);
04981          }
04982          
04983          if (!v) {
04984            s = "0";
04985            slen = 1;
04986          } else {
04987            int neg = 0;
04988 
04989            i = NP_BUFSIZE - 2;
04990            
04991            if (v < 0) {
04992              neg = 1;
04993              v = -v;
04994            }
04995 
04996            d = (((*c) == 'd') ? 10 : 16);
04997            while (v) {
04998              int digit = (v % d);
04999              if (digit < 10)
05000               digit += '0';
05001              else
05002               digit += 'a' - 10;
05003              buffer[i--] = digit;
05004              v = v / d;
05005            }
05006            if (neg)
05007              buffer[i--] = '-';
05008 
05009            s = buffer + i + 1;
05010            slen = (NP_BUFSIZE - 2) - i;
05011          }
05012        }
05013        break;
05014       case 's':
05015        s = va_arg(args, char*);
05016        slen = strlen(s);
05017        break;
05018       default:
05019        s = "???";
05020        slen = 3;
05021        break;
05022       }
05023 
05024       c++;
05025 
05026       if (len != -1) {
05027        if (slen > len)
05028          slen = len;
05029        else {
05030          int i;
05031          for (i = slen; i < len; i++)
05032            GC_prim_stringout(" ", 1);
05033        }
05034       }
05035       
05036       if (slen)
05037        GC_prim_stringout(s, slen);
05038     } else {
05039       if (pos == (NP_BUFSIZE - 1)) {
05040        GC_prim_stringout(buffer, pos);
05041        pos = 0;
05042       }
05043       buffer[pos++] = *(c++);
05044     }
05045   }
05046 
05047   if (pos)
05048     GC_prim_stringout(buffer, pos);
05049 
05050   /* Suggest a flush: */
05051   GC_prim_stringout(NULL, 0);
05052 
05053   va_end(args);
05054 }
05055 
05056 #endif
05057 
05058