
plt-scheme  4.2.1
mark.c
00001 
00002 /*
00003  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
00004  * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
00005  * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
00006  *
00007  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
00008  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
00009  *
00010  * Permission is hereby granted to use or copy this program
00011  * for any purpose,  provided the above notices are retained on all copies.
00012  * Permission to modify the code and to distribute modified code is granted,
00013  * provided the above notices are retained, and a notice that the code was
00014  * modified is included with the above copyright notice.
00015  *
00016  */
00017 
00018 
00019 # include <stdio.h>
00020 # include "private/gc_pmark.h"
00021 
00022 #if defined(MSWIN32) && defined(__GNUC__)
00023 # include <excpt.h>
00024 #endif
00025 
00026 /* We put this here to minimize the risk of inlining. */
00027 /*VARARGS*/
00028 #ifdef __WATCOMC__
00029   void GC_noop(void *p, ...) {}
00030 #else
00031   void GC_noop() {}
00032 #endif
00033 
00034 /* Single argument version, robust against whole program analysis. */
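/* The store into a static volatile sink below forces the compiler to  */
/* treat the argument as used, so a value passed to GC_noop1 cannot be */
/* optimized away even when the compiler can see the function body.    */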
00035 void GC_noop1(x)
00036 word x;
00037 {
00038     static VOLATILE word sink;
00039 
00040     sink = x;
00041 }
00042 
00043 /* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */
00044 
00045 word GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
00046 
00052 /* Initialize GC_obj_kinds and the standard free lists properly.     */
00048 /* This must be done statically since they may be accessed before     */
00049 /* GC_init is called.                                                 */
00050 /* It's done here, since we need to deal with mark descriptors.              */
00051 struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
00052 /* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
00053               0 | GC_DS_LENGTH, FALSE, FALSE },
00054 /* NORMAL  */ { &GC_objfreelist[0], 0,
00055               0 | GC_DS_LENGTH,  /* Adjusted in GC_init_inner for EXTRA_BYTES */
00056               TRUE /* add length to descr */, TRUE },
00057 /* UNCOLLECTABLE */
00058              { &GC_uobjfreelist[0], 0,
00059               0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
00060 # ifdef ATOMIC_UNCOLLECTABLE
00061    /* AUNCOLLECTABLE */
00062              { &GC_auobjfreelist[0], 0,
00063               0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
00064 # endif
00065 # ifdef STUBBORN_ALLOC
00066 /*STUBBORN*/ { &GC_sobjfreelist[0], 0,
00067               0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
00068 # endif
00069 };
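/* Each initializer above supplies, in order: the kind's free list,    */
/* its reclaim list (filled in dynamically), the mark descriptor, a    */
/* flag saying whether the object length is added to the descriptor,   */
/* and a flag saying whether new objects of this kind are initialized  */
/* (field order as in struct obj_kind, declared in gc_priv.h).         */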
00070 
00071 # ifdef ATOMIC_UNCOLLECTABLE
00072 #   ifdef STUBBORN_ALLOC
00073       int GC_n_kinds = 5;
00074 #   else
00075       int GC_n_kinds = 4;
00076 #   endif
00077 # else
00078 #   ifdef STUBBORN_ALLOC
00079       int GC_n_kinds = 4;
00080 #   else
00081       int GC_n_kinds = 3;
00082 #   endif
00083 # endif
00084 
00085 
00086 # ifndef INITIAL_MARK_STACK_SIZE
00087 #   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
00088               /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a    */
00089               /* multiple of HBLKSIZE.                         */
00090               /* The incremental collector actually likes a larger    */
00091               /* size, since it wants to push all marked dirty objs   */
00092               /* before marking anything new.  Currently we let it    */
00093               /* grow dynamically.                             */
00094 # endif
00095 
00096 /*
00097  * Limits of stack for GC_mark routine.
00098  * All ranges between GC_mark_stack(incl.) and GC_mark_stack_top(incl.) still
00099  * need to be marked from.
00100  */
00101 
00102 word GC_n_rescuing_pages;   /* Number of dirty pages we marked from */
00103                             /* excludes ptrfree pages, etc.           */
00104 
00105 mse * GC_mark_stack;
00106 
00107 mse * GC_mark_stack_limit;
00108 
00109 word GC_mark_stack_size = 0;
00110  
00111 #ifdef PARALLEL_MARK
00112   mse * VOLATILE GC_mark_stack_top;
00113 #else
00114   mse * GC_mark_stack_top;
00115 #endif
00116 
00117 static struct hblk * scan_ptr;
00118 
00119 mark_state_t GC_mark_state = MS_NONE;
00120 
00121 GC_bool GC_mark_stack_too_small = FALSE;
00122 
00123 GC_bool GC_objects_are_marked = FALSE;    /* Are there collectable marked    */
00124                                    /* objects in the heap?            */
00125 
00126 /* Is a collection in progress?  Note that this can return true in the       */
00127 /* nonincremental case, if a collection has been abandoned and the    */
00128 /* mark state is now MS_INVALID.                               */
00129 GC_bool GC_collection_in_progress()
00130 {
00131     return(GC_mark_state != MS_NONE);
00132 }
00133 
00134 /* clear all mark bits in the header */
00135 void GC_clear_hdr_marks(hhdr)
00136 register hdr * hhdr;
00137 {
00138 #   ifdef USE_MARK_BYTES
00139       BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
00140 #   else
00141       BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
00142 #   endif
00143 }
00144 
00145 /* Set all mark bits in the header.  Used for uncollectable blocks. */
00146 void GC_set_hdr_marks(hhdr)
00147 register hdr * hhdr;
00148 {
00149     register int i;
00150 
00151     for (i = 0; i < MARK_BITS_SZ; ++i) {
00152 #     ifdef USE_MARK_BYTES
00153        hhdr -> hb_marks[i] = 1;
00154 #     else
00155        hhdr -> hb_marks[i] = ONES;
00156 #     endif
00157     }
00158 }
00159 
00160 /*
00161  * Clear all mark bits associated with block h.
00162  */
00163 /*ARGSUSED*/
00164 # if defined(__STDC__) || defined(__cplusplus)
00165     static void clear_marks_for_block(struct hblk *h, word dummy)
00166 # else
00167     static void clear_marks_for_block(h, dummy)
00168     struct hblk *h;
00169     word dummy;
00170 # endif
00171 {
00172     register hdr * hhdr = HDR(h);
00173     
00174     if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
00175         /* Mark bit for these is cleared only once the object is      */
00176         /* explicitly deallocated.  This either frees the block, or   */
00177         /* the bit is cleared once the object is on the free list.    */
00178     GC_clear_hdr_marks(hhdr);
00179 }
00180 
00181 /* Slow but general routines for setting/clearing/asking about mark bits */
00182 void GC_set_mark_bit(p)
00183 ptr_t p;
00184 {
00185     register struct hblk *h = HBLKPTR(p);
00186     register hdr * hhdr = HDR(h);
00187     register int word_no = (word *)p - (word *)h;
00188     
00189     set_mark_bit_from_hdr(hhdr, word_no);
00190 }
00191 
00192 void GC_clear_mark_bit(p)
00193 ptr_t p;
00194 {
00195     register struct hblk *h = HBLKPTR(p);
00196     register hdr * hhdr = HDR(h);
00197     register int word_no = (word *)p - (word *)h;
00198     
00199     clear_mark_bit_from_hdr(hhdr, word_no);
00200 }
00201 
00202 GC_bool GC_is_marked(p)
00203 ptr_t p;
00204 {
00205     register struct hblk *h = HBLKPTR(p);
00206     register hdr * hhdr = HDR(h);
00207     register int word_no = (word *)p - (word *)h;
00208     
00209     return(mark_bit_from_hdr(hhdr, word_no));
00210 }
00211 
00212 
00213 /*
00214  * Clear mark bits in all allocated heap blocks.  This invalidates
00215  * the marker invariant, and sets GC_mark_state to reflect this.
00216  * (This implicitly starts marking to reestablish the invariant.)
00217  */
00218 void GC_clear_marks()
00219 {
00220     GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
00221     GC_objects_are_marked = FALSE;
00222     GC_mark_state = MS_INVALID;
00223     scan_ptr = 0;
00224 #   ifdef GATHERSTATS
00225        /* Counters reflect currently marked objects: reset here */
00226         GC_composite_in_use = 0;
00227         GC_atomic_in_use = 0;
00228 #   endif
00229 
00230 }
00231 
00232 /* Initiate a garbage collection.  Initiates a full collection if the */
00233 /* mark       state is invalid.                                       */
00234 /*ARGSUSED*/
00235 void GC_initiate_gc()
00236 {
00237     if (GC_dirty_maintained) GC_read_dirty();
00238 #   ifdef STUBBORN_ALLOC
00239        GC_read_changed();
00240 #   endif
00241 #   ifdef CHECKSUMS
00242        {
00243            extern void GC_check_dirty();
00244            
00245            if (GC_dirty_maintained) GC_check_dirty();
00246        }
00247 #   endif
00248     GC_n_rescuing_pages = 0;
00249     if (GC_mark_state == MS_NONE) {
00250         GC_mark_state = MS_PUSH_RESCUERS;
00251     } else if (GC_mark_state != MS_INVALID) {
00252        ABORT("unexpected state");
00253     } /* else this is really a full collection, and mark       */
00254       /* bits are invalid.                              */
00255     scan_ptr = 0;
00256 }
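/* Summary of the mark state machine, as driven by GC_mark_some below: */
/*   MS_NONE               -> no marking in progress.                  */
/*   MS_PUSH_RESCUERS      -> push dirty pages (incremental GC), then  */
/*                            push roots and go to MS_ROOTS_PUSHED.    */
/*   MS_PUSH_UNCOLLECTABLE -> push marked objects in uncollectable     */
/*                            blocks, then push roots -> MS_ROOTS_PUSHED. */
/*   MS_ROOTS_PUSHED       -> drain the mark stack; when it empties,   */
/*                            the mark phase is done -> MS_NONE.       */
/*   MS_INVALID, MS_PARTIALLY_INVALID -> the marker invariant does not */
/*                            hold; rescan the heap from marked        */
/*                            objects, then push roots -> MS_ROOTS_PUSHED. */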
00257 
00258 
00259 static void alloc_mark_stack();
00260 
00261 /* Perform a small amount of marking.                   */
00262 /* We try to touch roughly a page of memory.            */
00263 /* Return TRUE if we just finished a mark phase. */
00264 /* Cold_gc_frame is an address inside a GC frame that   */
00265 /* remains valid until all marking is complete.         */
00266 /* A zero value indicates that it's OK to miss some     */
00267 /* register values.                              */
00268 /* We hold the allocation lock.  In the case of  */
00269 /* incremental collection, the world may not be stopped.*/
00270 #ifdef MSWIN32
00271   /* For win32, this is called after we establish a structured */
00272   /* exception handler, in case Windows unmaps one of our root */
00273   /* segments.  See below.  In either case, we acquire the     */
00274   /* allocator lock long before we get here.                   */
00275   GC_bool GC_mark_some_inner(cold_gc_frame)
00276   ptr_t cold_gc_frame;
00277 #else
00278   GC_bool GC_mark_some(cold_gc_frame)
00279   ptr_t cold_gc_frame;
00280 #endif
00281 {
00282     switch(GC_mark_state) {
00283        case MS_NONE:
00284            return(FALSE);
00285            
00286        case MS_PUSH_RESCUERS:
00287            if (GC_mark_stack_top
00288                >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
00289               /* Go ahead and mark, even though that might cause us to */
00290               /* see more marked dirty objects later on.  Avoid this   */
00291               /* in the future.                                 */
00292               GC_mark_stack_too_small = TRUE;
00293                MARK_FROM_MARK_STACK();
00294                return(FALSE);
00295            } else {
00296                scan_ptr = GC_push_next_marked_dirty(scan_ptr);
00297                if (scan_ptr == 0) {
00298 #                 ifdef CONDPRINT
00299                     if (GC_print_stats) {
00300                      GC_printf1("Marked from %lu dirty pages\n",
00301                                (unsigned long)GC_n_rescuing_pages);
00302                     }
00303 #                 endif
00304                   GC_push_roots(FALSE, cold_gc_frame);
00305                   GC_objects_are_marked = TRUE;
00306                   if (GC_mark_state != MS_INVALID) {
00307                       GC_mark_state = MS_ROOTS_PUSHED;
00308                   }
00309               }
00310            }
00311            return(FALSE);
00312        
00313        case MS_PUSH_UNCOLLECTABLE:
00314            if (GC_mark_stack_top
00315                >= GC_mark_stack + GC_mark_stack_size/4) {
00316 #             ifdef PARALLEL_MARK
00317                 /* Avoid this, since we don't parallelize the marker  */
00318                 /* here.                                       */
00319                 if (GC_parallel) GC_mark_stack_too_small = TRUE;
00320 #             endif
00321                MARK_FROM_MARK_STACK();
00322                return(FALSE);
00323            } else {
00324                scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
00325                if (scan_ptr == 0) {
00326                   GC_push_roots(TRUE, cold_gc_frame);
00327                   GC_objects_are_marked = TRUE;
00328                   if (GC_mark_state != MS_INVALID) {
00329                       GC_mark_state = MS_ROOTS_PUSHED;
00330                   }
00331               }
00332            }
00333            return(FALSE);
00334        
00335        case MS_ROOTS_PUSHED:
00336 #          ifdef PARALLEL_MARK
00337              /* In the incremental GC case, this currently doesn't    */
00338              /* quite do the right thing, since it runs to            */
00339              /* completion.  On the other hand, starting a            */
00340              /* parallel marker is expensive, so perhaps it is        */
00341              /* the right thing?                               */
00342              /* Eventually, incremental marking should run            */
00343              /* asynchronously in multiple threads, without grabbing  */
00344              /* the allocation lock.                                  */
00345                if (GC_parallel) {
00346                 GC_do_parallel_mark();
00347                 GC_ASSERT(GC_mark_stack_top < GC_first_nonempty);
00348                 GC_mark_stack_top = GC_mark_stack - 1;
00349                  if (GC_mark_stack_too_small) {
00350                    alloc_mark_stack(2*GC_mark_stack_size);
00351                  }
00352                 if (GC_mark_state == MS_ROOTS_PUSHED) {
00353                    GC_mark_state = MS_NONE;
00354                    return(TRUE);
00355                 } else {
00356                   return(FALSE);
00357                  }
00358               }
00359 #          endif
00360            if (GC_mark_stack_top >= GC_mark_stack) {
00361                MARK_FROM_MARK_STACK();
00362                return(FALSE);
00363            } else {
00364                GC_mark_state = MS_NONE;
00365                if (GC_mark_stack_too_small) {
00366                    alloc_mark_stack(2*GC_mark_stack_size);
00367                }
00368                return(TRUE);
00369            }
00370            
00371        case MS_INVALID:
00372        case MS_PARTIALLY_INVALID:
00373            if (!GC_objects_are_marked) {
00374               GC_mark_state = MS_PUSH_UNCOLLECTABLE;
00375               return(FALSE);
00376            }
00377            if (GC_mark_stack_top >= GC_mark_stack) {
00378                MARK_FROM_MARK_STACK();
00379                return(FALSE);
00380            }
00381            if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
00382               /* About to start a heap scan for marked objects. */
00383               /* Mark stack is empty.  OK to reallocate.         */
00384               if (GC_mark_stack_too_small) {
00385                    alloc_mark_stack(2*GC_mark_stack_size);
00386               }
00387               GC_mark_state = MS_PARTIALLY_INVALID;
00388            }
00389            scan_ptr = GC_push_next_marked(scan_ptr);
00390            if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
00391               GC_push_roots(TRUE, cold_gc_frame);
00392               GC_objects_are_marked = TRUE;
00393               if (GC_mark_state != MS_INVALID) {
00394                   GC_mark_state = MS_ROOTS_PUSHED;
00395               }
00396            }
00397            return(FALSE);
00398        default:
00399            ABORT("GC_mark_some: bad state");
00400            return(FALSE);
00401     }
00402 }
00403 
00404 
00405 #ifdef MSWIN32
00406 
00407 # ifdef __GNUC__
00408 
00409     typedef struct {
00410       EXCEPTION_REGISTRATION ex_reg;
00411       void *alt_path;
00412     } ext_ex_regn;
00413 
00414 
00415     static EXCEPTION_DISPOSITION mark_ex_handler(
00416         struct _EXCEPTION_RECORD *ex_rec, 
00417         void *est_frame,
00418         struct _CONTEXT *context,
00419         void *disp_ctxt)
00420     {
00421         if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
00422           ext_ex_regn *xer = (ext_ex_regn *)est_frame;
00423 
00424           /* Unwind from the inner function assuming the standard */
00425           /* function prologue.                                   */
00426           /* Assumes code has not been compiled with              */
00427           /* -fomit-frame-pointer.                                */
00428           context->Esp = context->Ebp;
00429           context->Ebp = *((DWORD *)context->Esp);
00430           context->Esp = context->Esp - 8;
00431 
00432           /* Resume execution at the "real" handler within the    */
00433           /* wrapper function.                                    */
00434           context->Eip = (DWORD )(xer->alt_path);
00435 
00436           return ExceptionContinueExecution;
00437 
00438         } else {
00439             return ExceptionContinueSearch;
00440         }
00441     }
00442 # endif /* __GNUC__ */
00443 
00444 
00445   GC_bool GC_mark_some(cold_gc_frame)
00446   ptr_t cold_gc_frame;
00447   {
00448       GC_bool ret_val;
00449 
00450 #   ifndef __GNUC__
00451       /* Windows 98 appears to asynchronously create and remove  */
00452       /* writable memory mappings, for reasons we haven't yet    */
00453       /* understood.  Since we look for writable regions to      */
00454       /* determine the root set, we may try to mark from an      */
00455       /* address range that disappeared since we started the     */
00456       /* collection.  Thus we have to recover from faults here.  */
00457       /* This code does not appear to be necessary for Windows   */
00458       /* 95/NT/2000. Note that this code should never generate   */
00459       /* an incremental GC write fault.                          */
00460 
00461       __try {
00462 
00463 #   else /* __GNUC__ */
00464 
00465       /* Manually install an exception handler since GCC does    */
00466       /* not yet support Structured Exception Handling (SEH) on  */
00467       /* Win32.                                                  */
00468 
00469       ext_ex_regn er;
00470 
00471       er.alt_path = &&handle_ex;
00472       er.ex_reg.handler = mark_ex_handler;
00473       asm volatile ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
00474       asm volatile ("movl %0, %%fs:0" : : "r" (&er));
00475 
00476 #   endif /* __GNUC__ */
00477 
00478           ret_val = GC_mark_some_inner(cold_gc_frame);
00479 
00480 #   ifndef __GNUC__
00481 
00482       } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
00483                 EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
00484 
00485 #   else /* __GNUC__ */
00486 
00487           /* Prevent GCC from considering the following code unreachable */
00488           /* and thus eliminating it.                                    */
00489           if (er.alt_path != 0)
00490               goto rm_handler;
00491 
00492 handle_ex:
00493           /* Execution resumes from here on an access violation. */
00494 
00495 #   endif /* __GNUC__ */
00496 
00497 #         ifdef CONDPRINT
00498             if (GC_print_stats) {
00499              GC_printf0("Caught ACCESS_VIOLATION in marker. "
00500                        "Memory mapping disappeared.\n");
00501             }
00502 #         endif /* CONDPRINT */
00503 
00504           /* We have bad roots on the stack.  Discard mark stack.  */
00505           /* Rescan from marked objects.  Redetermine roots.    */
00506           GC_invalidate_mark_state();     
00507           scan_ptr = 0;
00508 
00509           ret_val = FALSE;
00510 
00511 #   ifndef __GNUC__
00512 
00513       }
00514 
00515 #   else /* __GNUC__ */
00516 
00517 rm_handler:
00518       /* Uninstall the exception handler */
00519       asm volatile ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));
00520 
00521 #   endif /* __GNUC__ */
00522 
00523       return ret_val;
00524   }
00525 #endif /* MSWIN32 */
00526 
00527 
00528 GC_bool GC_mark_stack_empty()
00529 {
00530     return(GC_mark_stack_top < GC_mark_stack);
00531 }      
00532 
00533 #ifdef PROF_MARKER
00534     word GC_prof_array[10];
00535 #   define PROF(n) GC_prof_array[n]++
00536 #else
00537 #   define PROF(n)
00538 #endif
00539 
00540 /* Given a pointer to someplace other than a small object page or the */
00541 /* first page of a large object, either:                       */
00542 /*     - return a pointer to somewhere in the first page of the large */
00543 /*       object, if current points to a large object.                 */
00544 /*       In this case *hhdr is replaced with a pointer to the header  */
00545 /*       for the large object.                                        */
00546 /*     - just return current if it does not point to a large object.  */
00547 /*ARGSUSED*/
00548 ptr_t GC_find_start(current, hhdr, new_hdr_p)
00549 register ptr_t current;
00550 register hdr *hhdr, **new_hdr_p;
00551 {
00552     if (GC_all_interior_pointers) {
00553        if (hhdr != 0) {
00554            register ptr_t orig = current;
00555            
00556            current = (ptr_t)HBLKPTR(current);
00557            do {
00558              current = current - HBLKSIZE*(word)hhdr;
00559              hhdr = HDR(current);
00560            } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
00561            /* current points to near the start of the large object */
00562            if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(orig);
00563            if ((word *)orig - (word *)current
00564                 >= (ptrdiff_t)(hhdr->hb_sz)) {
00565                /* Pointer past the end of the block */
00566                return(orig);
00567            }
00568            *new_hdr_p = hhdr;
00569            return(current);
00570        } else {
00571            return(current);
00572         }
00573     } else {
00574         return(current);
00575     }
00576 }
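/* Note on the loop in GC_find_start above: for blocks other than the  */
/* first block of a large object, HDR() yields a small forwarding      */
/* count rather than a real header (hence IS_FORWARDING_ADDR_OR_NIL),  */
/* so repeatedly stepping back by HBLKSIZE*(word)hhdr converges on the */
/* object's first block.                                               */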
00577 
00578 void GC_invalidate_mark_state()
00579 {
00580     GC_mark_state = MS_INVALID;
00581     GC_mark_stack_top = GC_mark_stack-1;
00582 }
00583 
00584 mse * GC_signal_mark_stack_overflow(msp)
00585 mse * msp;
00586 {
00587     GC_mark_state = MS_INVALID;
00588     GC_mark_stack_too_small = TRUE;
00589 #   ifdef CONDPRINT
00590       if (GC_print_stats) {
00591        GC_printf1("Mark stack overflow; current size = %lu entries\n",
00592                   GC_mark_stack_size);
00593       }
00594 #   endif
00595     return(msp - GC_MARK_STACK_DISCARDS);
00596 }
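/* Note: entries discarded by the GC_MARK_STACK_DISCARDS adjustment    */
/* above are not lost.  Their referents already have their mark bits   */
/* set, and setting GC_mark_state to MS_INVALID forces GC_mark_some to */
/* rescan the heap from marked objects later.                          */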
00597 
00598 /* PLTSCHEME: used by setjmpup: */
00599 int GC_did_mark_stack_overflow(void)
00600 {
00601   return GC_mark_state == MS_INVALID;
00602 }
00603 void GC_mark_from_mark_stack(void)
00604 {
00605   MARK_FROM_MARK_STACK();
00606 }
00607 void GC_mark_overflow_recover(void *p)
00608 {
00609   GC_set_mark_bit(p);
00610   while (!GC_mark_some((ptr_t)0));
00611 }
00612 
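/* Mark descriptors, as interpreted by GC_mark_from below, carry a tag */
/* in their low bits (GC_DS_TAGS):                                     */
/*   GC_DS_LENGTH     - the rest of the descriptor is the object       */
/*                      length in bytes; scan the whole object.        */
/*   GC_DS_BITMAP     - the high bits form a bitmap of which words     */
/*                      may contain pointers.                          */
/*   GC_DS_PROC       - the descriptor encodes a mark procedure index  */
/*                      (PROC()) and an environment (ENV()).           */
/*   GC_DS_PER_OBJECT - the real descriptor is stored in, or reached   */
/*                      through, the object itself.                    */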
00613 /*
00614  * Mark objects pointed to by the regions described by
00615  * mark stack entries between GC_mark_stack and GC_mark_stack_top,
00616  * inclusive.  Assumes the upper limit of a mark stack entry
00617  * is never 0.  A mark stack entry never has size 0.
00618  * We try to traverse on the order of a hblk of memory before we return.
00619  * Caller is responsible for calling this until the mark stack is empty.
00620  * Note that this is the most performance critical routine in the
00621  * collector.  Hence it contains all sorts of ugly hacks to speed
00622  * things up.  In particular, we avoid procedure calls on the common
00623  * path, we take advantage of peculiarities of the mark descriptor
00624  * encoding, we optionally maintain a cache for the block address to
00625  * header mapping, we prefetch when an object is "grayed", etc. 
00626  */
00627 mse * GC_mark_from(mark_stack_top, mark_stack, mark_stack_limit)
00628 mse * mark_stack_top;
00629 mse * mark_stack;
00630 mse * mark_stack_limit;
00631 {
00632   int credit = HBLKSIZE;    /* Remaining credit for marking work      */
00633   register word * current_p;       /* Pointer to current candidate ptr.      */
00634   register word current;    /* Candidate pointer.                     */
00635   register word * limit;    /* (Incl) limit of current candidate      */
00636                             /* range                           */
00637   register word descr;
00638   register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
00639   register ptr_t least_ha = GC_least_plausible_heap_addr;
00640   DECLARE_HDR_CACHE;
00641 
00642 # define SPLIT_RANGE_WORDS 128  /* Must be power of 2.         */
00643 
00644   GC_objects_are_marked = TRUE;
00645   INIT_HDR_CACHE;
00646 # ifdef OS2 /* Use untweaked version to circumvent compiler problem */
00647   while (mark_stack_top >= mark_stack && credit >= 0) {
00648 # else
00649   while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit)
00650        >= 0) {
00651 # endif
00652     current_p = mark_stack_top -> mse_start;
00653     descr = mark_stack_top -> mse_descr;
00654   retry:
00655     /* current_p and descr describe the current object.        */
00656     /* *mark_stack_top is vacant.                       */
00657     /* The following is 0 only for small objects described by a simple       */
00658     /* length descriptor.  For many applications this is the common   */
00659     /* case, so we try to detect it quickly.                          */
00660     if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
00661       word tag = descr & GC_DS_TAGS;
00662       
00663       switch(tag) {
00664         case GC_DS_LENGTH:
00665           /* Large length.                                      */
00666           /* Process part of the range to avoid pushing too much on the      */
00667           /* stack.                                            */
00668          GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
00669                          - (word)GC_least_plausible_heap_addr);
00670 #        ifdef PARALLEL_MARK
00671 #          define SHARE_BYTES 2048
00672            if (descr > SHARE_BYTES && GC_parallel
00673               && mark_stack_top < mark_stack_limit - 1) {
00674              int new_size = (descr/2) & ~(sizeof(word)-1);
00675              mark_stack_top -> mse_start = current_p;
00676              mark_stack_top -> mse_descr = new_size + sizeof(word);
00677                                    /* makes sure we handle     */
00678                                    /* misaligned pointers.            */
00679              mark_stack_top++;
00680              current_p = (word *) ((char *)current_p + new_size);
00681              descr -= new_size;
00682              goto retry;
00683            }
00684 #        endif /* PARALLEL_MARK */
00685           mark_stack_top -> mse_start =
00686               limit = current_p + SPLIT_RANGE_WORDS-1;
00687           mark_stack_top -> mse_descr =
00688                      descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
00689           /* Make sure that pointers overlapping the two ranges are   */
00690           /* considered.                                       */
00691           limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
00692           break;
00693         case GC_DS_BITMAP:
00694           mark_stack_top--;
00695           descr &= ~GC_DS_TAGS;
00696           credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
00697           while (descr != 0) {
00698             if ((signed_word)descr < 0) {
00699               current = *current_p;
00700              FIXUP_POINTER(current);
00701              if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
00702               PREFETCH((ptr_t)current);
00703                 HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
00704                            mark_stack_limit, current_p, exit1);
00705              }
00706             }
00707            descr <<= 1;
00708            ++ current_p;
00709           }
00710           continue;
00711         case GC_DS_PROC:
00712           mark_stack_top--;
00713           credit -= GC_PROC_BYTES;
00714           mark_stack_top =
00715               (*PROC(descr))
00716                          (current_p, mark_stack_top,
00717                          mark_stack_limit, ENV(descr));
00718           continue;
00719         case GC_DS_PER_OBJECT:
00720          if ((signed_word)descr >= 0) {
00721            /* Descriptor is in the object.       */
00722             descr = *(word *)((ptr_t)current_p + descr - GC_DS_PER_OBJECT);
00723          } else {
00724            /* Descriptor is in type descriptor pointed to by first    */
00725            /* word in object.                                         */
00726            ptr_t type_descr = *(ptr_t *)current_p;
00727            /* type_descr is either a valid pointer to the descriptor  */
00728            /* structure, or this object was on a free list.  If it    */
00729            /* was anything but the last object on the free list,      */
00730            /* we will misinterpret the next object on the free list as */
00731            /* the type descriptor, and get a 0 GC descriptor, which   */
00732            /* is ideal.  Unfortunately, we need to check for the last */
00733            /* object case explicitly.                                 */
00734            if (0 == type_descr) {
00735               /* Rarely executed.  */
00736               mark_stack_top--;
00737               continue;
00738            }
00739             descr = *(word *)(type_descr
00740                            - (descr - (GC_DS_PER_OBJECT
00741                                      - GC_INDIR_PER_OBJ_BIAS)));
00742          }
00743          if (0 == descr) {
00744              /* Can happen either because we generated a 0 descriptor */
00745              /* or we saw a pointer to a free object.                 */
00746              mark_stack_top--;
00747              continue;
00748          }
00749           goto retry;
00750       }
00751     } else /* Small object with length descriptor */ {
00752       mark_stack_top--;
00753       limit = (word *)(((ptr_t)current_p) + (word)descr);
00754     }
00755     /* The simple case in which we're scanning a range. */
00756     GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
00757     credit -= (ptr_t)limit - (ptr_t)current_p;
00758     limit -= 1;
00759     {
00760 #     define PREF_DIST 4
00761 
00762 #     ifndef SMALL_CONFIG
00763         word deferred;
00764 
00765        /* Try to prefetch the next pointer to be examined asap.       */
00766        /* Empirically, this also seems to help slightly without       */
00767        /* prefetches, at least on linux/X86.  Presumably this loop    */
00768        /* ends up with less register pressure, and gcc thus ends up   */
00769        /* generating slightly better code.  Overall gcc code quality  */
00770        /* for this loop is still not great.                           */
00771        for(;;) {
00772          PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE);
00773          GC_ASSERT(limit >= current_p);
00774          deferred = *limit;
00775          FIXUP_POINTER(deferred);
00776          limit = (word *)((char *)limit - ALIGNMENT);
00777          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred <  greatest_ha) {
00778            PREFETCH((ptr_t)deferred);
00779            break;
00780          }
00781          if (current_p > limit) goto next_object;
00782          /* Unroll once, so we don't do too many of the prefetches    */
00783          /* based on limit.                                    */
00784          deferred = *limit;
00785          FIXUP_POINTER(deferred);
00786          limit = (word *)((char *)limit - ALIGNMENT);
00787          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred <  greatest_ha) {
00788            PREFETCH((ptr_t)deferred);
00789            break;
00790          }
00791          if (current_p > limit) goto next_object;
00792        }
00793 #     endif
00794 
00795       while (current_p <= limit) {
00796        /* Empirically, unrolling this loop doesn't help a lot. */
00797        /* Since HC_PUSH_CONTENTS expands to a lot of code,     */
00798        /* we don't.                                     */
00799         current = *current_p;
00800        FIXUP_POINTER(current);
00801         PREFETCH((ptr_t)current_p + PREF_DIST*CACHE_LINE_SIZE);
00802         if ((ptr_t)current >= least_ha && (ptr_t)current <  greatest_ha) {
00803          /* Prefetch the contents of the object we just pushed.  It's */
00804          /* likely we will need them soon.                            */
00805          PREFETCH((ptr_t)current);
00806           HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
00807                          mark_stack_limit, current_p, exit2);
00808         }
00809         current_p = (word *)((char *)current_p + ALIGNMENT);
00810       }
00811 
00812 #     ifndef SMALL_CONFIG
00813        /* We still need to mark the entry we previously prefetched.   */
00814        /* We alrady know that it passes the preliminary pointer       */
00815        /* validity test.                                       */
00816         HC_PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
00817                        mark_stack_limit, current_p, exit4);
00818        next_object:;
00819 #     endif
00820     }
00821   }
00822   return mark_stack_top;
00823 }
00824 
00825 #ifdef PARALLEL_MARK
00826 
00827 /* We assume we have an ANSI C Compiler.  */
00828 GC_bool GC_help_wanted = FALSE;
00829 unsigned GC_helper_count = 0;
00830 unsigned GC_active_count = 0;
00831 mse * VOLATILE GC_first_nonempty;
00832 word GC_mark_no = 0;
00833 
00834 #define LOCAL_MARK_STACK_SIZE HBLKSIZE
00835        /* Under normal circumstances, this is big enough to guarantee */
00836        /* we don't overflow half of it in a single call to            */
00837        /* GC_mark_from.                                        */
00838 
00839 
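/* Overview of the parallel marking protocol implemented below:        */
/* GC_do_parallel_mark announces work by setting GC_help_wanted and    */
/* waking helpers; each helper (via GC_help_marker) registers itself   */
/* in GC_helper_count and runs GC_mark_local.  GC_mark_local steals    */
/* batches of global mark stack entries into a thread-local stack      */
/* (GC_steal_mark_stack), marks from them, and hands surplus work back */
/* to the global stack (GC_return_mark_stack) when helpers are idle.   */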
00840 /* Steal mark stack entries starting at mse low into mark stack local */
00841 /* until we either steal mse high, or we have max entries.            */
00842 /* Return a pointer to the top of the local mark stack.                */
00843 /* *next is replaced by a pointer to the next unscanned mark stack    */
00844 /* entry.                                                      */
00845 mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
00846                        unsigned max, mse **next)
00847 {
00848     mse *p;
00849     mse *top = local - 1;
00850     unsigned i = 0;
00851 
00852     /* Make sure that prior writes to the mark stack are visible. */
00853     /* On some architectures, the fact that the reads are        */
00854     /* volatile should suffice.                                  */
00855 #   if !defined(IA64) && !defined(HP_PA) && !defined(I386)
00856       GC_memory_barrier();
00857 #   endif
00858     GC_ASSERT(high >= low-1 && high - low + 1 <= GC_mark_stack_size);
00859     for (p = low; p <= high && i <= max; ++p) {
00860        word descr = *(volatile word *) &(p -> mse_descr);
00861        /* In the IA64 memory model, the following volatile store is   */
00862        /* ordered after this read of descr.  Thus a thread must read  */
00863        /* the original nonzero value.  HP_PA appears to be similar,   */
00864        /* and if I'm reading the P4 spec correctly, X86 is probably   */
00865        /* also OK.  In some other cases we need a barrier.            */
00866 #       if !defined(IA64) && !defined(HP_PA) && !defined(I386)
00867           GC_memory_barrier();
00868 #       endif
00869        if (descr != 0) {
00870            *(volatile word *) &(p -> mse_descr) = 0;
00871            /* More than one thread may get this entry, but that's only */
00872            /* a minor performance problem.                            */
00873            ++top;
00874            top -> mse_descr = descr;
00875            top -> mse_start = p -> mse_start;
00876            GC_ASSERT(  (top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH || 
00877                      top -> mse_descr < (ptr_t)GC_greatest_plausible_heap_addr
00878                                         - (ptr_t)GC_least_plausible_heap_addr);
00879            /* If this is a big object, count it as                    */
00880            /* size/256 + 1 objects.                                   */
00881            ++i;
00882            if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (descr >> 8);
00883        }
00884     }
00885     *next = p;
00886     return top;
00887 }
00888 
00889 /* Copy back a local mark stack.   */
00890 /* low and high are inclusive bounds.     */
00891 void GC_return_mark_stack(mse * low, mse * high)
00892 {
00893     mse * my_top;
00894     mse * my_start;
00895     size_t stack_size;
00896 
00897     if (high < low) return;
00898     stack_size = high - low + 1;
00899     GC_acquire_mark_lock();
00900     my_top = GC_mark_stack_top;
00901     my_start = my_top + 1;
00902     if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
00903 #     ifdef CONDPRINT
00904        if (GC_print_stats) {
00905          GC_printf0("No room to copy back mark stack.");
00906        }
00907 #     endif
00908       GC_mark_state = MS_INVALID;
00909       GC_mark_stack_too_small = TRUE;
00910       /* We drop the local mark stack.  We'll fix things later.       */
00911     } else {
00912       BCOPY(low, my_start, stack_size * sizeof(mse));
00913       GC_ASSERT(GC_mark_stack_top == my_top);
00914 #     if !defined(IA64) && !defined(HP_PA)
00915         GC_memory_barrier();
00916 #     endif
00917        /* On IA64, the volatile write acts as a release barrier. */
00918       GC_mark_stack_top = my_top + stack_size;
00919     }
00920     GC_release_mark_lock();
00921     GC_notify_all_marker();
00922 }
00923 
00924 /* Mark from the local mark stack.        */
00925 /* On return, the local mark stack is empty.     */
00926 /* But this may be achieved by copying the       */
00927 /* local mark stack back into the global one.    */
00928 void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
00929 {
00930     unsigned n;
00931 #   define N_LOCAL_ITERS 1
00932 
00933 #   ifdef GC_ASSERTIONS
00934       /* Make sure we don't hold mark lock. */
00935        GC_acquire_mark_lock();
00936        GC_release_mark_lock();
00937 #   endif
00938     for (;;) {
00939         for (n = 0; n < N_LOCAL_ITERS; ++n) {
00940            local_top = GC_mark_from(local_top, local_mark_stack,
00941                                  local_mark_stack + LOCAL_MARK_STACK_SIZE);
00942            if (local_top < local_mark_stack) return;
00943            if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
00944               GC_return_mark_stack(local_mark_stack, local_top);
00945               return;
00946            }
00947        }
00948        if (GC_mark_stack_top < GC_first_nonempty &&
00949            GC_active_count < GC_helper_count
00950            && local_top > local_mark_stack + 1) {
00951            /* Try to share the load, since the main stack is empty,   */
00952            /* and helper threads are waiting for a refill.            */
00953            /* The entries near the bottom of the stack are likely     */
00954            /* to require more work.  Thus we return those, even though */
00955            /* it's harder.                                     */
00956            mse * p;
00957            mse * new_bottom = local_mark_stack
00958                             + (local_top - local_mark_stack)/2;
00959            GC_ASSERT(new_bottom > local_mark_stack
00960                     && new_bottom < local_top);
00961            GC_return_mark_stack(local_mark_stack, new_bottom - 1);
00962            memmove(local_mark_stack, new_bottom,
00963                   (local_top - new_bottom + 1) * sizeof(mse));
00964            local_top -= (new_bottom - local_mark_stack);
00965        }
00966     }
00967 }
00968 
00969 #define ENTRIES_TO_GET 5
00970 
00971 long GC_markers = 2;        /* Normally changed by thread-library-    */
00972                             /* -specific code.                 */
00973 
00974 /* Mark using the local mark stack until the global mark stack is empty      */
00975 /* and there are no active workers. Update GC_first_nonempty to reflect      */
00976 /* progress.                                                   */
00977 /* Caller does not hold mark lock.                             */
00978 /* Caller has already incremented GC_helper_count.  We decrement it,  */
00979 /* and maintain GC_active_count.                               */
00980 void GC_mark_local(mse *local_mark_stack, int id)
00981 {
00982     mse * my_first_nonempty;
00983 
00984     GC_acquire_mark_lock();
00985     GC_active_count++;
00986     my_first_nonempty = GC_first_nonempty;
00987     GC_ASSERT(GC_first_nonempty >= GC_mark_stack && 
00988              GC_first_nonempty <= GC_mark_stack_top + 1);
00989 #   ifdef PRINTSTATS
00990        GC_printf1("Starting mark helper %lu\n", (unsigned long)id);
00991 #   endif
00992     GC_release_mark_lock();
00993     for (;;) {
00994        size_t n_on_stack;
00995         size_t n_to_get;
00996        mse *next;
00997        mse * my_top;
00998        mse * local_top;
00999         mse * global_first_nonempty = GC_first_nonempty;
01000 
01001        GC_ASSERT(my_first_nonempty >= GC_mark_stack && 
01002                 my_first_nonempty <= GC_mark_stack_top + 1);
01003        GC_ASSERT(global_first_nonempty >= GC_mark_stack && 
01004                 global_first_nonempty <= GC_mark_stack_top + 1);
01005        if (my_first_nonempty < global_first_nonempty) {
01006            my_first_nonempty = global_first_nonempty;
01007         } else if (global_first_nonempty < my_first_nonempty) {
01008            GC_compare_and_exchange((word *)(&GC_first_nonempty), 
01009                                (word) global_first_nonempty,
01010                                (word) my_first_nonempty);
01011            /* If this fails, we just go ahead, without updating       */
01012            /* GC_first_nonempty.                               */
01013        }
01014        /* Perhaps we should also update GC_first_nonempty, if it */
01015        /* is less.  But that would require using atomic updates. */
01016        my_top = GC_mark_stack_top;
01017        n_on_stack = my_top - my_first_nonempty + 1;
01018         if (0 == n_on_stack) {
01019            GC_acquire_mark_lock();
01020             my_top = GC_mark_stack_top;
01021             n_on_stack = my_top - my_first_nonempty + 1;
01022            if (0 == n_on_stack) {
01023               GC_active_count--;
01024               GC_ASSERT(GC_active_count <= GC_helper_count);
01025               /* Other markers may redeposit objects    */
01026               /* on the stack.                          */
01027               if (0 == GC_active_count) GC_notify_all_marker();
01028               while (GC_active_count > 0
01029                      && GC_first_nonempty > GC_mark_stack_top) {
01030                   /* We will be notified if either GC_active_count    */
01031                   /* reaches zero, or if more objects are pushed on   */
01032                   /* the global mark stack.                           */
01033                   GC_wait_marker();
01034               }
01035               if (GC_active_count == 0 &&
01036                   GC_first_nonempty > GC_mark_stack_top) { 
01037                   GC_bool need_to_notify = FALSE;
01038                   /* The above conditions can't be falsified while we */
01039                   /* hold the mark lock, since neither         */
01040                   /* GC_active_count nor GC_mark_stack_top can */
01041                   /* change.  GC_first_nonempty can only be           */
01042                   /* incremented asynchronously.  Thus we know that   */
01043                   /* both conditions actually held simultaneously.    */
01044                   GC_helper_count--;
01045                   if (0 == GC_helper_count) need_to_notify = TRUE;
01046 #                 ifdef PRINTSTATS
01047                     GC_printf1(
01048                       "Finished mark helper %lu\n", (unsigned long)id);
01049 #                 endif
01050                   GC_release_mark_lock();
01051                   if (need_to_notify) GC_notify_all_marker();
01052                   return;
01053               }
01054               /* else there's something on the stack again, or */
01055               /* another helper may push something.                   */
01056               GC_active_count++;
01057                GC_ASSERT(GC_active_count > 0);
01058               GC_release_mark_lock();
01059               continue;
01060            } else {
01061               GC_release_mark_lock();
01062            }
01063        }
01064        n_to_get = ENTRIES_TO_GET;
01065        if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
01066        local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
01067                                    local_mark_stack, n_to_get,
01068                                     &my_first_nonempty);
01069         GC_ASSERT(my_first_nonempty >= GC_mark_stack && 
01070                  my_first_nonempty <= GC_mark_stack_top + 1);
01071        GC_do_local_mark(local_mark_stack, local_top);
01072     }
01073 }
01074 
01075 /* Perform Parallel mark.                 */
01076 /* We hold the GC lock, not the mark lock.       */
01077 /* Currently runs until the mark stack is */
01078 /* empty.                                 */
01079 void GC_do_parallel_mark()
01080 {
01081     mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
01082     mse * local_top;
01083     mse * my_top;
01084 
01085     GC_acquire_mark_lock();
01086     GC_ASSERT(I_HOLD_LOCK());
01087     /* This could be a GC_ASSERT, but it seems safer to keep it on    */
01088     /* all the time, especially since it's cheap.                     */
01089     if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
01090        ABORT("Tried to start parallel mark in bad state");
01091 #   ifdef PRINTSTATS
01092        GC_printf1("Starting marking for mark phase number %lu\n",
01093                  (unsigned long)GC_mark_no);
01094 #   endif
01095     GC_first_nonempty = GC_mark_stack;
01096     GC_active_count = 0;
01097     GC_helper_count = 1;
01098     GC_help_wanted = TRUE;
01099     GC_release_mark_lock();
01100     GC_notify_all_marker();
01101        /* Wake up potential helpers.      */
01102     GC_mark_local(local_mark_stack, 0);
01103     GC_acquire_mark_lock();
01104     GC_help_wanted = FALSE;
01105     /* Done; clean up.      */
01106     while (GC_helper_count > 0) GC_wait_marker();
01107     /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
01108 #   ifdef PRINTSTATS
01109        GC_printf1(
01110            "Finished marking for mark phase number %lu\n",
01111            (unsigned long)GC_mark_no);
01112 #   endif
01113     GC_mark_no++;
01114     GC_release_mark_lock();
01115     GC_notify_all_marker();
01116 }
01117 
01118 
01119 /* Try to help out the marker, if it's running.          */
01120 /* We do not hold the GC lock, but the requestor does.  */
01121 void GC_help_marker(word my_mark_no)
01122 {
01123     mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
01124     unsigned my_id;
01125     mse * my_first_nonempty;
01126 
01127     if (!GC_parallel) return;
01128     GC_acquire_mark_lock();
01129     while (GC_mark_no < my_mark_no
01130            || !GC_help_wanted && GC_mark_no == my_mark_no) {
01131       GC_wait_marker();
01132     }
01133     my_id = GC_helper_count;
01134     if (GC_mark_no != my_mark_no || my_id >= GC_markers) {
01135       /* Second test is useful only if original threads can also      */
01136       /* act as helpers.  Under Linux they can't.                     */
01137       GC_release_mark_lock();
01138       return;
01139     }
01140     GC_helper_count = my_id + 1;
01141     GC_release_mark_lock();
01142     GC_mark_local(local_mark_stack, my_id);
01143     /* GC_mark_local decrements GC_helper_count. */
01144 }
01145 
01146 #endif /* PARALLEL_MARK */
01147 
01148 /* Allocate or reallocate space for a mark stack of n entries.        */
01149 /* May silently fail.                                          */
01150 static void alloc_mark_stack(n)
01151 word n;
01152 {
01153     mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
01154     
01155     GC_mark_stack_too_small = FALSE;
01156     if (GC_mark_stack_size != 0) {
01157         if (new_stack != 0) {
01158           word displ = (word)GC_mark_stack & (GC_page_size - 1);
01159           signed_word size = GC_mark_stack_size * sizeof(struct GC_ms_entry);
01160           
01161           /* Recycle old space */
01162              if (0 != displ) displ = GC_page_size - displ;
01163              size = (size - displ) & ~(GC_page_size - 1);
01164              if (size > 0) {
01165                GC_add_to_heap((struct hblk *)
01166                             ((word)GC_mark_stack + displ), (word)size);
01167              }
01168           GC_mark_stack = new_stack;
01169           GC_mark_stack_size = n;
01170          GC_mark_stack_limit = new_stack + n;
01171 #        ifdef CONDPRINT
01172            if (GC_print_stats) {
01173              GC_printf1("Grew mark stack to %lu frames\n",
01174                       (unsigned long) GC_mark_stack_size);
01175            }
01176 #        endif
01177         } else {
01178 #        ifdef CONDPRINT
01179            if (GC_print_stats) {
01180              GC_printf1("Failed to grow mark stack to %lu frames\n",
01181                       (unsigned long) n);
01182            }
01183 #        endif
01184         }
01185     } else {
01186         if (new_stack == 0) {
01187             GC_err_printf0("No space for mark stack\n");
01188             EXIT();
01189         }
01190         GC_mark_stack = new_stack;
01191         GC_mark_stack_size = n;
01192        GC_mark_stack_limit = new_stack + n;
01193     }
01194     GC_mark_stack_top = GC_mark_stack-1;
01195 }
01196 
01197 void GC_mark_init()
01198 {
01199     alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
01200 }
01201 
01202 /*
01203  * Push all locations between b and t onto the mark stack.
01204  * b is the first location to be checked. t is one past the last
01205  * location to be checked.
01206  * Should only be used if there is no possibility of mark stack
01207  * overflow.
01208  */
01209 void GC_push_all(bottom, top)
01210 ptr_t bottom;
01211 ptr_t top;
01212 {
01213     register word length;
01214     
01215     bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
01216     top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
01217     if (top == 0 || bottom == top) return;
01218     GC_mark_stack_top++;
01219     if (GC_mark_stack_top >= GC_mark_stack_limit) {
01220        ABORT("unexpected mark stack overflow");
01221     }
01222     length = top - bottom;
01223 #   if GC_DS_TAGS > ALIGNMENT - 1
01224        length += GC_DS_TAGS;
01225        length &= ~GC_DS_TAGS;
01226 #   endif
01227     GC_mark_stack_top -> mse_start = (word *)bottom;
01228     GC_mark_stack_top -> mse_descr = length;
01229 }
01230 
01231 /*
01232  * Analogous to the above, but push only those pages h with dirty_fn(h) != 0.
01233  * We use push_fn to actually push the block.
01234  * Used both to selectively push dirty pages, or to push a block
01235  * in piecemeal fashion, to allow for more marking concurrency.
01236  * Will not overflow mark stack if push_fn pushes a small fixed number
01237  * of entries.  (This is invoked only if push_fn pushes a single entry,
01238  * or if it marks each object before pushing it, thus ensuring progress
01239  * in the event of a stack overflow.)
01240  */
01241 void GC_push_selected(bottom, top, dirty_fn, push_fn)
01242 ptr_t bottom;
01243 ptr_t top;
01244 int (*dirty_fn) GC_PROTO((struct hblk * h));
01245 void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top));
01246 {
01247     register struct hblk * h;
01248 
01249     bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
01250     top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));
01251 
01252     if (top == 0 || bottom == top) return;
01253     h = HBLKPTR(bottom + HBLKSIZE);
01254     if (top <= (ptr_t) h) {
01255        if ((*dirty_fn)(h-1)) {
01256            (*push_fn)(bottom, top);
01257        }
01258        return;
01259     }
01260     if ((*dirty_fn)(h-1)) {
01261         (*push_fn)(bottom, (ptr_t)h);
01262     }
01263     while ((ptr_t)(h+1) <= top) {
01264        if ((*dirty_fn)(h)) {
01265            if ((word)(GC_mark_stack_top - GC_mark_stack)
01266               > 3 * GC_mark_stack_size / 4) {
01267               /* Danger of mark stack overflow */
01268               (*push_fn)((ptr_t)h, top);
01269               return;
01270            } else {
01271               (*push_fn)((ptr_t)h, (ptr_t)(h+1));
01272            }
01273        }
01274        h++;
01275     }
01276     if ((ptr_t)h != top) {
01277        if ((*dirty_fn)(h)) {
01278             (*push_fn)((ptr_t)h, top);
01279         }
01280     }
01281     if (GC_mark_stack_top >= GC_mark_stack_limit) {
01282         ABORT("unexpected mark stack overflow");
01283     }
01284 }
01285 
01286 # ifndef SMALL_CONFIG
01287 
01288 #ifdef PARALLEL_MARK
01289     /* Break up root sections into page size chunks to better spread  */
01290     /* out work.                                               */
01291     GC_bool GC_true_func(struct hblk *h) { return TRUE; }
01292 #   define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
01293 #else
01294 #   define GC_PUSH_ALL(b,t) GC_push_all(b,t);
01295 #endif
01296 
01297 
01298 void GC_push_conditional(bottom, top, all)
01299 ptr_t bottom;
01300 ptr_t top;
01301 int all;
01302 {
01303     if (all) {
01304       if (GC_dirty_maintained) {
01305 #      ifdef PROC_VDB
01306            /* Pages that were never dirtied cannot contain pointers   */
01307            GC_push_selected(bottom, top, GC_page_was_ever_dirty, GC_push_all);
01308 #      else
01309            GC_push_all(bottom, top);
01310 #      endif
01311       } else {
01312        GC_push_all(bottom, top);
01313       }
01314     } else {
01315        GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
01316     }
01317 }
01318 #endif
01319 
01320 # if defined(MSWIN32) || defined(MSWINCE)
01321   void __cdecl GC_push_one(p)
01322 # else
01323   void GC_push_one(p)
01324 # endif
01325 word p;
01326 {
01327     GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
01328 }
01329 
01330 struct GC_ms_entry *GC_mark_and_push(obj, mark_stack_ptr, mark_stack_limit, src)
01331 GC_PTR obj;
01332 struct GC_ms_entry * mark_stack_ptr;
01333 struct GC_ms_entry * mark_stack_limit;
01334 GC_PTR *src;
01335 {
01336    PREFETCH(obj);
01337    PUSH_CONTENTS(obj, mark_stack_ptr /* modified */, mark_stack_limit, src,
01338                was_marked /* internally generated exit label */);
01339    return mark_stack_ptr;
01340 }
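/* GC_mark_and_push pushes a single candidate pointer using the        */
/* exported GC_ms_entry interface; PUSH_CONTENTS (from gc_pmark.h)     */
/* checks obj against the block headers, sets its mark bit if it was   */
/* not already set, and pushes the object onto the mark stack.         */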
01341 
01342 # ifdef __STDC__
01343 #   define BASE(p) (word)GC_base((void *)(p))
01344 # else
01345 #   define BASE(p) (word)GC_base((char *)(p))
01346 # endif
01347 
01348 /* Mark and push (i.e. gray) a single object p onto the main   */
01349 /* mark stack.  Consider p to be valid if it is an interior    */
01350 /* pointer.                                             */
01351 /* The object p has passed a preliminary pointer validity      */
01352 /* test, but we do not definitely know whether it is valid.    */
01353 /* Mark bits are NOT atomically updated.  Thus this must be the       */
01354 /* only thread setting them.                                   */
01355 # if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
01356     void GC_mark_and_push_stack(p, source)
01357     ptr_t source;
01358 # else
01359     void GC_mark_and_push_stack(p)
01360 #   define source 0
01361 # endif
01362 register word p;
01363 {
01364     register word r;
01365     register hdr * hhdr; 
01366     register int displ;
01367   
01368     GET_HDR(p, hhdr);
01369     if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
01370         if (hhdr != 0) {
01371           r = BASE(p);
01372          hhdr = HDR(r);
01373          displ = BYTES_TO_WORDS(HBLKDISPL(r));
01374        }
01375     } else {
01376         register map_entry_type map_entry;
01377         
01378         displ = HBLKDISPL(p);
01379         map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
01380         if (map_entry >= MAX_OFFSET) {
01381           if (map_entry == OFFSET_TOO_BIG || !GC_all_interior_pointers) {
01382               r = BASE(p);
01383              displ = BYTES_TO_WORDS(HBLKDISPL(r));
01384              if (r == 0) hhdr = 0;
01385           } else {
01386              /* Offset invalid, but map reflects interior pointers    */
01387               hhdr = 0;
01388           }
01389         } else {
01390           displ = BYTES_TO_WORDS(displ);
01391           displ -= map_entry;
01392           r = (word)((word *)(HBLKPTR(p)) + displ);
01393         }
01394     }
01395     /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
01396     /* displ is the word index within the block.         */
01397     if (hhdr == 0) {
01398 #      ifdef PRINT_BLACK_LIST
01399          GC_add_to_black_list_stack(p, source);
01400 #      else
01401          GC_add_to_black_list_stack(p);
01402 #      endif
01403 #      undef source  /* In case we had to define it. */
01404     } else {
01405        if (!mark_bit_from_hdr(hhdr, displ)) {
01406            set_mark_bit_from_hdr(hhdr, displ);
01407            GC_STORE_BACK_PTR(source, (ptr_t)r);
01408            PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
01409                     GC_mark_stack_limit);
01410        }
01411     }
01412 }
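
/* Illustrative sketch (hypothetical, for exposition only; not part of   */
/* this file): the map lookup above in miniature.  For a block of        */
/* same-sized objects, the map in effect stores each valid word          */
/* displacement's offset within its object, so subtracting it yields the */
/* object base.  The modulus shortcut and the function name are          */
/* illustration only.                                                    */
static unsigned base_word_of(word_displ, obj_sz_words)
unsigned word_displ;
unsigned obj_sz_words;
{
    unsigned map_entry = word_displ % obj_sz_words;  /* offset in object */
    return word_displ - map_entry;                   /* base word index  */
}
/* e.g. base_word_of(7, 4) == 4: a pointer into word 7 of a block of     */
/* 4-word objects resolves to the object starting at word 4.             */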
01413 
01414 # ifdef TRACE_BUF
01415 
01416 # define TRACE_ENTRIES 1000
01417 
01418 struct trace_entry {
01419     char * kind;
01420     word gc_no;
01421     word words_allocd;
01422     word arg1;
01423     word arg2;
01424 } GC_trace_buf[TRACE_ENTRIES];
01425 
01426 int GC_trace_buf_ptr = 0;
01427 
01428 void GC_add_trace_entry(char *kind, word arg1, word arg2)
01429 {
01430     GC_trace_buf[GC_trace_buf_ptr].kind = kind;
01431     GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
01432     GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
01433     GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
01434     GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
01435     GC_trace_buf_ptr++;
01436     if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
01437 }
01438 
01439 void GC_print_trace(word gc_no, GC_bool lock)
01440 {
01441     int i;
01442     struct trace_entry *p;
01443     
01444     if (lock) LOCK();
01445     for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
01446        if (i < 0) i = TRACE_ENTRIES-1;
01447        p = GC_trace_buf + i;
01448        if (p -> gc_no < gc_no || p -> kind == 0) { if (lock) UNLOCK(); return; }
01449        printf("Trace:%s (gc:%lu,words:%lu) 0x%lX, 0x%lX\n",
01450               p -> kind, (unsigned long)(p -> gc_no), (unsigned long)(p -> words_allocd),
01451               (unsigned long)((p -> arg1) ^ 0x80000000), (unsigned long)((p -> arg2) ^ 0x80000000));
01452     }
01453     printf("Trace incomplete\n");
01454     if (lock) UNLOCK();
01455 }
01456 
01457 # endif /* TRACE_BUF */
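
/* Illustrative sketch (hypothetical, for exposition only; not part of   */
/* this file): how the fixed-size trace buffer above is consumed.        */
/* GC_print_trace starts at the newest entry (GC_trace_buf_ptr - 1),     */
/* walks backwards, wraps at index 0, and stops after visiting every     */
/* slot except the one about to be overwritten next.  The standalone     */
/* loop below mirrors that wrap-around indexing with a plain int array.  */
#define RING_SIZE 8
static int ring[RING_SIZE];
static int ring_ptr = 0;             /* next slot to overwrite */

static void ring_print_newest_first()
{
    int i;
    for (i = ring_ptr - 1; i != ring_ptr; i--) {
        if (i < 0) i = RING_SIZE - 1;   /* wrap, as in GC_print_trace */
        printf("%d\n", ring[i]);
    }
}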
01458 
01459 /*
01460  * A version of GC_push_all that treats all interior pointers as valid
01461  * and scans the entire region immediately, in case the contents
01462  * change.
01463  */
01464 void GC_push_all_eager(bottom, top)
01465 ptr_t bottom;
01466 ptr_t top;
01467 {
01468     word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
01469     word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
01470     register word *p;
01471     register word q;
01472     register word *lim;
01473     register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
01474     register ptr_t least_ha = GC_least_plausible_heap_addr;
01475 #   define GC_greatest_plausible_heap_addr greatest_ha
01476 #   define GC_least_plausible_heap_addr least_ha
01477 
01478     if (top == 0) return;
01479     /* check all pointers in range and push if they appear     */
01480     /* to be valid.                                     */
01481       lim = t - 1 /* longword */;
01482       for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
01483        q = *p;
01484        GC_PUSH_ONE_STACK(q, p);
01485       }
01486 #   undef GC_greatest_plausible_heap_addr
01487 #   undef GC_least_plausible_heap_addr
01488 }
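
/* Illustrative sketch (hypothetical, for exposition only; not part of   */
/* this file): the alignment arithmetic used by GC_push_all_eager above. */
/* bottom is rounded up and top rounded down to an ALIGNMENT boundary,   */
/* so only fully contained, aligned words are examined.  The macros and  */
/* addresses below are made up for illustration.                         */
#define ALIGN_UP(a, al)   (((a) + (al) - 1) & ~((al) - 1))
#define ALIGN_DOWN(a, al) ((a) & ~((al) - 1))

static void align_example()
{
    unsigned long bottom = 0x1001UL, top = 0x100eUL;
    unsigned long b = ALIGN_UP(bottom, 4);    /* 0x1004 */
    unsigned long t = ALIGN_DOWN(top, 4);     /* 0x100c */

    /* The last word scanned starts ALIGNMENT bytes before t. */
    printf("scan words at 0x%lx .. 0x%lx\n", b, t - 4);
}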
01489 
01490 #ifndef THREADS
01491 /*
01492  * A version of GC_push_all that treats all interior pointers as valid
01493  * and scans part of the area immediately, to make sure that saved
01494  * register values are not lost.
01495  * Cold_gc_frame delimits the stack section that must be scanned
01496  * eagerly.  A zero value indicates that no eager scanning is needed.
01497  */
01498 void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
01499 ptr_t bottom;
01500 ptr_t top;
01501 ptr_t cold_gc_frame;
01502 {
01503   if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
01504     /* Push the hot end of the stack eagerly, so that register values   */
01505     /* saved inside GC frames are marked before they disappear.              */
01506     /* The rest of the marking can be deferred until later.           */
01507     if (0 == cold_gc_frame) {
01508        GC_push_all_stack(bottom, top);
01509        return;
01510     }
01511     GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
01512 #   ifdef STACK_GROWS_DOWN
01513        GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
01514        GC_push_all_eager(bottom, cold_gc_frame);
01515 #   else /* STACK_GROWS_UP */
01516        GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
01517        GC_push_all_eager(cold_gc_frame, top);
01518 #   endif /* STACK_GROWS_UP */
01519   } else {
01520     GC_push_all_eager(bottom, top);
01521   }
01522 # ifdef TRACE_BUF
01523       GC_add_trace_entry("GC_push_all_stack", bottom, top);
01524 # endif
01525 }
01526 #endif /* !THREADS */
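
/* Illustrative sketch (hypothetical, for exposition only; not part of   */
/* this file): how the hot/cold split above divides a downward-growing   */
/* stack.  The addresses are made up.  The hot part below cold_gc_frame  */
/* is scanned eagerly so spilled register values are not missed; the     */
/* cold part can be scanned later via dirty bits.                        */
static void split_example()
{
    char stack[256];
    char *bottom = stack;                 /* hot end (stack pointer side) */
    char *top = stack + sizeof(stack);    /* cold end (oldest frames)     */
    char *cold_gc_frame = stack + 64;     /* hypothetical split point     */

    /* With STACK_GROWS_DOWN, the calls above amount to:                 */
    /*   GC_push_all(cold_gc_frame - sizeof(ptr_t), top);  deferred part */
    /*   GC_push_all_eager(bottom, cold_gc_frame);         eager part    */
    printf("eager: %p..%p, deferred: %p..%p\n",
           (void *)bottom, (void *)cold_gc_frame,
           (void *)(cold_gc_frame - sizeof(char *)), (void *)top);
}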
01527 
01528 void GC_push_all_stack(bottom, top)
01529 ptr_t bottom;
01530 ptr_t top;
01531 {
01532   if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
01533     GC_push_all(bottom, top);
01534   } else {
01535     GC_push_all_eager(bottom, top);
01536   }
01537 }
01538 
01539 #if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
01540 /* Push all objects reachable from marked objects in the given block */
01541 /* containing objects of size 1 (one word).                          */
01542 void GC_push_marked1(h, hhdr)
01543 struct hblk *h;
01544 register hdr * hhdr;
01545 {
01546     word * mark_word_addr = &(hhdr->hb_marks[0]);
01547     register word *p;
01548     word *plim;
01549     register int i;
01550     register word q;
01551     register word mark_word;
01552     register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
01553     register ptr_t least_ha = GC_least_plausible_heap_addr;
01554     register mse * mark_stack_top = GC_mark_stack_top;
01555     register mse * mark_stack_limit = GC_mark_stack_limit;
01556 #   define GC_mark_stack_top mark_stack_top
01557 #   define GC_mark_stack_limit mark_stack_limit
01558 #   define GC_greatest_plausible_heap_addr greatest_ha
01559 #   define GC_least_plausible_heap_addr least_ha
01560     
01561     p = (word *)(h->hb_body);
01562     plim = (word *)(((word)h) + HBLKSIZE);
01563 
01564     /* go through all words in block */
01565        while( p < plim )  {
01566            mark_word = *mark_word_addr++;
01567            i = 0;
01568            while(mark_word != 0) {
01569              if (mark_word & 1) {
01570                  q = p[i];
01571                  GC_PUSH_ONE_HEAP(q, p + i);
01572              }
01573              i++;
01574              mark_word >>= 1;
01575            }
01576            p += WORDSZ;
01577        }
01578 #   undef GC_greatest_plausible_heap_addr
01579 #   undef GC_least_plausible_heap_addr        
01580 #   undef GC_mark_stack_top
01581 #   undef GC_mark_stack_limit
01582     GC_mark_stack_top = mark_stack_top;
01583 }
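
/* Illustrative sketch (hypothetical, for exposition only; not part of   */
/* this file): the bit scan used by GC_push_marked1 above.  Each mark    */
/* word covers WORDSZ consecutive words of the block; bit i set means    */
/* the object starting at word i of the current chunk is marked.  The    */
/* standalone loop below visits the set bit positions of one such word.  */
static void visit_marked_indices(mark_word)
word mark_word;
{
    int i = 0;

    while (mark_word != 0) {
        if (mark_word & 1) {
            printf("object at word index %d is marked\n", i);
        }
        i++;
        mark_word >>= 1;
    }
}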
01584 
01585 
01586 #ifndef UNALIGNED
01587 
01588 /* Push all objects reachable from marked objects in the given block */
01589 /* containing objects of size 2 (two words).                         */
01590 void GC_push_marked2(h, hhdr)
01591 struct hblk *h;
01592 register hdr * hhdr;
01593 {
01594     word * mark_word_addr = &(hhdr->hb_marks[0]);
01595     register word *p;
01596     word *plim;
01597     register int i;
01598     register word q;
01599     register word mark_word;
01600     register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
01601     register ptr_t least_ha = GC_least_plausible_heap_addr;
01602     register mse * mark_stack_top = GC_mark_stack_top;
01603     register mse * mark_stack_limit = GC_mark_stack_limit;
01604 #   define GC_mark_stack_top mark_stack_top
01605 #   define GC_mark_stack_limit mark_stack_limit
01606 #   define GC_greatest_plausible_heap_addr greatest_ha
01607 #   define GC_least_plausible_heap_addr least_ha
01608     
01609     p = (word *)(h->hb_body);
01610     plim = (word *)(((word)h) + HBLKSIZE);
01611 
01612     /* go through all words in block */
01613        while( p < plim )  {
01614            mark_word = *mark_word_addr++;
01615            i = 0;
01616            while(mark_word != 0) {
01617              if (mark_word & 1) {
01618                  q = p[i];
01619                  GC_PUSH_ONE_HEAP(q, p + i);
01620                  q = p[i+1];
01621                  GC_PUSH_ONE_HEAP(q, p + i + 1);
01622              }
01623              i += 2;
01624              mark_word >>= 2;
01625            }
01626            p += WORDSZ;
01627        }
01628 #   undef GC_greatest_plausible_heap_addr
01629 #   undef GC_least_plausible_heap_addr        
01630 #   undef GC_mark_stack_top
01631 #   undef GC_mark_stack_limit
01632     GC_mark_stack_top = mark_stack_top;
01633 }
01634 
01635 /* Push all objects reachable from marked objects in the given block */
01636 /* containing objects of size 4 (four words).                        */
01637 /* There is a risk of mark stack overflow here.  But we handle that. */
01638 /* And only unmarked objects get pushed, so it's not very likely.    */
01639 void GC_push_marked4(h, hhdr)
01640 struct hblk *h;
01641 register hdr * hhdr;
01642 {
01643     word * mark_word_addr = &(hhdr->hb_marks[0]);
01644     register word *p;
01645     word *plim;
01646     register int i;
01647     register word q;
01648     register word mark_word;
01649     register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
01650     register ptr_t least_ha = GC_least_plausible_heap_addr;
01651     register mse * mark_stack_top = GC_mark_stack_top;
01652     register mse * mark_stack_limit = GC_mark_stack_limit;
01653 #   define GC_mark_stack_top mark_stack_top
01654 #   define GC_mark_stack_limit mark_stack_limit
01655 #   define GC_greatest_plausible_heap_addr greatest_ha
01656 #   define GC_least_plausible_heap_addr least_ha
01657     
01658     p = (word *)(h->hb_body);
01659     plim = (word *)(((word)h) + HBLKSIZE);
01660 
01661     /* go through all words in block */
01662        while( p < plim )  {
01663            mark_word = *mark_word_addr++;
01664            i = 0;
01665            while(mark_word != 0) {
01666              if (mark_word & 1) {
01667                  q = p[i];
01668                  GC_PUSH_ONE_HEAP(q, p + i);
01669                  q = p[i+1];
01670                  GC_PUSH_ONE_HEAP(q, p + i + 1);
01671                  q = p[i+2];
01672                  GC_PUSH_ONE_HEAP(q, p + i + 2);
01673                  q = p[i+3];
01674                  GC_PUSH_ONE_HEAP(q, p + i + 3);
01675              }
01676              i += 4;
01677              mark_word >>= 4;
01678            }
01679            p += WORDSZ;
01680        }
01681 #   undef GC_greatest_plausible_heap_addr
01682 #   undef GC_least_plausible_heap_addr        
01683 #   undef GC_mark_stack_top
01684 #   undef GC_mark_stack_limit
01685     GC_mark_stack_top = mark_stack_top;
01686 }
01687 
01688 #endif /* UNALIGNED */
01689 
01690 #endif /* SMALL_CONFIG */
01691 
01692 /* Push all objects reachable from marked objects in the given block */
01693 void GC_push_marked(h, hhdr)
01694 struct hblk *h;
01695 register hdr * hhdr;
01696 {
01697     register int sz = hhdr -> hb_sz;
01698     register int descr = hhdr -> hb_descr;
01699     register word * p;
01700     register int word_no;
01701     register word * lim;
01702     register mse * GC_mark_stack_top_reg;
01703     register mse * mark_stack_limit = GC_mark_stack_limit;
01704     
01705     /* Some quick shortcuts: */
01706        if ((0 | GC_DS_LENGTH) == descr) return;
01707         if (GC_block_empty(hhdr)/* nothing marked */) return;
01708     GC_n_rescuing_pages++;
01709     GC_objects_are_marked = TRUE;
01710     if (sz > MAXOBJSZ) {
01711         lim = (word *)h;
01712     } else {
01713         lim = (word *)(h + 1) - sz;
01714     }
01715     
01716     switch(sz) {
01717 #   if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)   
01718      case 1:
01719        GC_push_marked1(h, hhdr);
01720        break;
01721 #   endif
01722 #   if !defined(SMALL_CONFIG) && !defined(UNALIGNED) && \
01723        !defined(USE_MARK_BYTES)
01724      case 2:
01725        GC_push_marked2(h, hhdr);
01726        break;
01727      case 4:
01728        GC_push_marked4(h, hhdr);
01729        break;
01730 #   endif       
01731      default:
01732       GC_mark_stack_top_reg = GC_mark_stack_top;
01733       for (p = (word *)h, word_no = 0; p <= lim; p += sz, word_no += sz) {
01734          if (mark_bit_from_hdr(hhdr, word_no)) {
01735            /* Mark from fields inside the object */
01736              PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
01737 #           ifdef GATHERSTATS
01738               /* Subtract this object from total, since it was */
01739               /* added in twice.                               */
01740               GC_composite_in_use -= sz;
01741 #           endif
01742          }
01743       }
01744       GC_mark_stack_top = GC_mark_stack_top_reg;
01745     }
01746 }
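
/* Illustrative sketch (hypothetical, for exposition only; not part of   */
/* this file): why lim is computed as (word *)(h + 1) - sz above.  The   */
/* default case steps through object starts sz words apart and stops at  */
/* the last start from which a whole sz-word object still fits, so the   */
/* scan never runs past the block.  The concrete sizes are made up.      */
static void lim_example()
{
    unsigned block_words = 1024, sz = 6;
    unsigned lim = block_words - sz;     /* analogue of (word *)(h+1) - sz */
    unsigned p;

    for (p = 0; p <= lim; p += sz) {
        /* p is the word index of an object lying wholly in the block */
    }
    printf("last object start scanned: %u\n", p - sz);   /* 1014 */
}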
01747 
01748 #ifndef SMALL_CONFIG
01749 /* Test whether any page in the given block is dirty    */
01750 GC_bool GC_block_was_dirty(h, hhdr)
01751 struct hblk *h;
01752 register hdr * hhdr;
01753 {
01754     register int sz = hhdr -> hb_sz;
01755     
01756     if (sz <= MAXOBJSZ) {
01757          return(GC_page_was_dirty(h));
01758     } else {
01759         register ptr_t p = (ptr_t)h;
01760          sz = WORDS_TO_BYTES(sz);
01761          while (p < (ptr_t)h + sz) {
01762              if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
01763              p += HBLKSIZE;
01764          }
01765          return(FALSE);
01766     }
01767 }
01768 #endif /* SMALL_CONFIG */
01769 
01770 /* Find the next used block at or after h, push its marked objects, and return the address of the following block. */
01771 struct hblk * GC_push_next_marked(h)
01772 struct hblk *h;
01773 {
01774     register hdr * hhdr;
01775     
01776     h = GC_next_used_block(h);
01777     if (h == 0) return(0);
01778     hhdr = HDR(h);
01779     GC_push_marked(h, hhdr);
01780     return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
01781 }
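
/* Illustrative sketch (hypothetical, for exposition only; not part of   */
/* this file): the kind of driver loop that consumes this block walker.  */
/* The marker state machine earlier in this file advances its scan       */
/* pointer through the heap the same way, using the dirty and            */
/* uncollectable variants below.                                         */
static void push_all_marked_blocks(first)
struct hblk * first;
{
    struct hblk * h = first;

    while (h != 0) {
        h = GC_push_next_marked(h);
    }
}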
01782 
01783 #ifndef SMALL_CONFIG
01784 /* Identical to above, but mark only from dirty pages   */
01785 struct hblk * GC_push_next_marked_dirty(h)
01786 struct hblk *h;
01787 {
01788     register hdr * hhdr;
01789     
01790     if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
01791     for (;;) {
01792         h = GC_next_used_block(h);
01793         if (h == 0) return(0);
01794         hhdr = HDR(h);
01795 #      ifdef STUBBORN_ALLOC
01796           if (hhdr -> hb_obj_kind == STUBBORN) {
01797             if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
01798                 break;
01799             }
01800           } else {
01801             if (GC_block_was_dirty(h, hhdr)) break;
01802           }
01803 #      else
01804          if (GC_block_was_dirty(h, hhdr)) break;
01805 #      endif
01806         h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
01807     }
01808     GC_push_marked(h, hhdr);
01809     return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
01810 }
01811 #endif
01812 
01813 /* Similar to above, but for uncollectable pages.  Needed since we    */
01814 /* do not clear marks for such pages, even for full collections.      */
01815 struct hblk * GC_push_next_marked_uncollectable(h)
01816 struct hblk *h;
01817 {
01818     register hdr * hhdr = HDR(h);
01819     
01820     for (;;) {
01821         h = GC_next_used_block(h);
01822         if (h == 0) return(0);
01823         hhdr = HDR(h);
01824        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
01825         h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
01826     }
01827     GC_push_marked(h, hhdr);
01828     return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
01829 }
01830 
01831