plt-scheme 4.2.1
allchblk.c
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a black   */
        /* listed block.  Must be >= HBLKSIZE.                  */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list.  */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a     */
        /* single free list.                                           */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single   */
        /* bin.                                                        */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                + UNIQUE_THRESHOLD
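
/* With the defaults above, N_HBLK_FLS = (256 - 32)/8 + 32 = 60, so    */
/* GC_hblkfreelist below has 61 entries.  Lists 1..31 hold exact       */
/* sizes, lists 32..59 each cover a range of FL_COMPRESSION sizes,     */
/* and list 60 holds everything of HUGE_THRESHOLD blocks or more       */
/* (see GC_hblk_fl_from_blocks below).                                 */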

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list.   */

  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS      */
  /* > GC_max_large_allocd_bytes?                                      */
# ifdef __GNUC__
  __inline__
# endif
  static GC_bool GC_enough_large_bytes_left(bytes,n)
  word bytes;
  int n;
  {
    int i;
    for (i = N_HBLK_FLS; i >= n; --i) {
        bytes += GC_free_bytes[i];
        if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }

# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD;
}
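
/* Example mappings with the defaults: 1 block -> list 1, 32 -> 32,    */
/* 40 -> (40 - 32)/8 + 32 = 33, and 256 or more -> N_HBLK_FLS (60).    */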

# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        if (0 != h) GC_printf1("Free list %ld:\n",
                               (unsigned long)i);
#     else
        if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
                               (unsigned long)i,
                               (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
             GC_printf0("partially black listed\n");
        } else {
             GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
#   ifndef USE_MUNMAP
      if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
      }
#   endif
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections.        */
          while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
          }
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf1("Missing header!!(%ld)\n", hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and        */
/* kind of objects.                                                    */
/* Return FALSE on failure.                                            */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
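
/* Note the hb_sz convention (visible in GC_freehblk and               */
/* GC_dump_regions): a header set up here records the object size in   */
/* words, while a block sitting on a free list records the block size  */
/* in bytes, a multiple of HBLKSIZE.                                   */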

#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size-appropriate
 * free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts.  */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
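    /* With USE_MUNMAP there are no free-byte counts to maintain, so   */
    /* the index is only computed when we must unlink the head of a    */
    /* list, in the branch just below.                                 */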
    if (hhdr -> hb_prev == 0) {
#       ifdef USE_MUNMAP
          if (FL_UNKNOWN == n) {
              index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
          } else {
              index = n;
          }
#       endif
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/*
 * Return a pointer to the free block ending just before h, if any.
 */
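/* We first look up the header for h-1, following forwarding addresses */
/* for the interior of a multi-block object; only if no header is      */
/* installed there do we fall back on the slower GC_prev_block search. */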
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if (HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
        return p;
      }
    }
    return 0;
}

/*
 * Add hhdr to the appropriate free list.
 * The block is pushed onto the head of its list; blocks within a
 * list are not kept in address order.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}
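
/* The assertions above check that neither neighbor is a mapped free   */
/* block: adjacent mapped free blocks should already have been         */
/* coalesced.  GC_invalidate_map marks the header so that the block    */
/* is subsequently recognized as free.                                 */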

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only  */
/* way blocks are ever unmapped.                                       */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
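        /* Unmap if the block was last reclaimed at least               */
        /* UNMAP_THRESHOLD collections ago.  Since hb_last_reclaimed    */
        /* is only an unsigned short, a value apparently in the future  */
        /* means the counter has wrapped, so such blocks are treated    */
        /* as old; the threshold check below skips the window right     */
        /* after a wrap, when the age test would be unreliable.         */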
        if ((last_rec > GC_gc_no || last_rec < threshold)
            && threshold < GC_gc_no /* not recently wrapped */) {
            sz = hhdr -> hb_sz;
            GC_unmap((ptr_t)h, sz);
            hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}

/* Merge all unmapped blocks that are adjacent to other free           */
/* blocks.  This may involve remapping, since all blocks are either    */
/* fully mapped or fully unmapped.                                     */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
                if (size > nextsize) {
                  GC_remap((ptr_t)next, nextsize);
                } else {
                  GC_unmap((ptr_t)h, size);
                  hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
                hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
              }
            } else {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergeable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    } /* for ... */
}
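
/* We restart from the head of the list after each merge: GC_add_to_fl */
/* may have moved the merged block to a different (larger) size list,  */
/* and the current list's links may have changed under us.             */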

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
        /* This may be very bad news ... */
        WARN("Header allocation failed: Dropping block.\n", 0);
        return(0);
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
        hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
        nhdr -> hb_map = 0;     /* Don't fail test for consecutive      */
                                /* free blocks in GC_add_to_fl.         */
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}

struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz words, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * Unlike older versions of the collectors, the client is responsible
 * for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
{
    word blocks = OBJ_SZ_TO_BLOCKS(sz);
    int start_list = GC_hblk_fl_from_blocks(blocks);
    int i;
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) {
            return result;
        }
    }
    return 0;
}
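
/* A minimal usage sketch (not part of the original file): a caller    */
/* such as GC_new_hblk asks for a block and then builds the object     */
/* free list itself.  The kind NORMAL and the free-list construction   */
/* here are illustrative assumptions, not the collector's actual code. */
#if 0
    {
        word sz = 8;    /* object size in words */
        struct hblk * h = GC_allochblk(sz, NORMAL, 0);

        if (0 != h) {
            /* Thread an object free list through the block and hand   */
            /* it to the small-object allocator; clear the block first */
            /* if the object kind requires clean memory.               */
        }
    }
#endif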
/*
 * The same, but with search restricted to the nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp          */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp      */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        for (; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (size_avail != size_needed
                && !GC_use_entire_heap
                && !GC_dont_gc
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !TRUE_INCREMENTAL && GC_should_collect()) {
#               ifdef USE_MUNMAP
                    continue;
#               else
                    /* If we have enough large blocks left to cover any */
                    /* previous request for large blocks, we go ahead   */
                    /* and split.  Assuming a steady state, that should */
                    /* be safe.  It means that we can use the full      */
                    /* heap if we allocate only small objects.          */
                    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                      continue;
                    }
                    /* If we are deallocating lots of memory from       */
                    /* finalizers, fail and collect sooner rather       */
                    /* than later.                                      */
                    if (WORDS_TO_BYTES(GC_finalizer_mem_freed)
                        > (GC_heapsize >> 4))  {
                      continue;
                    }
#               endif /* !USE_MUNMAP */
            }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
              }
            }
            if ( !IS_UNCOLLECTABLE(kind) &&
                 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                                      (word)eff_size_needed))
                        != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp */
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                      hbp = thishbp;
                      hhdr = thishdr;
                      /* We must now allocate thishbp, since it may     */
                      /* be on the wrong free list.                     */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (++GC_large_alloc_warn_suppressed
                    >= GC_large_alloc_warn_interval) {
                /* PLTSCHEME: rather not see this particular message (or setenv). */
#if 0
                  WARN("Repeated allocation of very large block "
                       "(appr. size %ld):\n"
                       "\tMay lead to memory leak and poor performance.\n",
                       size_needed);
#endif
                  GC_large_alloc_warn_suppressed = 0;
                }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need      */
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointer-free       */
                  /* blocks are unpopular.                              */
                  /* A dropped block will be reconsidered at next GC.   */
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to */
                    /* maximize the chance that we will recover some   */
                    /* later.                                          */
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;

                      GC_words_wasted += BYTES_TO_WORDS(total_size);
                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
                        if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                          (void) setup_header(
                                     hhdr,
                                     BYTES_TO_WORDS(HBLKSIZE),
                                     PTRFREE, 0); /* Can't fail. */
                          if (GC_debugging_started) {
                            BZERO(h, HBLKSIZE);
                          }
                        }
                      }
                    /* Restore hbp to point at free block */
                      hbp = prev;
                      if (0 == hbp) {
                        return GC_allochblk_nth(sz, kind, flags, n);
                      }
                      hhdr = HDR(hbp);
                  }
                }
              }
            }
            if (size_avail >= size_needed) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n    */
                /* is important.                                        */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Notify virtual dirty bit implementation that we are about to write.  */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
        GC_remove_protection(hbp, divHBLKSZ(size_needed),
                             (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of        */
    /* consecutive failures.                                            */
    {
        extern unsigned GC_fail_count;

        GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
        ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
#         ifdef USE_MUNMAP
            prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#         endif
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }
    /* FIXME: It is not clear we really always want to do these merges  */
    /* with -DUSE_MUNMAP, since it updates ages and hence prevents      */
    /* unmapping.                                                       */

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
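
/* Taken together, GC_freehblk and GC_allochblk maintain the invariant */
/* that mapped free blocks are never physically adjacent: freeing      */
/* coalesces eagerly, so each free list holds maximal free runs, and   */
/* GC_add_to_fl's assertions check exactly this.                       */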