Back to index

lightning-sunbird  0.9+nobinonly
malloc.c
Go to the documentation of this file.
00001 /* 
00002  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
00003  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
00004  *
00005  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
00006  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
00007  *
00008  * Permission is hereby granted to use or copy this program
00009  * for any purpose,  provided the above notices are retained on all copies.
00010  * Permission to modify the code and to distribute modified code is granted,
00011  * provided the above notices are retained, and a notice that the code was
00012  * modified is included with the above copyright notice.
00013  */
00014 /* Boehm, February 7, 1996 4:32 pm PST */
00015  
00016 #include <stdio.h>
00017 #include <signal.h>
00018 
00019 #include "gc_priv.h"
00020 
00021 extern ptr_t GC_clear_stack();     /* in misc.c, behaves like identity */
00022 void GC_extend_size_map();  /* in misc.c. */
00023 
00024 /* Allocate reclaim list for kind: */
00025 /* Return TRUE on success          */
00026 GC_bool GC_alloc_reclaim_list(kind)
00027 register struct obj_kind * kind;
00028 {
00029     struct hblk ** result = (struct hblk **)
00030               GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
00031     if (result == 0) return(FALSE);
00032     BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
00033     kind -> ok_reclaim_list = result;
00034     return(TRUE);
00035 }
00036 
00037 /* allocate lb bytes for an object of kind.      */
00038 /* Should not be used to directly to allocate    */
00039 /* objects such as STUBBORN objects that  */
00040 /* require special handling on allocation.       */
00041 /* First a version that assumes we already       */
00042 /* hold lock:                             */
00043 ptr_t GC_generic_malloc_inner(lb, k)
00044 register word lb;
00045 register int k;
00046 {
00047 register word lw;
00048 register ptr_t op;
00049 register ptr_t *opp;
00050 
00051     if( SMALL_OBJ(lb) ) {
00052         register struct obj_kind * kind = GC_obj_kinds + k;
00053 #       ifdef MERGE_SIZES
00054          lw = GC_size_map[lb];
00055 #      else
00056          lw = ALIGNED_WORDS(lb);
00057          if (lw == 0) lw = 1;
00058 #       endif
00059        opp = &(kind -> ok_freelist[lw]);
00060         if( (op = *opp) == 0 ) {
00061 #          ifdef MERGE_SIZES
00062              if (GC_size_map[lb] == 0) {
00063                if (!GC_is_initialized)  GC_init_inner();
00064                if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
00065                return(GC_generic_malloc_inner(lb, k));
00066              }
00067 #          else
00068              if (!GC_is_initialized) {
00069                GC_init_inner();
00070                return(GC_generic_malloc_inner(lb, k));
00071              }
00072 #          endif
00073            if (kind -> ok_reclaim_list == 0) {
00074               if (!GC_alloc_reclaim_list(kind)) goto out;
00075            }
00076            op = GC_allocobj(lw, k);
00077            if (op == 0) goto out;
00078         }
00079         /* Here everything is in a consistent state.    */
00080         /* We assume the following assignment is */
00081         /* atomic.  If we get aborted                   */
00082         /* after the assignment, we lose an object,     */
00083         /* but that's benign.                           */
00084         /* Volatile declarations may need to be added   */
00085         /* to prevent the compiler from breaking things.*/
00086         *opp = obj_link(op);
00087         obj_link(op) = 0;
00088     } else {
00089        register struct hblk * h;
00090        register word n_blocks = divHBLKSZ(ADD_SLOP(lb)
00091                                       + HDR_BYTES + HBLKSIZE-1);
00092        
00093        if (!GC_is_initialized) GC_init_inner();
00094        /* Do our share of marking work */
00095           if(GC_incremental && !GC_dont_gc)
00096               GC_collect_a_little_inner((int)n_blocks);
00097        lw = ROUNDED_UP_WORDS(lb);
00098        while ((h = GC_allochblk(lw, k, 0)) == 0
00099               && GC_collect_or_expand(n_blocks, FALSE));
00100        if (h == 0) {
00101            op = 0;
00102        } else {
00103            op = (ptr_t) (h -> hb_body);
00104            GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
00105        }
00106     }
00107     GC_words_allocd += lw;
00108     
00109 out:
00110     return((ptr_t)op);
00111 }
00112 
00113 ptr_t GC_generic_malloc(lb, k)
00114 register word lb;
00115 register int k;
00116 {
00117     ptr_t result;
00118     DCL_LOCK_STATE;
00119 
00120     GC_INVOKE_FINALIZERS();
00121     DISABLE_SIGNALS();
00122     LOCK();
00123     result = GC_generic_malloc_inner(lb, k);
00124     UNLOCK();
00125     ENABLE_SIGNALS();
00126     if (0 == result) {
00127         return((*GC_oom_fn)(lb));
00128     } else {
00129         return(result);
00130     }
00131 }   
00132 
00133 
/* Common slow-path tail for the GC_malloc* entry points.             */
/* We make the GC_clear_stack call a tail call, hoping to get more of */
/* the stack.                                                         */
/* Arguments are parenthesized so the (word) cast cannot bind to only */
/* part of a compound argument expression (e.g. GENERAL_MALLOC(a+b,k) */
/* previously expanded to (word)a+b).                                 */
#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)(lb), (k)))
00138 
00139 /* Allocate lb bytes of atomic (pointerfree) data */
00140 # ifdef __STDC__
00141     GC_PTR GC_malloc_atomic(size_t lb)
00142 # else
00143     GC_PTR GC_malloc_atomic(lb)
00144     size_t lb;
00145 # endif
00146 {
00147 register ptr_t op;
00148 register ptr_t * opp;
00149 register word lw;
00150 DCL_LOCK_STATE;
00151 
00152     if( SMALL_OBJ(lb) ) {
00153 #       ifdef MERGE_SIZES
00154          lw = GC_size_map[lb];
00155 #      else
00156          lw = ALIGNED_WORDS(lb);
00157 #       endif
00158        opp = &(GC_aobjfreelist[lw]);
00159        FASTLOCK();
00160         if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
00161             FASTUNLOCK();
00162             return(GENERAL_MALLOC((word)lb, PTRFREE));
00163         }
00164         /* See above comment on signals.  */
00165         *opp = obj_link(op);
00166         GC_words_allocd += lw;
00167         FASTUNLOCK();
00168         return((GC_PTR) op);
00169    } else {
00170        return(GENERAL_MALLOC((word)lb, PTRFREE));
00171    }
00172 }
00173 
00174 /* Allocate lb bytes of composite (pointerful) data */
00175 # ifdef __STDC__
00176     GC_PTR GC_malloc(size_t lb)
00177 # else
00178     GC_PTR GC_malloc(lb)
00179     size_t lb;
00180 # endif
00181 {
00182 register ptr_t op;
00183 register ptr_t *opp;
00184 register word lw;
00185 DCL_LOCK_STATE;
00186 
00187     if( SMALL_OBJ(lb) ) {
00188 #       ifdef MERGE_SIZES
00189          lw = GC_size_map[lb];
00190 #      else
00191          lw = ALIGNED_WORDS(lb);
00192 #       endif
00193        opp = &(GC_objfreelist[lw]);
00194        FASTLOCK();
00195         if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
00196             FASTUNLOCK();
00197             return(GENERAL_MALLOC((word)lb, NORMAL));
00198         }
00199         /* See above comment on signals.  */
00200         *opp = obj_link(op);
00201         obj_link(op) = 0;
00202         GC_words_allocd += lw;
00203         FASTUNLOCK();
00204         return((GC_PTR) op);
00205    } else {
00206        return(GENERAL_MALLOC((word)lb, NORMAL));
00207    }
00208 }
00209 
# ifdef REDIRECT_MALLOC
/* Redirected libc malloc: forwards to whatever allocator	*/
/* REDIRECT_MALLOC names (typically GC_malloc).			*/
# ifdef __STDC__
    GC_PTR malloc(size_t lb)
# else
    GC_PTR malloc(lb)
    size_t lb;
# endif
  {
    /* It might help to manually inline the GC_malloc call here.	*/
    /* But any decent compiler should reduce the extra procedure call	*/
    /* to at most a jump instruction in this case.			*/
#   if defined(I386) && defined(SOLARIS_THREADS)
      /*
       * Thread initialisation can call malloc before
       * we're ready for it.  Serve those requests straight from
       * sbrk; that memory is never returned to the system.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && SOLARIS_THREADS */
    return(REDIRECT_MALLOC(lb));
  }
00233 
00234 # ifdef __STDC__
00235     GC_PTR calloc(size_t n, size_t lb)
00236 # else
00237     GC_PTR calloc(n, lb)
00238     size_t n, lb;
00239 # endif
00240   {
00241     return(REDIRECT_MALLOC(n*lb));
00242   }
00243 # endif /* REDIRECT_MALLOC */
00244 
00245 GC_PTR GC_generic_or_special_malloc(lb,knd)
00246 word lb;
00247 int knd;
00248 {
00249     switch(knd) {
00250 #     ifdef STUBBORN_ALLOC
00251        case STUBBORN:
00252            return(GC_malloc_stubborn((size_t)lb));
00253 #     endif
00254        case PTRFREE:
00255            return(GC_malloc_atomic((size_t)lb));
00256        case NORMAL:
00257            return(GC_malloc((size_t)lb));
00258        case UNCOLLECTABLE:
00259            return(GC_malloc_uncollectable((size_t)lb));
00260 #       ifdef ATOMIC_UNCOLLECTABLE
00261          case AUNCOLLECTABLE:
00262            return(GC_malloc_atomic_uncollectable((size_t)lb));
00263 #      endif /* ATOMIC_UNCOLLECTABLE */
00264        default:
00265            return(GC_generic_malloc(lb,knd));
00266     }
00267 }
00268 
00269 
/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old object.      */
/* Shrinking of large blocks is not implemented well: a large block   */
/* that still fits is kept at (roughly) its current size.             */
/* Returns 0 if the new allocation fails; the original object is      */
/* then left intact.                                                  */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;	 /* Current size in bytes	*/
register word orig_sz;	 /* Original sz in bytes	*/
int obj_kind;

    if (p == 0) return(GC_malloc(lb));	/* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
	/* Large object: round its recorded size up to the next whole	*/
	/* heap block, and fix up the header's size and mark descriptor	*/
	/* in place so the collector scans the full usable area.	*/
	  register word descr;
	  
	  sz = (sz+HDR_BYTES+HBLKSIZE-1)
		& (~HBLKMASK);
	  sz -= HDR_BYTES;
	  hhdr -> hb_sz = BYTES_TO_WORDS(sz);
	  descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
	  if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
	  /* Extra area is already cleared by allochblk. */
    }
    if (ADD_SLOP(lb) <= sz) {
	if (lb >= (sz >> 1)) {
	    /* New size fits and wastes less than half the object:	*/
	    /* reuse it in place.					*/
#	    ifdef STUBBORN_ALLOC
	        if (obj_kind == STUBBORN) GC_change_stubborn(p);
#	    endif
	    if (orig_sz > lb) {
	      /* Clear unneeded part of object to avoid bogus pointer */
	      /* tracing.					      */
	      /* Safe for stubborn objects.			      */
	        BZERO(((ptr_t)p) + lb, orig_sz - lb);
	    }
	    return(p);
	} else {
	    /* shrink: copy into a freshly allocated smaller object */
	      GC_PTR result =
	      		GC_generic_or_special_malloc((word)lb, obj_kind);

	      if (result == 0) return(0);
	          /* Could also return original object.  But this 	*/
	          /* gives the client warning of imminent disaster.	*/
	      BCOPY(p, result, lb);
#	      ifndef IGNORE_FREE
	        GC_free(p);
#	      endif
	      return(result);
	}
    } else {
	/* grow: copy the old contents into a larger object */
	  GC_PTR result =
	  	GC_generic_or_special_malloc((word)lb, obj_kind);

	  if (result == 0) return(0);
	  BCOPY(p, result, sz);
#	  ifndef IGNORE_FREE
	    GC_free(p);
#	  endif
	  return(result);
    }
}
00349 
# ifdef REDIRECT_MALLOC
/* Redirected libc realloc: forwards to whatever GC_REALLOC	*/
/* names (normally GC_realloc).					*/
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
  {
    return(GC_REALLOC(p, lb));
  }
# endif /* REDIRECT_MALLOC */
00362 
/* Explicitly deallocate an object p.				*/
/* Small objects are pushed back on their kind's free list;	*/
/* large objects return their whole heap block.  The object's	*/
/* size and kind are recovered from its block header, so p	*/
/* must point to the start of an object allocated by this	*/
/* collector.							*/
# ifdef __STDC__
    void GC_free(GC_PTR p)
# else
    void GC_free(p)
    GC_PTR p;
# endif
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;	/* object size in words */
    register ptr_t * flh;	/* head of the target free list */
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
    	/* Required by ANSI.  It's not my fault ...	*/
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
	(defined(SOLARIS_THREADS) || defined(LINUX_THREADS))
	/* We have to redirect malloc calls during initialization.	*/
	/* Don't try to deallocate that memory.				*/
	if (0 == hhdr) return;
#   endif
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (sz <= MAXOBJSZ) {
	/* Small object: thread it back onto its free list. */
#	ifdef THREADS
	    DISABLE_SIGNALS();
	    LOCK();
#	endif
	GC_mem_freed += sz;
	/* A signal here can make GC_mem_freed and GC_non_gc_bytes	*/
	/* inconsistent.  We claim this is benign.			*/
	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
		/* Its unnecessary to clear the mark bit.  If the 	*/
		/* object is reallocated, it doesn't matter.  O.w. the	*/
		/* collector will do it, since it's on a free list.	*/
	if (ok -> ok_init) {
	    /* Clear everything but the link word, so stale contents	*/
	    /* can't be mistaken for pointers later.			*/
	    BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
	}
	flh = &(ok -> ok_freelist[sz]);
	obj_link(p) = *flh;
	*flh = (ptr_t)p;
#	ifdef THREADS
	    UNLOCK();
	    ENABLE_SIGNALS();
#	endif
    } else {
	/* Large object: give the entire block back to the allocator. */
    	DISABLE_SIGNALS();
        LOCK();
        GC_mem_freed += sz;
	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
        UNLOCK();
        ENABLE_SIGNALS();
    }
}
00424 
# ifdef REDIRECT_MALLOC
/* Redirected libc free: forwards to GC_FREE, unless the build	*/
/* deliberately ignores explicit deallocation (IGNORE_FREE).	*/
#   ifdef __STDC__
      void free(GC_PTR p)
#   else
      void free(p)
      GC_PTR p;
#   endif
  {
#   ifndef IGNORE_FREE
      GC_FREE(p);
#   endif
  }
# endif  /* REDIRECT_MALLOC */
00438 
#if defined(LINUX)
/* fake __mmap() */
/* Deliberate trap stub: any call raises SIGINT and returns NULL.	*/
/* Presumably this exists to make an unexpected internal __mmap call	*/
/* fail loudly in this build -- TODO(review): confirm intent; the	*/
/* parameters are intentionally unused.					*/

__ptr_t
__mmap (__ptr_t addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  raise(SIGINT);
  return NULL;
}
#endif