/*
 * mallocx.c — extra allocation routines for the Boehm GC, as shipped
 * with plt-scheme 4.2.1.  (Navigation lines from the documentation
 * export — "Back to index", "Go to the documentation of this file" —
 * retained here as a comment.)
 */
00001 /*
00002  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
00003  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
00004  * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
00005  * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
00006  *
00007  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
00008  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
00009  *
00010  * Permission is hereby granted to use or copy this program
00011  * for any purpose,  provided the above notices are retained on all copies.
00012  * Permission to modify the code and to distribute modified code is granted,
00013  * provided the above notices are retained, and a notice that the code was
00014  * modified is included with the above copyright notice.
00015  */
00016 
00017 /*
00018  * These are extra allocation routines which are likely to be less
00019  * frequently used than those in malloc.c.  They are separate in the
00020  * hope that the .o file will be excluded from statically linked
00021  * executables.  We should probably break this up further.
00022  */
00023 
00024 #include <stdio.h>
00025 #include "private/gc_priv.h"
00026 
/* Old-style (no-prototype) forward declarations for GC internals     */
/* defined in other translation units.                                */
extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */
GC_bool GC_alloc_reclaim_list();   /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
00040 
00041 
00042 GC_PTR GC_generic_or_special_malloc(lb,knd)
00043 word lb;
00044 int knd;
00045 {
00046     switch(knd) {
00047 #     ifdef STUBBORN_ALLOC
00048        case STUBBORN:
00049            return(GC_malloc_stubborn((size_t)lb));
00050 #     endif
00051        case PTRFREE:
00052            return(GC_malloc_atomic((size_t)lb));
00053        case NORMAL:
00054            return(GC_malloc((size_t)lb));
00055        case UNCOLLECTABLE:
00056            return(GC_malloc_uncollectable((size_t)lb));
00057 #       ifdef ATOMIC_UNCOLLECTABLE
00058          case AUNCOLLECTABLE:
00059            return(GC_malloc_atomic_uncollectable((size_t)lb));
00060 #      endif /* ATOMIC_UNCOLLECTABLE */
00061        default:
00062            return(GC_generic_malloc(lb,knd));
00063     }
00064 }
00065 
00066 
/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.             */
/* Shrinking of large blocks is not implemented well.                 */
/* Behaves like GC_malloc(lb) when p is 0 (required by ANSI), and     */
/* returns 0 on allocation failure, leaving the original object       */
/* intact in that case.                                               */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;     /* Current size in bytes   */
register word orig_sz;       /* Original sz in bytes    */
int obj_kind;

    if (p == 0) return(GC_malloc(lb));    /* Required by ANSI */
    /* Recover the block header for p to learn its current size and   */
    /* object kind.                                                   */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
       /* Round it up to the next whole heap block */
         register word descr;
         
         sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
         hhdr -> hb_sz = BYTES_TO_WORDS(sz);
         /* Recompute the mark descriptor for the enlarged size.      */
         descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
         /* Uncollectable memory is accounted separately; the slack   */
         /* from rounding up now counts against it too.               */
         if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
         /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
       /* Request fits in the existing object.  Reuse it in place     */
       /* unless it would waste more than half the space.             */
       if (lb >= (sz >> 1)) {
#          ifdef STUBBORN_ALLOC
               if (obj_kind == STUBBORN) GC_change_stubborn(p);
#          endif
           if (orig_sz > lb) {
             /* Clear unneeded part of object to avoid bogus pointer */
             /* tracing.                                             */
             /* Safe for stubborn objects.                           */
               BZERO(((ptr_t)p) + lb, orig_sz - lb);
           }
           return(p);
       } else {
           /* shrink */
             GC_PTR result =
                     GC_generic_or_special_malloc((word)lb, obj_kind);

             if (result == 0) return(0);
                 /* Could also return original object.  But this      */
                 /* gives the client warning of imminent disaster.    */
             BCOPY(p, result, lb);
#            ifndef IGNORE_FREE
               GC_free(p);
#            endif
             return(result);
       }
    } else {
       /* grow */
         GC_PTR result =
              GC_generic_or_special_malloc((word)lb, obj_kind);

         if (result == 0) return(0);
         /* Copy the old contents, then release the old object.       */
         BCOPY(p, result, sz);
#        ifndef IGNORE_FREE
           GC_free(p);
#        endif
         return(result);
    }
}
00144 
/* If malloc is being redirected into the GC but no explicit realloc  */
/* redirection was chosen, route realloc through GC_realloc.          */
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.       */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
/* Forward a redirected realloc to the debug allocator, supplying     */
/* placeholder source-location arguments (plus the caller's return    */
/* address when GC_ADD_CALLER is defined).                            */
# define GC_debug_realloc_replacement(p, lb) \
       GC_debug_realloc(p, lb, RA "unknown", 0)

/* Replacement for the C library realloc; forwards to whatever        */
/* REDIRECT_REALLOC expands to.                                       */
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
  {
    return(REDIRECT_REALLOC(p, lb));
  }

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
00173 
00174 
/* Allocate memory such that only pointers to near the         */
/* beginning of the object are considered.                     */
/* We avoid holding allocation lock while we clear memory.     */
/* Returns 0 only via GC_oom_fn; small requests are delegated  */
/* to the ordinary generic allocator.                          */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;
    
    /* Small objects gain nothing from IGNORE_OFF_PAGE treatment.     */
    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
           /* Debug mode: clear the whole object while still locked.  */
           BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
             /* Clear any memory that might be used for GC descriptors */
             /* before we release the lock.                            */
               ((word *)result)[0] = 0;
               ((word *)result)[1] = 0;
               ((word *)result)[lw-1] = 0;
               ((word *)result)[lw-2] = 0;
#          endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
       /* For kinds requiring initialized memory, clear outside the   */
       /* lock (already done above when debugging).                   */
       if (init && !GC_debugging_started) {
           BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}
00224 
00225 # if defined(__STDC__) || defined(__cplusplus)
00226   void * GC_malloc_ignore_off_page(size_t lb)
00227 # else
00228   char * GC_malloc_ignore_off_page(lb)
00229   register size_t lb;
00230 # endif
00231 {
00232     return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
00233 }
00234 
00235 # if defined(__STDC__) || defined(__cplusplus)
00236   void * GC_malloc_atomic_ignore_off_page(size_t lb)
00237 # else
00238   char * GC_malloc_atomic_ignore_off_page(lb)
00239   register size_t lb;
00240 # endif
00241 {
00242     return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
00243 }
00244 
/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                       */
/* NOTE(review): these update global counters with no locking here;    */
/* presumably callers hold the allocation lock — confirm at call sites. */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.                           */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */
00259 
/* Analogous to the above, but assumes a small object size, and       */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.              */
/* lw is the object size in words.  This is the "_inner" variant:     */
/* the caller is expected to hold the allocation lock; on failure the */
/* lock is released (UNLOCK/ENABLE_SIGNALS) before calling GC_oom_fn. */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
       /* Refill the free list, building this kind's reclaim list     */
       /* first if it does not yet exist.                             */
       if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
           op = GC_clear_stack(GC_allocobj((word)lw, k));
       }
       if (op == 0) {
           UNLOCK();
           ENABLE_SIGNALS();
           return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
       }
    }
    /* Unlink the first object from the free list and account for it. */
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}
00289 
/* Analogous to the above, but assumes a small object size, and       */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.              */
/* Locking wrapper for GC_generic_malloc_words_small_inner: runs any  */
/* pending finalizers, then allocates under the allocation lock.      */
#ifdef __STDC__
     ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else 
     ptr_t GC_generic_malloc_words_small(lw, k)
     register word lw;
     register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}
00312 
#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
#endif /* PARALLEL_MARK */

/* See reclaim.c: */
extern ptr_t GC_reclaim_generic();
00331 
/* Return a list of 1 or more objects of the indicated size, linked   */
/* through the first word in the object.  This has the advantage that */
/* it acquires the allocation lock only once, and may greatly reduce  */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would */
/* keep its own free list in thread-local storage, and call           */
/* GC_malloc_many or friends to replenish it.  (We do not round up    */
/* object sizes, since a call indicates the intention to consume many */
/* objects of exactly this size.)                                     */
/* We return the free-list by assigning it to *result, since it is    */
/* not safe to return, e.g. a linked list of pointer-free objects,    */
/* since the collector would not retain the entire list if it were    */
/* invoked just as we were returning.                                 */
/* Note that the client should usually clear the link field.          */
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    /* Large objects: allocate one and return it as a singleton list. */
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
       *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
       GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    {
       struct hblk ** rlh = ok -> ok_reclaim_list;
       struct hblk * hbp;
       hdr * hhdr;

       rlh += lw;
       while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
           hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#          ifdef PARALLEL_MARK
              {
                /* Fold any allocation count deferred by earlier      */
                /* lock-free completions back into GC_words_allocd.   */
                signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                GC_ASSERT(my_words_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.    */
                /* Thus we can't accidentally adjust it down in more  */
                /* than one thread simultaneously.                    */
                if (my_words_allocd_tmp != 0) {
                  (void)GC_atomic_add(
                            (volatile GC_word *)(&GC_words_allocd_tmp),
                            (GC_word)(-my_words_allocd_tmp));
                  GC_words_allocd += my_words_allocd_tmp;
                }
              }
              /* Register ourselves as a free-list builder and drop   */
              /* the GC lock so the sweep below runs without it.      */
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              ENABLE_SIGNALS();
              GC_release_mark_lock();
#          endif
           /* Sweep the block, building a free list of its objects.   */
           op = GC_reclaim_generic(hbp, hhdr, lw,
                                ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#            ifdef NEED_TO_COUNT
              /* We are neither gathering statistics, nor marking in  */
              /* parallel.  Thus GC_reclaim_generic doesn't count     */
              /* for us.                                              */
              for (p = op; p != 0; p = obj_link(p)) {
                my_words_allocd += lw;
              }
#            endif
#            if defined(GATHERSTATS)
               /* We also reclaimed memory, so we need to adjust      */
               /* that count.                                         */
              /* This should be atomic, so the results may be         */
              /* inaccurate.                                          */
              GC_mem_found += my_words_allocd;
#            endif
#            ifdef PARALLEL_MARK
              /* GC lock is not held here: publish the count via the  */
              /* deferred counter and notify any waiting builders.    */
              *result = op;
              (void)GC_atomic_add(
                            (volatile GC_word *)(&GC_words_allocd_tmp),
                            (GC_word)(my_words_allocd));
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
              return;
#            else
               GC_words_allocd += my_words_allocd;
               goto out;
#            endif
           }
#          ifdef PARALLEL_MARK
             GC_acquire_mark_lock();
             -- GC_fl_builder_count;
             if (GC_fl_builder_count == 0) GC_notify_all_builder();
             GC_release_mark_lock();
             DISABLE_SIGNALS();
             LOCK();
             /* GC lock is needed for reclaim list access.     We     */
             /* must decrement fl_builder_count before reaquiring GC  */
             /* lock.  Hopefully this path is rare.                   */
#          endif
       }
    }
    /* Next try to use prefix of global free list if there is one.    */
    /* We don't refill it, but we need to use it up before allocating */
    /* a new block ourselves.                                         */
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
       *opp = 0;
        my_words_allocd = 0;
        /* Take at most BODY_SZ words' worth; leave the remainder on  */
        /* the global free list.                                      */
        for (p = op; p != 0; p = obj_link(p)) {
          my_words_allocd += lw;
          if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
         }
        }
       GC_words_allocd += my_words_allocd;
       goto out;
      }
    /* Next try to allocate a new block worth of objects of this size. */
    {
       struct hblk *h = GC_allochblk(lw, k, 0);
       if (h != 0) {
         if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
         /* Count the whole usable portion of the new block now.      */
         GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                            - BYTES_TO_WORDS(HBLKSIZE) % lw;
#        ifdef PARALLEL_MARK
           GC_acquire_mark_lock();
           ++ GC_fl_builder_count;
           UNLOCK();
           ENABLE_SIGNALS();
           GC_release_mark_lock();
#        endif

         op = GC_build_fl(h, lw, ok -> ok_init, 0);
#        ifdef PARALLEL_MARK
           *result = op;
           GC_acquire_mark_lock();
           -- GC_fl_builder_count;
           if (GC_fl_builder_count == 0) GC_notify_all_builder();
           GC_release_mark_lock();
           (void) GC_clear_stack(0);
           return;
#        else
           goto out;
#        endif
       }
    }
    
    /* As a last attempt, try allocating a single object.  Note that  */
    /* this may trigger a collection or expand the heap.              */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;
    
  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}
00522 
00523 GC_PTR GC_malloc_many(size_t lb)
00524 {
00525     ptr_t result;
00526     GC_generic_malloc_many(lb, NORMAL, &result);
00527     return result;
00528 }
00529 
00530 /* Note that the "atomic" version of this would be unsafe, since the  */
00531 /* links would not be seen by the collector.                          */
00532 # endif
00533 
/* Allocate lb bytes of pointerful, traced, but not collectable data. */
/* The object is kept permanently marked, so the collector scans it   */
/* but never reclaims it; it must be released with GC_free.           */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
         if (EXTRA_BYTES != 0 && lb != 0) lb--;
                /* We don't need the extra byte, since this won't be  */
                /* collected anyway.                                  */
         lw = GC_size_map[lb];
#      else
         lw = ALIGNED_WORDS(lb);
#       endif
       /* Fast path: pop from the uncollectable free list if the      */
       /* non-blocking lock attempt succeeds.                         */
       opp = &(GC_uobjfreelist[lw]);
       FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.     */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be     */
           /* cleared only temporarily during a collection, as a      */
           /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
       op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised      */
    /* pointer.  We do need to hold the lock while we adjust          */
    /* mark bits.                                                     */
    {
       register struct hblk * h;
       
       h = HBLKPTR(op);
       lw = HDR(h) -> hb_sz;
       
       DISABLE_SIGNALS();
       LOCK();
       /* Permanently mark the object and account for it as           */
       /* non-GC-managed memory.                                      */
       GC_set_mark_bit(op);
       GC_non_gc_bytes += WORDS_TO_BYTES(lw);
       UNLOCK();
       ENABLE_SIGNALS();
       return((GC_PTR) op);
    }
}
00594 
#ifdef __STDC__
/* Not well tested nor integrated. */
/* Debug version is tricky and currently missing.       */
#include <limits.h>

/* Allocate lb bytes aligned on an align-byte boundary.  Alignments   */
/* larger than HBLKSIZE fail by invoking GC_oom_fn.                   */
GC_PTR GC_memalign(size_t align, size_t lb) 
{ 
    size_t new_lb;
    size_t offset;
    ptr_t result;

#   ifdef ALIGN_DOUBLE
       /* Two-word alignment comes for free from the allocator.       */
       if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
       return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
           /* Will be HBLKSIZE aligned.   */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    /* Over-allocate, then advance the pointer to the next alignment  */
    /* boundary within the object.                                    */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
       offset = align - offset;
        if (!GC_all_interior_pointers) {
           /* The interior pointer result+offset must be recognized   */
           /* by the collector: register the displacement, or fall    */
           /* back to a whole (HBLKSIZE-aligned) block when the       */
           /* offset is too large to register.                        */
           if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
           GC_register_displacement(offset);
       }
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
#endif 
00632 
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data     */
/* This is normally roughly equivalent to the system malloc.          */
/* But it may be useful if malloc is redefined.                       */
/* Mirrors GC_malloc_uncollectable, using the AUNCOLLECTABLE kind     */
/* and its free list; the object must be released with GC_free.       */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
         if (EXTRA_BYTES != 0 && lb != 0) lb--;
                /* We don't need the extra byte, since this won't be  */
                /* collected anyway.                                  */
         lw = GC_size_map[lb];
#      else
         lw = ALIGNED_WORDS(lb);
#       endif
       /* Fast path: pop from the atomic-uncollectable free list if   */
       /* the non-blocking lock attempt succeeds.                     */
       opp = &(GC_auobjfreelist[lw]);
       FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.     */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
           /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
       op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised      */
    /* pointer.  We do need to hold the lock while we adjust          */
    /* mark bits.                                                     */
    {
       register struct hblk * h;
       
       h = HBLKPTR(op);
       lw = HDR(h) -> hb_sz;
       
       DISABLE_SIGNALS();
       LOCK();
       /* Permanently mark the object and account for it as           */
       /* non-GC-managed memory.                                      */
       GC_set_mark_bit(op);
       GC_non_gc_bytes += WORDS_TO_BYTES(lw);
       UNLOCK();
       ENABLE_SIGNALS();
       return((GC_PTR) op);
    }
}
00694 
00695 #endif /* ATOMIC_UNCOLLECTABLE */