glibc 2.9
hooks.c

/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2006, 2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
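/* (The choice is held in the check_action variable used throughout
   this file; in glibc it is normally configured via the MALLOC_CHECK_
   environment variable mentioned below.)  */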

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
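
/* Usage note (a hedged sketch): these hooks are normally installed by
   ptmalloc_init() when the MALLOC_CHECK_ environment variable is set,
   but __malloc_check_init can also be called before the first
   allocation.  A double free such as

       void *p = malloc (10);
       free (p);
       free (p);

   is then caught by mem2chunk_check; with check_action 1 it reports
   "free(): invalid pointer" instead of corrupting the heap.  */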

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
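
/* The magic byte is derived from the chunk address itself, so a value
   copied from one chunk is unlikely to check out at another address.
   Worked example: for p == 0x602010, (0x602010 >> 3) == 0xC0402 and
   (0x602010 >> 11) == 0xC04; their XOR is 0xC0806, so MAGICBYTE(p)
   is 0x06.  */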

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */
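/* The layout produced (a sketch): the sz user bytes are followed at
   m_ptr[sz] by the chunk's magic byte; any slack up to the end of the
   chunk is filled from the top down with hop bytes (0xFF links, the
   last one holding the remaining distance to sz) so mem2chunk_check
   can walk from the chunk end back to the magic byte.  */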

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem, unsigned char **magic_p)
#else
mem2chunk_check(mem, magic_p) Void_t* mem; unsigned char **magic_p;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
       ((char*)p<mp_.sbrk_base ||
        ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
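  /* Invalidate the magic byte so a second free() of the same pointer
     fails the scan above; realloc_check restores it through *magic_p
     when the old chunk remains in use.  */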
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}

/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
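  /* (The corrupt region below the old top is simply abandoned; this
     recovery trades a leak for continued operation.)  */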
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
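  /* Round up so that the new top ends on a page boundary.  */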
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      MALLOC_FAILURE_ACTION;
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}

static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

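  /* sz+1 == 0 only when sz == (size_t)-1: the extra byte reserved for
     the magic would overflow the request.  */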
  if (sz+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}

static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem, NULL);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}

static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    if(newmem)
      newp = mem2chunk(newmem);
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}

static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  if (bytes+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }
  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}

#ifndef NO_THREADS

# ifdef _LIBC
#  if USE___THREAD || !defined SHARED
    /* These routines are never needed in this configuration.  */
#   define NO_STARTER
#  endif
# endif

# ifdef NO_STARTER
#  undef NO_STARTER
# else

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */
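/* Note that they operate on main_arena directly and take no arena
   lock; presumably this is safe because no second thread can be
   allocating while initialization is still in progress.  */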

static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_memalign(&main_arena, align, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, mem);
}

# endif       /* !defined NO_STARTER */
#endif /* NO_THREADS */


/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
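
/* A hedged usage sketch (the entry points are exported as
   malloc_get_state and malloc_set_state; the dump/restore transport
   is the application's business, e.g. Emacs-style dumping):

       void *state = malloc_get_state ();
       ... save the heap contents and `state' somewhere ...
       ... later, in a fresh process with the heap restored in place ...
       malloc_set_state (state);
       free (state);
*/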

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 3l) /* major*0x100 + minor */

struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};

Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}

int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(ms->version >= 3 &&
         (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                           largebin_index(chunksize(ms->av[2*i+3]))==i))) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  if (ms->version < 3) {
    /* Clear fd_nextsize and bk_nextsize fields.  */
    b = unsorted_chunks(&main_arena)->fd;
    while (b != unsorted_chunks(&main_arena)) {
      if (!in_smallbin_range(chunksize(b))) {
        b->fd_nextsize = NULL;
        b->bk_nextsize = NULL;
      }
      b = b->fd;
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */