glibc 2.9: malloc.c
00001 /* Malloc implementation for multiple threads without lock contention.
00002    Copyright (C) 1996-2006, 2007, 2008 Free Software Foundation, Inc.
00003    This file is part of the GNU C Library.
00004    Contributed by Wolfram Gloger <wg@malloc.de>
00005    and Doug Lea <dl@cs.oswego.edu>, 2001.
00006 
00007    The GNU C Library is free software; you can redistribute it and/or
00008    modify it under the terms of the GNU Lesser General Public License as
00009    published by the Free Software Foundation; either version 2.1 of the
00010    License, or (at your option) any later version.
00011 
00012    The GNU C Library is distributed in the hope that it will be useful,
00013    but WITHOUT ANY WARRANTY; without even the implied warranty of
00014    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00015    Lesser General Public License for more details.
00016 
00017    You should have received a copy of the GNU Lesser General Public
00018    License along with the GNU C Library; see the file COPYING.LIB.  If not,
00019    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
00020    Boston, MA 02111-1307, USA.  */
00021 
00022 /*
00023   This is a version (aka ptmalloc2) of malloc/free/realloc written by
00024   Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
00025 
00026 * Version ptmalloc2-20011215
00027   based on:
00028   VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
00029 
00030 * Quickstart
00031 
00032   In order to compile this implementation, a Makefile is provided with
00033   the ptmalloc2 distribution, which has pre-defined targets for some
00034   popular systems (e.g. "make posix" for Posix threads).  All that is
00035   typically required with regard to compiler flags is the selection of
00036   the thread package via defining one out of USE_PTHREADS, USE_THR or
00037   USE_SPROC.  Check the thread-m.h file for what effects this has.
00038   Many/most systems will additionally require USE_TSD_DATA_HACK to be
00039   defined, so this is the default for "make posix".
00040 
00041 * Why use this malloc?
00042 
00043   This is not the fastest, most space-conserving, most portable, or
00044   most tunable malloc ever written. However it is among the fastest
00045   while also being among the most space-conserving, portable and tunable.
00046   Consistent balance across these factors results in a good general-purpose
00047   allocator for malloc-intensive programs.
00048 
00049   The main properties of the algorithms are:
00050   * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
00051     with ties normally decided via FIFO (i.e. least recently used).
00052   * For small (<= 64 bytes by default) requests, it is a caching
00053     allocator, that maintains pools of quickly recycled chunks.
00054   * In between, and for combinations of large and small requests, it does
00055     the best it can trying to meet both goals at once.
00056   * For very large requests (>= 128KB by default), it relies on system
00057     memory mapping facilities, if supported.
00058 
00059   For a longer but slightly out of date high-level description, see
00060      http://gee.cs.oswego.edu/dl/html/malloc.html
00061 
00062   You may already be using, by default, a C library containing a malloc
00063   that is based on some version of this malloc (for example in
00064   linux). You might still want to use the one in this file in order to
00065   customize settings or to avoid overheads associated with library
00066   versions.
00067 
00068 * Contents, described in more detail in "description of public routines" below.
00069 
00070   Standard (ANSI/SVID/...)  functions:
00071     malloc(size_t n);
00072     calloc(size_t n_elements, size_t element_size);
00073     free(Void_t* p);
00074     realloc(Void_t* p, size_t n);
00075     memalign(size_t alignment, size_t n);
00076     valloc(size_t n);
00077     mallinfo()
00078     mallopt(int parameter_number, int parameter_value)
00079 
00080   Additional functions:
00081     independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
00082     independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
00083     pvalloc(size_t n);
00084     cfree(Void_t* p);
00085     malloc_trim(size_t pad);
00086     malloc_usable_size(Void_t* p);
00087     malloc_stats();
00088 
00089 * Vital statistics:
00090 
00091   Supported pointer representation:       4 or 8 bytes
00092   Supported size_t  representation:       4 or 8 bytes
00093        Note that size_t is allowed to be 4 bytes even if pointers are 8.
00094        You can adjust this by defining INTERNAL_SIZE_T
00095 
00096   Alignment:                              2 * sizeof(size_t) (default)
00097        (i.e., 8 byte alignment with 4byte size_t). This suffices for
00098        nearly all current machines and C compilers. However, you can
00099        define MALLOC_ALIGNMENT to be wider than this if necessary.
00100 
00101   Minimum overhead per allocated chunk:   4 or 8 bytes
00102        Each malloced chunk has a hidden word of overhead holding size
00103        and status information.
00104 
00105   Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
00106                           8-byte ptrs:  24/32 bytes (including 4/8 overhead)
00107 
00108        When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
00109        ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
00110        needed; 4 (8) for a trailing size field and 8 (16) bytes for
00111        free list pointers. Thus, the minimum allocatable size is
00112        16/24/32 bytes.
00113 
00114        Even a request for zero bytes (i.e., malloc(0)) returns a
00115        pointer to something of the minimum allocatable size.
00116 
00117        The maximum overhead wastage (i.e., the number of extra bytes
00118        allocated beyond those requested in malloc) is less than or equal
00119        to the minimum size, except for requests >= mmap_threshold that
00120        are serviced via mmap(), where the worst case wastage is 2 *
00121        sizeof(size_t) bytes plus the remainder from a system page (the
00122        minimal mmap unit); typically 4096 or 8192 bytes.
00123 
00124   Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
00125                            8-byte size_t: 2^64 minus about two pages
00126 
00127        It is assumed that (possibly signed) size_t values suffice to
00128        represent chunk sizes. `Possibly signed' is due to the fact
00129        that `size_t' may be defined on a system as either a signed or
00130        an unsigned type. The ISO C standard says that it must be
00131        unsigned, but a few systems are known not to adhere to this.
00132        Additionally, even when size_t is unsigned, sbrk (which is by
00133        default used to obtain memory from system) accepts signed
00134        arguments, and may not be able to handle size_t-wide arguments
00135        with negative sign bit.  Generally, values that would
00136        appear as negative after accounting for overhead and alignment
00137        are supported only via mmap(), which does not have this
00138        limitation.
00139 
00140        Requests for sizes outside the allowed range will perform an optional
00141        failure action and then return null. (Requests may also
00142   fail because a system is out of memory.)
00143 
00144   Thread-safety: thread-safe unless NO_THREADS is defined
00145 
00146   Compliance: I believe it is compliant with the 1997 Single Unix Specification
00147        (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
00148        others as well.
00149 
00150 * Synopsis of compile-time options:
00151 
00152     People have reported using previous versions of this malloc on all
00153     versions of Unix, sometimes by tweaking some of the defines
00154     below. It has been tested most extensively on Solaris and
00155     Linux. It is also reported to work on WIN32 platforms.
00156     People also report using it in stand-alone embedded systems.
00157 
00158     The implementation is in straight, hand-tuned ANSI C.  It is not
00159     at all modular. (Sorry!)  It uses a lot of macros.  To be at all
00160     usable, this code should be compiled using an optimizing compiler
00161     (for example gcc -O3) that can simplify expressions and control
00162     paths. (FAQ: some macros import variables as arguments rather than
00163     declare locals because people reported that some debuggers
00164     otherwise get confused.)
00165 
00166     OPTION                     DEFAULT VALUE
00167 
00168     Compilation Environment options:
00169 
00170     __STD_C                    derived from C compiler defines
00171     WIN32                      NOT defined
00172     HAVE_MEMCPY                defined
00173     USE_MEMCPY                 1 if HAVE_MEMCPY is defined
00174     HAVE_MMAP                  defined as 1
00175     MMAP_CLEARS                1
00176     HAVE_MREMAP                0 unless linux defined
00177     USE_ARENAS                 the same as HAVE_MMAP
00178     malloc_getpagesize         derived from system #includes, or 4096 if not
00179     HAVE_USR_INCLUDE_MALLOC_H  NOT defined
00180     LACKS_UNISTD_H             NOT defined unless WIN32
00181     LACKS_SYS_PARAM_H          NOT defined unless WIN32
00182     LACKS_SYS_MMAN_H           NOT defined unless WIN32
00183 
00184     Changing default word sizes:
00185 
00186     INTERNAL_SIZE_T            size_t
00187     MALLOC_ALIGNMENT           MAX (2 * sizeof(INTERNAL_SIZE_T),
00188                                 __alignof__ (long double))
00189 
00190     Configuration and functionality options:
00191 
00192     USE_DL_PREFIX              NOT defined
00193     USE_PUBLIC_MALLOC_WRAPPERS NOT defined
00194     USE_MALLOC_LOCK            NOT defined
00195     MALLOC_DEBUG               NOT defined
00196     REALLOC_ZERO_BYTES_FREES   1
00197     MALLOC_FAILURE_ACTION      errno = ENOMEM, if __STD_C defined, else no-op
00198     TRIM_FASTBINS              0
00199 
00200     Options for customizing MORECORE:
00201 
00202     MORECORE                   sbrk
00203     MORECORE_FAILURE           -1
00204     MORECORE_CONTIGUOUS        1
00205     MORECORE_CANNOT_TRIM       NOT defined
00206     MORECORE_CLEARS            1
00207     MMAP_AS_MORECORE_SIZE      (1024 * 1024)
00208 
00209     Tuning options that are also dynamically changeable via mallopt:
00210 
00211     DEFAULT_MXFAST             64
00212     DEFAULT_TRIM_THRESHOLD     128 * 1024
00213     DEFAULT_TOP_PAD            0
00214     DEFAULT_MMAP_THRESHOLD     128 * 1024
00215     DEFAULT_MMAP_MAX           65536
00216 
00217     There are several other #defined constants and macros that you
00218     probably don't want to touch unless you are extending or adapting malloc.  */
00219 
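/*
  For illustration only, a sketch of overriding a few of the options
  summarized above before this file is compiled (the option names come
  from the table above; the particular values and the wrapper-file
  approach are arbitrary examples, not recommendations):

    #define USE_DL_PREFIX
    #define TRIM_FASTBINS           1
    #define DEFAULT_MMAP_THRESHOLD  (256 * 1024)
    #include "malloc.c"

  With USE_DL_PREFIX defined, the public entry points become dlmalloc,
  dlfree, and so on, so this allocator can coexist with the system malloc.
*/
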
00220 /*
00221   __STD_C should be nonzero if using ANSI-standard C compiler, a C++
00222   compiler, or a C compiler sufficiently close to ANSI to get away
00223   with it.
00224 */
00225 
00226 #ifndef __STD_C
00227 #if defined(__STDC__) || defined(__cplusplus)
00228 #define __STD_C     1
00229 #else
00230 #define __STD_C     0
00231 #endif
00232 #endif /*__STD_C*/
00233 
00234 
00235 /*
00236   Void_t* is the pointer type that malloc should say it returns
00237 */
00238 
00239 #ifndef Void_t
00240 #if (__STD_C || defined(WIN32))
00241 #define Void_t      void
00242 #else
00243 #define Void_t      char
00244 #endif
00245 #endif /*Void_t*/
00246 
00247 #if __STD_C
00248 #include <stddef.h>   /* for size_t */
00249 #include <stdlib.h>   /* for getenv(), abort() */
00250 #else
00251 #include <sys/types.h>
00252 #endif
00253 
00254 #include <malloc-machine.h>
00255 
00256 #ifdef _LIBC
00257 #include <stdio-common/_itoa.h>
00258 #include <bits/wordsize.h>
00259 #endif
00260 
00261 #ifdef __cplusplus
00262 extern "C" {
00263 #endif
00264 
00265 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
00266 
00267 /* #define  LACKS_UNISTD_H */
00268 
00269 #ifndef LACKS_UNISTD_H
00270 #include <unistd.h>
00271 #endif
00272 
00273 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
00274 
00275 /* #define  LACKS_SYS_PARAM_H */
00276 
00277 
00278 #include <stdio.h>    /* needed for malloc_stats */
00279 #include <errno.h>    /* needed for optional MALLOC_FAILURE_ACTION */
00280 
00281 /* For uintptr_t.  */
00282 #include <stdint.h>
00283 
00284 /* For va_arg, va_start, va_end.  */
00285 #include <stdarg.h>
00286 
00287 /* For writev and struct iovec.  */
00288 #include <sys/uio.h>
00289 /* For syslog.  */
00290 #include <sys/syslog.h>
00291 
00292 /* For various dynamic linking things.  */
00293 #include <dlfcn.h>
00294 
00295 
00296 /*
00297   Debugging:
00298 
00299   Because freed chunks may be overwritten with bookkeeping fields, this
00300   malloc will often die when freed memory is overwritten by user
00301   programs.  This can be very effective (albeit in an annoying way)
00302   in helping track down dangling pointers.
00303 
00304   If you compile with -DMALLOC_DEBUG, a number of assertion checks are
00305   enabled that will catch more memory errors. You probably won't be
00306   able to make much sense of the actual assertion errors, but they
00307   should help you locate incorrectly overwritten memory.  The checking
00308   is fairly extensive, and will slow down execution
00309   noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
00310   will attempt to check every non-mmapped allocated and free chunk in
00311   the course of computing the summaries. (By nature, mmapped regions
00312   cannot be checked very much automatically.)
00313 
00314   Setting MALLOC_DEBUG may also be helpful if you are trying to modify
00315   this code. The assertions in the check routines spell out in more
00316   detail the assumptions and invariants underlying the algorithms.
00317 
00318   Setting MALLOC_DEBUG does NOT provide an automated mechanism for
00319   checking that all accesses to malloced memory stay within their
00320   bounds. However, there are several add-ons and adaptations of this
00321   or other mallocs available that do this.
00322 */
00323 
00324 #if MALLOC_DEBUG
00325 #include <assert.h>
00326 #else
00327 #undef assert
00328 #define assert(x) ((void)0)
00329 #endif
00330 
00331 
00332 /*
00333   INTERNAL_SIZE_T is the word-size used for internal bookkeeping
00334   of chunk sizes.
00335 
00336   The default version is the same as size_t.
00337 
00338   While not strictly necessary, it is best to define this as an
00339   unsigned type, even if size_t is a signed type. This may avoid some
00340   artificial size limitations on some systems.
00341 
00342   On a 64-bit machine, you may be able to reduce malloc overhead by
00343   defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
00344   expense of not being able to handle more than 2^32 of malloced
00345   space. If this limitation is acceptable, you are encouraged to set
00346   this unless you are on a platform requiring 16byte alignments. In
00347   this case the alignment requirements turn out to negate any
00348   potential advantages of decreasing size_t word size.
00349 
00350   Implementors: Beware of the possible combinations of:
00351      - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
00352        and might be the same width as int or as long
00353      - size_t might have different width and signedness than INTERNAL_SIZE_T
00354      - int and long might be 32 or 64 bits, and might be the same width
00355   To deal with this, most comparisons and difference computations
00356   among INTERNAL_SIZE_Ts should cast them to unsigned long, being
00357   aware of the fact that casting an unsigned int to a wider long does
00358   not sign-extend. (This also makes checking for negative numbers
00359   awkward.) Some of these casts result in harmless compiler warnings
00360   on some systems.
00361 */
00362 
00363 #ifndef INTERNAL_SIZE_T
00364 #define INTERNAL_SIZE_T size_t
00365 #endif
00366 
00367 /* The corresponding word size */
00368 #define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
00369 
00370 
00371 /*
00372   MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
00373   It must be a power of two at least 2 * SIZE_SZ, even on machines
00374   for which smaller alignments would suffice. It may be defined as
00375   larger than this though. Note however that code and data structures
00376   are optimized for the case of 8-byte alignment.
00377 */
00378 
00379 
00380 #ifndef MALLOC_ALIGNMENT
00381 /* XXX This is the correct definition.  It differs from 2*SIZE_SZ only on
00382    powerpc32.  For the time being, changing this is causing more
00383    compatibility problems due to malloc_get_state/malloc_set_state than
00384    will returning blocks not adequately aligned for long double objects
00385    under -mlong-double-128.
00386 
00387 #define MALLOC_ALIGNMENT       (2 * SIZE_SZ < __alignof__ (long double) \
00388                             ? __alignof__ (long double) : 2 * SIZE_SZ)
00389 */
00390 #define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
00391 #endif
00392 
00393 /* The corresponding bit mask value */
00394 #define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
00395 
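/*
  For illustration, a size can be rounded up to a multiple of
  MALLOC_ALIGNMENT with the mask above roughly as follows (a sketch
  only; aligned_size is just an illustrative name, and the internal
  request2size computation additionally accounts for chunk overhead):

    static size_t aligned_size(size_t sz) {
      return (sz + MALLOC_ALIGN_MASK) & ~(size_t) MALLOC_ALIGN_MASK;
    }
*/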
00396 
00397 
00398 /*
00399   REALLOC_ZERO_BYTES_FREES should be set if a call to
00400   realloc with zero bytes should be the same as a call to free.
00401   This is required by the C standard. Otherwise, since this malloc
00402   returns a unique pointer for malloc(0), so does realloc(p, 0).
00403 */
00404 
00405 #ifndef REALLOC_ZERO_BYTES_FREES
00406 #define REALLOC_ZERO_BYTES_FREES 1
00407 #endif
00408 
00409 /*
00410   TRIM_FASTBINS controls whether free() of a very small chunk can
00411   immediately lead to trimming. Setting to true (1) can reduce memory
00412   footprint, but will almost always slow down programs that use a lot
00413   of small chunks.
00414 
00415   Define this only if you are willing to give up some speed to more
00416   aggressively reduce system-level memory footprint when releasing
00417   memory in programs that use many small chunks.  You can get
00418   essentially the same effect by setting MXFAST to 0, but this can
00419   lead to even greater slowdowns in programs using many small chunks.
00420   TRIM_FASTBINS is an in-between compile-time option that prevents
00421   only those chunks bordering topmost memory from being placed in
00422   fastbins.
00423 */
00424 
00425 #ifndef TRIM_FASTBINS
00426 #define TRIM_FASTBINS  0
00427 #endif
00428 
00429 
00430 /*
00431   USE_DL_PREFIX will prefix all public routines with the string 'dl'.
00432   This is necessary when you only want to use this malloc in one part
00433   of a program, using your regular system malloc elsewhere.
00434 */
00435 
00436 /* #define USE_DL_PREFIX */
00437 
00438 
00439 /*
00440    Two-phase name translation.
00441    All of the actual routines are given mangled names.
00442    When wrappers are used, they become the public callable versions.
00443    When DL_PREFIX is used, the callable names are prefixed.
00444 */
00445 
00446 #ifdef USE_DL_PREFIX
00447 #define public_cALLOc    dlcalloc
00448 #define public_fREe      dlfree
00449 #define public_cFREe     dlcfree
00450 #define public_mALLOc    dlmalloc
00451 #define public_mEMALIGn  dlmemalign
00452 #define public_rEALLOc   dlrealloc
00453 #define public_vALLOc    dlvalloc
00454 #define public_pVALLOc   dlpvalloc
00455 #define public_mALLINFo  dlmallinfo
00456 #define public_mALLOPt   dlmallopt
00457 #define public_mTRIm     dlmalloc_trim
00458 #define public_mSTATs    dlmalloc_stats
00459 #define public_mUSABLe   dlmalloc_usable_size
00460 #define public_iCALLOc   dlindependent_calloc
00461 #define public_iCOMALLOc dlindependent_comalloc
00462 #define public_gET_STATe dlget_state
00463 #define public_sET_STATe dlset_state
00464 #else /* USE_DL_PREFIX */
00465 #ifdef _LIBC
00466 
00467 /* Special defines for the GNU C library.  */
00468 #define public_cALLOc    __libc_calloc
00469 #define public_fREe      __libc_free
00470 #define public_cFREe     __libc_cfree
00471 #define public_mALLOc    __libc_malloc
00472 #define public_mEMALIGn  __libc_memalign
00473 #define public_rEALLOc   __libc_realloc
00474 #define public_vALLOc    __libc_valloc
00475 #define public_pVALLOc   __libc_pvalloc
00476 #define public_mALLINFo  __libc_mallinfo
00477 #define public_mALLOPt   __libc_mallopt
00478 #define public_mTRIm     __malloc_trim
00479 #define public_mSTATs    __malloc_stats
00480 #define public_mUSABLe   __malloc_usable_size
00481 #define public_iCALLOc   __libc_independent_calloc
00482 #define public_iCOMALLOc __libc_independent_comalloc
00483 #define public_gET_STATe __malloc_get_state
00484 #define public_sET_STATe __malloc_set_state
00485 #define malloc_getpagesize __getpagesize()
00486 #define open             __open
00487 #define mmap             __mmap
00488 #define munmap           __munmap
00489 #define mremap           __mremap
00490 #define mprotect         __mprotect
00491 #define MORECORE         (*__morecore)
00492 #define MORECORE_FAILURE 0
00493 
00494 Void_t * __default_morecore (ptrdiff_t);
00495 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
00496 
00497 #else /* !_LIBC */
00498 #define public_cALLOc    calloc
00499 #define public_fREe      free
00500 #define public_cFREe     cfree
00501 #define public_mALLOc    malloc
00502 #define public_mEMALIGn  memalign
00503 #define public_rEALLOc   realloc
00504 #define public_vALLOc    valloc
00505 #define public_pVALLOc   pvalloc
00506 #define public_mALLINFo  mallinfo
00507 #define public_mALLOPt   mallopt
00508 #define public_mTRIm     malloc_trim
00509 #define public_mSTATs    malloc_stats
00510 #define public_mUSABLe   malloc_usable_size
00511 #define public_iCALLOc   independent_calloc
00512 #define public_iCOMALLOc independent_comalloc
00513 #define public_gET_STATe malloc_get_state
00514 #define public_sET_STATe malloc_set_state
00515 #endif /* _LIBC */
00516 #endif /* USE_DL_PREFIX */
00517 
00518 #ifndef _LIBC
00519 #define __builtin_expect(expr, val)       (expr)
00520 
00521 #define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
00522 #endif
00523 
00524 /*
00525   HAVE_MEMCPY should be defined if you are not otherwise using
00526   ANSI STD C, but still have memcpy and memset in your C library
00527   and want to use them in calloc and realloc. Otherwise simple
00528   macro versions are defined below.
00529 
00530   USE_MEMCPY should be defined as 1 if you actually want to
00531   have memset and memcpy called. People report that the macro
00532   versions are faster than libc versions on some systems.
00533 
00534   Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
00535   (of <= 36 bytes) are manually unrolled in realloc and calloc.
00536 */
00537 
00538 #define HAVE_MEMCPY
00539 
00540 #ifndef USE_MEMCPY
00541 #ifdef HAVE_MEMCPY
00542 #define USE_MEMCPY 1
00543 #else
00544 #define USE_MEMCPY 0
00545 #endif
00546 #endif
00547 
00548 
00549 #if (__STD_C || defined(HAVE_MEMCPY))
00550 
00551 #ifdef _LIBC
00552 # include <string.h>
00553 #else
00554 #ifdef WIN32
00555 /* On Win32 memset and memcpy are already declared in windows.h */
00556 #else
00557 #if __STD_C
00558 void* memset(void*, int, size_t);
00559 void* memcpy(void*, const void*, size_t);
00560 #else
00561 Void_t* memset();
00562 Void_t* memcpy();
00563 #endif
00564 #endif
00565 #endif
00566 #endif
00567 
00568 /*
00569   MALLOC_FAILURE_ACTION is the action to take before "return 0" when
00570   malloc is unable to return memory, either because memory is
00571   exhausted or because of illegal arguments.
00572 
00573   By default, sets errno if running on STD_C platform, else does nothing.
00574 */
00575 
00576 #ifndef MALLOC_FAILURE_ACTION
00577 #if __STD_C
00578 #define MALLOC_FAILURE_ACTION \
00579    errno = ENOMEM;
00580 
00581 #else
00582 #define MALLOC_FAILURE_ACTION
00583 #endif
00584 #endif
00585 
00586 /*
00587   MORECORE-related declarations. By default, rely on sbrk
00588 */
00589 
00590 
00591 #ifdef LACKS_UNISTD_H
00592 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
00593 #if __STD_C
00594 extern Void_t*     sbrk(ptrdiff_t);
00595 #else
00596 extern Void_t*     sbrk();
00597 #endif
00598 #endif
00599 #endif
00600 
00601 /*
00602   MORECORE is the name of the routine to call to obtain more memory
00603   from the system.  See below for general guidance on writing
00604   alternative MORECORE functions, as well as a version for WIN32 and a
00605   sample version for pre-OSX macos.
00606 */
00607 
00608 #ifndef MORECORE
00609 #define MORECORE sbrk
00610 #endif
00611 
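/*
  For illustration, a minimal replacement MORECORE could simply forward
  to sbrk (a sketch only; my_morecore is an arbitrary name, and a real
  replacement must follow the failure and contiguity conventions
  described below):

    void* my_morecore(ptrdiff_t increment) {
      void* result = sbrk(increment);
      if (result == (void*) -1)
        return (void*) MORECORE_FAILURE;
      return result;
    }

  It would be selected by compiling with -DMORECORE=my_morecore.
*/
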
00612 /*
00613   MORECORE_FAILURE is the value returned upon failure of MORECORE
00614   as well as mmap. Since it cannot be an otherwise valid memory address,
00615   and must reflect values of standard sys calls, you probably ought not
00616   try to redefine it.
00617 */
00618 
00619 #ifndef MORECORE_FAILURE
00620 #define MORECORE_FAILURE (-1)
00621 #endif
00622 
00623 /*
00624   If MORECORE_CONTIGUOUS is true, take advantage of fact that
00625   consecutive calls to MORECORE with positive arguments always return
00626   contiguous increasing addresses.  This is true of unix sbrk.  Even
00627   if not defined, when regions happen to be contiguous, malloc will
00628   permit allocations spanning regions obtained from different
00629   calls. But defining this when applicable enables some stronger
00630   consistency checks and space efficiencies.
00631 */
00632 
00633 #ifndef MORECORE_CONTIGUOUS
00634 #define MORECORE_CONTIGUOUS 1
00635 #endif
00636 
00637 /*
00638   Define MORECORE_CANNOT_TRIM if your version of MORECORE
00639   cannot release space back to the system when given negative
00640   arguments. This is generally necessary only if you are using
00641   a hand-crafted MORECORE function that cannot handle negative arguments.
00642 */
00643 
00644 /* #define MORECORE_CANNOT_TRIM */
00645 
00646 /*  MORECORE_CLEARS           (default 1)
00647      The degree to which the routine mapped to MORECORE zeroes out
00648      memory: never (0), only for newly allocated space (1) or always
00649      (2).  The distinction between (1) and (2) is necessary because on
00650      some systems, if the application first decrements and then
00651      increments the break value, the contents of the reallocated space
00652      are unspecified.
00653 */
00654 
00655 #ifndef MORECORE_CLEARS
00656 #define MORECORE_CLEARS 1
00657 #endif
00658 
00659 
00660 /*
00661   Define HAVE_MMAP as true to optionally make malloc() use mmap() to
00662   allocate very large blocks.  These will be returned to the
00663   operating system immediately after a free(). Also, if mmap
00664   is available, it is used as a backup strategy in cases where
00665   MORECORE fails to provide space from system.
00666 
00667   This malloc is best tuned to work with mmap for large requests.
00668   If you do not have mmap, operations involving very large chunks (1MB
00669   or so) may be slower than you'd like.
00670 */
00671 
00672 #ifndef HAVE_MMAP
00673 #define HAVE_MMAP 1
00674 
00675 /*
00676    Standard unix mmap using /dev/zero clears memory so calloc doesn't
00677    need to.
00678 */
00679 
00680 #ifndef MMAP_CLEARS
00681 #define MMAP_CLEARS 1
00682 #endif
00683 
00684 #else /* no mmap */
00685 #ifndef MMAP_CLEARS
00686 #define MMAP_CLEARS 0
00687 #endif
00688 #endif
00689 
00690 
00691 /*
00692    MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
00693    sbrk fails, and mmap is used as a backup (which is done only if
00694    HAVE_MMAP).  The value must be a multiple of page size.  This
00695    backup strategy generally applies only when systems have "holes" in
00696    address space, so sbrk cannot perform contiguous expansion, but
00697    there is still space available on system.  On systems for which
00698    this is known to be useful (i.e. most linux kernels), this occurs
00699    only when programs allocate huge amounts of memory.  Between this,
00700    and the fact that mmap regions tend to be limited, the size should
00701    be large, to avoid too many mmap calls and thus avoid running out
00702    of kernel resources.
00703 */
00704 
00705 #ifndef MMAP_AS_MORECORE_SIZE
00706 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
00707 #endif
00708 
00709 /*
00710   Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
00711   large blocks.  This is currently only possible on Linux with
00712   kernel versions newer than 1.3.77.
00713 */
00714 
00715 #ifndef HAVE_MREMAP
00716 #ifdef linux
00717 #define HAVE_MREMAP 1
00718 #else
00719 #define HAVE_MREMAP 0
00720 #endif
00721 
00722 #endif /* HAVE_MREMAP */
00723 
00724 /* Define USE_ARENAS to enable support for multiple `arenas'.  These
00725    are allocated using mmap(), are necessary for threads and
00726    occasionally useful to overcome address space limitations affecting
00727    sbrk(). */
00728 
00729 #ifndef USE_ARENAS
00730 #define USE_ARENAS HAVE_MMAP
00731 #endif
00732 
00733 
00734 /*
00735   The system page size. To the extent possible, this malloc manages
00736   memory from the system in page-size units.  Note that this value is
00737   cached during initialization into a field of malloc_state. So even
00738   if malloc_getpagesize is a function, it is only called once.
00739 
00740   The following mechanics for getpagesize were adapted from bsd/gnu
00741   getpagesize.h. If none of the system-probes here apply, a value of
00742   4096 is used, which should be OK: If they don't apply, then using
00743   the actual value probably doesn't impact performance.
00744 */
00745 
00746 
00747 #ifndef malloc_getpagesize
00748 
00749 #ifndef LACKS_UNISTD_H
00750 #  include <unistd.h>
00751 #endif
00752 
00753 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
00754 #    ifndef _SC_PAGE_SIZE
00755 #      define _SC_PAGE_SIZE _SC_PAGESIZE
00756 #    endif
00757 #  endif
00758 
00759 #  ifdef _SC_PAGE_SIZE
00760 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
00761 #  else
00762 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
00763        extern size_t getpagesize();
00764 #      define malloc_getpagesize getpagesize()
00765 #    else
00766 #      ifdef WIN32 /* use supplied emulation of getpagesize */
00767 #        define malloc_getpagesize getpagesize()
00768 #      else
00769 #        ifndef LACKS_SYS_PARAM_H
00770 #          include <sys/param.h>
00771 #        endif
00772 #        ifdef EXEC_PAGESIZE
00773 #          define malloc_getpagesize EXEC_PAGESIZE
00774 #        else
00775 #          ifdef NBPG
00776 #            ifndef CLSIZE
00777 #              define malloc_getpagesize NBPG
00778 #            else
00779 #              define malloc_getpagesize (NBPG * CLSIZE)
00780 #            endif
00781 #          else
00782 #            ifdef NBPC
00783 #              define malloc_getpagesize NBPC
00784 #            else
00785 #              ifdef PAGESIZE
00786 #                define malloc_getpagesize PAGESIZE
00787 #              else /* just guess */
00788 #                define malloc_getpagesize (4096)
00789 #              endif
00790 #            endif
00791 #          endif
00792 #        endif
00793 #      endif
00794 #    endif
00795 #  endif
00796 #endif
00797 
00798 /*
00799   This version of malloc supports the standard SVID/XPG mallinfo
00800   routine that returns a struct containing usage properties and
00801   statistics. It should work on any SVID/XPG compliant system that has
00802   a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
00803   install such a thing yourself, cut out the preliminary declarations
00804   as described above and below and save them in a malloc.h file. But
00805   there's no compelling reason to bother to do this.)
00806 
00807   The main declaration needed is the mallinfo struct that is returned
00808   (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
00809   bunch of fields that are not even meaningful in this version of
00810   malloc.  These fields are instead filled by mallinfo() with
00811   other numbers that might be of interest.
00812 
00813   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
00814   /usr/include/malloc.h file that includes a declaration of struct
00815   mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
00816   version is declared below.  These must be precisely the same for
00817   mallinfo() to work.  The original SVID version of this struct,
00818   defined on most systems with mallinfo, declares all fields as
00819   ints. But some others define them as unsigned long. If your system
00820   defines the fields using a type of different width than listed here,
00821   you must #include your system version and #define
00822   HAVE_USR_INCLUDE_MALLOC_H.
00823 */
00824 
00825 /* #define HAVE_USR_INCLUDE_MALLOC_H */
00826 
00827 #ifdef HAVE_USR_INCLUDE_MALLOC_H
00828 #include "/usr/include/malloc.h"
00829 #endif
00830 
00831 
00832 /* ---------- description of public routines ------------ */
00833 
00834 /*
00835   malloc(size_t n)
00836   Returns a pointer to a newly allocated chunk of at least n bytes, or null
00837   if no space is available. Additionally, on failure, errno is
00838   set to ENOMEM on ANSI C systems.
00839 
00840   If n is zero, malloc returns a minimum-sized chunk. (The minimum
00841   size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
00842   systems.)  On most systems, size_t is an unsigned type, so calls
00843   with negative arguments are interpreted as requests for huge amounts
00844   of space, which will often fail. The maximum supported value of n
00845   differs across systems, but is in all cases less than the maximum
00846   representable value of a size_t.
00847 */
00848 #if __STD_C
00849 Void_t*  public_mALLOc(size_t);
00850 #else
00851 Void_t*  public_mALLOc();
00852 #endif
00853 #ifdef libc_hidden_proto
00854 libc_hidden_proto (public_mALLOc)
00855 #endif
00856 
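/*
  Illustrative usage sketch (handle_out_of_memory is a hypothetical
  application routine, not something defined in this file):

    char* buf = (char*) malloc(100);
    if (buf == 0)              // null return; errno is ENOMEM on ANSI systems
      handle_out_of_memory();
*/
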
00857 /*
00858   free(Void_t* p)
00859   Releases the chunk of memory pointed to by p, that had been previously
00860   allocated using malloc or a related routine such as realloc.
00861   It has no effect if p is null. It can have arbitrary (i.e., bad!)
00862   effects if p has already been freed.
00863 
00864   Unless disabled (using mallopt), freeing very large spaces will,
00865   when possible, automatically trigger operations that give
00866   back unused memory to the system, thus reducing program footprint.
00867 */
00868 #if __STD_C
00869 void     public_fREe(Void_t*);
00870 #else
00871 void     public_fREe();
00872 #endif
00873 #ifdef libc_hidden_proto
00874 libc_hidden_proto (public_fREe)
00875 #endif
00876 
00877 /*
00878   calloc(size_t n_elements, size_t element_size);
00879   Returns a pointer to n_elements * element_size bytes, with all locations
00880   set to zero.
00881 */
00882 #if __STD_C
00883 Void_t*  public_cALLOc(size_t, size_t);
00884 #else
00885 Void_t*  public_cALLOc();
00886 #endif
00887 
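/*
  Illustrative sketch: an array of 100 zeroed counters.  Note that the
  element count and element size are passed separately rather than
  pre-multiplied (handle_out_of_memory is a hypothetical routine):

    int* counts = (int*) calloc(100, sizeof(int));
    if (counts == 0)
      handle_out_of_memory();
*/
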
00888 /*
00889   realloc(Void_t* p, size_t n)
00890   Returns a pointer to a chunk of size n that contains the same data
00891   as does chunk p up to the minimum of (n, p's size) bytes, or null
00892   if no space is available.
00893 
00894   The returned pointer may or may not be the same as p. The algorithm
00895   prefers extending p when possible, otherwise it employs the
00896   equivalent of a malloc-copy-free sequence.
00897 
00898   If p is null, realloc is equivalent to malloc.
00899 
00900   If space is not available, realloc returns null, errno is set (if on
00901   ANSI) and p is NOT freed.
00902 
00903   If n is for fewer bytes than already held by p, the newly unused
00904   space is lopped off and freed if possible.  Unless the #define
00905   REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
00906   zero (re)allocates a minimum-sized chunk.
00907 
00908   Large chunks that were internally obtained via mmap will always
00909   be reallocated using malloc-copy-free sequences unless
00910   the system supports MREMAP (currently only linux).
00911 
00912   The old unix realloc convention of allowing the last-free'd chunk
00913   to be used as an argument to realloc is not supported.
00914 */
00915 #if __STD_C
00916 Void_t*  public_rEALLOc(Void_t*, size_t);
00917 #else
00918 Void_t*  public_rEALLOc();
00919 #endif
00920 #ifdef libc_hidden_proto
00921 libc_hidden_proto (public_rEALLOc)
00922 #endif
00923 
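/*
  Because a failed realloc leaves p allocated (see above), the usual
  pattern is to assign through a temporary first.  Illustrative sketch
  (handle_out_of_memory is a hypothetical routine):

    void* tmp = realloc(p, new_size);
    if (tmp != 0)
      p = tmp;                  // success; the chunk may have moved
    else
      handle_out_of_memory();   // failure; p is still valid and still owned
*/
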
00924 /*
00925   memalign(size_t alignment, size_t n);
00926   Returns a pointer to a newly allocated chunk of n bytes, aligned
00927   in accord with the alignment argument.
00928 
00929   The alignment argument should be a power of two. If the argument is
00930   not a power of two, the nearest greater power is used.
00931   8-byte alignment is guaranteed by normal malloc calls, so don't
00932   bother calling memalign with an argument of 8 or less.
00933 
00934   Overreliance on memalign is a sure way to fragment space.
00935 */
00936 #if __STD_C
00937 Void_t*  public_mEMALIGn(size_t, size_t);
00938 #else
00939 Void_t*  public_mEMALIGn();
00940 #endif
00941 #ifdef libc_hidden_proto
00942 libc_hidden_proto (public_mEMALIGn)
00943 #endif
00944 
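/*
  Illustrative sketch: requesting a 64-byte-aligned block (64 is a
  power of two, as the alignment argument should be):

    void* p = memalign(64, 1000);
    // if p is non-null, ((uintptr_t) p & 63) == 0
*/
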
00945 /*
00946   valloc(size_t n);
00947   Equivalent to memalign(pagesize, n), where pagesize is the page
00948   size of the system. If the pagesize is unknown, 4096 is used.
00949 */
00950 #if __STD_C
00951 Void_t*  public_vALLOc(size_t);
00952 #else
00953 Void_t*  public_vALLOc();
00954 #endif
00955 
00956 
00957 
00958 /*
00959   mallopt(int parameter_number, int parameter_value)
00960   Sets tunable parameters.  The format is to provide a
00961   (parameter-number, parameter-value) pair.  mallopt then sets the
00962   corresponding parameter to the argument value if it can (i.e., so
00963   long as the value is meaningful), and returns 1 if successful else
00964   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
00965   normally defined in malloc.h.  Only one of these (M_MXFAST) is used
00966   in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
00967   so setting them has no effect. But this malloc also supports four
00968   other options in mallopt. See below for details.  Briefly, supported
00969   parameters are as follows (listed defaults are for "typical"
00970   configurations).
00971 
00972   Symbol            param #   default    allowed param values
00973   M_MXFAST          1         64         0-80  (0 disables fastbins)
00974   M_TRIM_THRESHOLD -1         128*1024   any   (-1U disables trimming)
00975   M_TOP_PAD        -2         0          any
00976   M_MMAP_THRESHOLD -3         128*1024   any   (or 0 if no MMAP support)
00977   M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
00978 */
00979 #if __STD_C
00980 int      public_mALLOPt(int, int);
00981 #else
00982 int      public_mALLOPt();
00983 #endif
00984 
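/*
  Illustrative sketch of adjusting a few of the parameters above at
  program startup (the values are arbitrary examples; each call returns
  1 on success and 0 otherwise):

    mallopt(M_MXFAST, 0);                    // disable fastbins entirely
    mallopt(M_TRIM_THRESHOLD, 256 * 1024);   // keep at most 256K of trailing slack
    mallopt(M_MMAP_THRESHOLD, 1024 * 1024);  // mmap requests of 1MB and larger
*/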
00985 
00986 /*
00987   mallinfo()
00988   Returns (by copy) a struct containing various summary statistics:
00989 
00990   arena:     current total non-mmapped bytes allocated from system
00991   ordblks:   the number of free chunks
00992   smblks:    the number of fastbin blocks (i.e., small chunks that
00993                have been freed but not yet reused or consolidated)
00994   hblks:     current number of mmapped regions
00995   hblkhd:    total bytes held in mmapped regions
00996   usmblks:   the maximum total allocated space. This will be greater
00997                 than current total if trimming has occurred.
00998   fsmblks:   total bytes held in fastbin blocks
00999   uordblks:  current total allocated space (normal or mmapped)
01000   fordblks:  total free space
01001   keepcost:  the maximum number of bytes that could ideally be released
01002                back to system via malloc_trim. ("ideally" means that
01003                it ignores page restrictions etc.)
01004 
01005   Because these fields are ints, but internal bookkeeping may
01006   be kept as longs, the reported values may wrap around zero and
01007   thus be inaccurate.
01008 */
01009 #if __STD_C
01010 struct mallinfo public_mALLINFo(void);
01011 #else
01012 struct mallinfo public_mALLINFo();
01013 #endif
01014 
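/*
  Illustrative sketch of reading a few of the fields described above:

    struct mallinfo mi = mallinfo();
    fprintf(stderr, "arena=%d uordblks=%d fordblks=%d keepcost=%d\n",
            mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
*/
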
01015 #ifndef _LIBC
01016 /*
01017   independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
01018 
01019   independent_calloc is similar to calloc, but instead of returning a
01020   single cleared space, it returns an array of pointers to n_elements
01021   independent elements that can hold contents of size elem_size, each
01022   of which starts out cleared, and can be independently freed,
01023   realloc'ed etc. The elements are guaranteed to be adjacently
01024   allocated (this is not guaranteed to occur with multiple callocs or
01025   mallocs), which may also improve cache locality in some
01026   applications.
01027 
01028   The "chunks" argument is optional (i.e., may be null, which is
01029   probably the most typical usage). If it is null, the returned array
01030   is itself dynamically allocated and should also be freed when it is
01031   no longer needed. Otherwise, the chunks array must be of at least
01032   n_elements in length. It is filled in with the pointers to the
01033   chunks.
01034 
01035   In either case, independent_calloc returns this pointer array, or
01036   null if the allocation failed.  If n_elements is zero and "chunks"
01037   is null, it returns a chunk representing an array with zero elements
01038   (which should be freed if not wanted).
01039 
01040   Each element must be individually freed when it is no longer
01041   needed. If you'd like to instead be able to free all at once, you
01042   should use regular calloc and assign pointers into this
01043   space to represent elements.  (In this case though, you cannot
01044   independently free elements.)
01045 
01046   independent_calloc simplifies and speeds up implementations of many
01047   kinds of pools.  It may also be useful when constructing large data
01048   structures that initially have a fixed number of fixed-sized nodes,
01049   but the number is not known at compile time, and some of the nodes
01050   may later need to be freed. For example:
01051 
01052   struct Node { int item; struct Node* next; };
01053 
01054   struct Node* build_list() {
01055     struct Node** pool;
01056     int n = read_number_of_nodes_needed();
01057     if (n <= 0) return 0;
01058     pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
01059     if (pool == 0) die();
01060     // organize into a linked list...
01061     struct Node* first = pool[0];
01062     for (int i = 0; i < n-1; ++i)
01063       pool[i]->next = pool[i+1];
01064     free(pool);     // Can now free the array (or not, if it is needed later)
01065     return first;
01066   }
01067 */
01068 #if __STD_C
01069 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
01070 #else
01071 Void_t** public_iCALLOc();
01072 #endif
01073 
01074 /*
01075   independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
01076 
01077   independent_comalloc allocates, all at once, a set of n_elements
01078   chunks with sizes indicated in the "sizes" array.    It returns
01079   an array of pointers to these elements, each of which can be
01080   independently freed, realloc'ed etc. The elements are guaranteed to
01081   be adjacently allocated (this is not guaranteed to occur with
01082   multiple callocs or mallocs), which may also improve cache locality
01083   in some applications.
01084 
01085   The "chunks" argument is optional (i.e., may be null). If it is null
01086   the returned array is itself dynamically allocated and should also
01087   be freed when it is no longer needed. Otherwise, the chunks array
01088   must be of at least n_elements in length. It is filled in with the
01089   pointers to the chunks.
01090 
01091   In either case, independent_comalloc returns this pointer array, or
01092   null if the allocation failed.  If n_elements is zero and chunks is
01093   null, it returns a chunk representing an array with zero elements
01094   (which should be freed if not wanted).
01095 
01096   Each element must be individually freed when it is no longer
01097   needed. If you'd like to instead be able to free all at once, you
01098   should use a single regular malloc, and assign pointers at
01099   particular offsets in the aggregate space. (In this case though, you
01100   cannot independently free elements.)
01101 
01102   independent_comalloc differs from independent_calloc in that each
01103   element may have a different size, and also that it does not
01104   automatically clear elements.
01105 
01106   independent_comalloc can be used to speed up allocation in cases
01107   where several structs or objects must always be allocated at the
01108   same time.  For example:
01109 
01110   struct Head { ... };
01111   struct Foot { ... };
01112 
01113   void send_message(char* msg) {
01114     int msglen = strlen(msg);
01115     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
01116     void* chunks[3];
01117     if (independent_comalloc(3, sizes, chunks) == 0)
01118       die();
01119     struct Head* head = (struct Head*)(chunks[0]);
01120     char*        body = (char*)(chunks[1]);
01121     struct Foot* foot = (struct Foot*)(chunks[2]);
01122     // ...
01123   }
01124 
01125   In general though, independent_comalloc is worth using only for
01126   larger values of n_elements. For small values, you probably won't
01127   detect enough difference from series of malloc calls to bother.
01128 
01129   Overuse of independent_comalloc can increase overall memory usage,
01130   since it cannot reuse existing noncontiguous small chunks that
01131   might be available for some of the elements.
01132 */
01133 #if __STD_C
01134 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
01135 #else
01136 Void_t** public_iCOMALLOc();
01137 #endif
01138 
01139 #endif /* _LIBC */
01140 
01141 
01142 /*
01143   pvalloc(size_t n);
01144   Equivalent to valloc(minimum-page-that-holds(n)), that is,
01145   round up n to nearest pagesize.
01146  */
01147 #if __STD_C
01148 Void_t*  public_pVALLOc(size_t);
01149 #else
01150 Void_t*  public_pVALLOc();
01151 #endif
01152 
01153 /*
01154   cfree(Void_t* p);
01155   Equivalent to free(p).
01156 
01157   cfree is needed/defined on some systems that pair it with calloc,
01158   for odd historical reasons (such as: cfree is used in example
01159   code in the first edition of K&R).
01160 */
01161 #if __STD_C
01162 void     public_cFREe(Void_t*);
01163 #else
01164 void     public_cFREe();
01165 #endif
01166 
01167 /*
01168   malloc_trim(size_t pad);
01169 
01170   If possible, gives memory back to the system (via negative
01171   arguments to sbrk) if there is unused memory at the `high' end of
01172   the malloc pool. You can call this after freeing large blocks of
01173   memory to potentially reduce the system-level memory requirements
01174   of a program. However, it cannot guarantee to reduce memory. Under
01175   some allocation patterns, some large free blocks of memory will be
01176   locked between two used chunks, so they cannot be given back to
01177   the system.
01178 
01179   The `pad' argument to malloc_trim represents the amount of free
01180   trailing space to leave untrimmed. If this argument is zero,
01181   only the minimum amount of memory to maintain internal data
01182   structures will be left (one page or less). Non-zero arguments
01183   can be supplied to maintain enough trailing space to service
01184   future expected allocations without having to re-obtain memory
01185   from the system.
01186 
01187   Malloc_trim returns 1 if it actually released any memory, else 0.
01188   On systems that do not support "negative sbrks", it will always
01189   return 0.
01190 */
01191 #if __STD_C
01192 int      public_mTRIm(size_t);
01193 #else
01194 int      public_mTRIm();
01195 #endif
01196 
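/*
  Illustrative sketch: after releasing a large working set, ask for
  trailing free memory to be returned to the system while keeping 64K
  of slack for future requests (the pad value is an arbitrary example):

    int released = malloc_trim(64 * 1024);
    // released is 1 if any memory was actually given back, else 0
*/
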
01197 /*
01198   malloc_usable_size(Void_t* p);
01199 
01200   Returns the number of bytes you can actually use in
01201   an allocated chunk, which may be more than you requested (although
01202   often not) due to alignment and minimum size constraints.
01203   You can use this many bytes without worrying about
01204   overwriting other allocated objects. This is not a particularly great
01205   programming practice. malloc_usable_size can be more useful in
01206   debugging and assertions, for example:
01207 
01208   p = malloc(n);
01209   assert(malloc_usable_size(p) >= 256);
01210 
01211 */
01212 #if __STD_C
01213 size_t   public_mUSABLe(Void_t*);
01214 #else
01215 size_t   public_mUSABLe();
01216 #endif
01217 
01218 /*
01219   malloc_stats();
01220   Prints on stderr the amount of space obtained from the system (both
01221   via sbrk and mmap), the maximum amount (which may be more than
01222   current if malloc_trim and/or munmap got called), and the current
01223   number of bytes allocated via malloc (or realloc, etc) but not yet
01224   freed. Note that this is the number of bytes allocated, not the
01225   number requested. It will be larger than the number requested
01226   because of alignment and bookkeeping overhead. Because it includes
01227   alignment wastage as being in use, this figure may be greater than
01228   zero even when no user-level chunks are allocated.
01229 
01230   The reported current and maximum system memory can be inaccurate if
01231   a program makes other calls to system memory allocation functions
01232   (normally sbrk) outside of malloc.
01233 
01234   malloc_stats prints only the most commonly interesting statistics.
01235   More information can be obtained by calling mallinfo.
01236 
01237 */
01238 #if __STD_C
01239 void     public_mSTATs(void);
01240 #else
01241 void     public_mSTATs();
01242 #endif
01243 
01244 /*
01245   malloc_get_state(void);
01246 
01247   Returns the state of all malloc variables in an opaque data
01248   structure.
01249 */
01250 #if __STD_C
01251 Void_t*  public_gET_STATe(void);
01252 #else
01253 Void_t*  public_gET_STATe();
01254 #endif
01255 
01256 /*
01257   malloc_set_state(Void_t* state);
01258 
01259   Restore the state of all malloc variables from data obtained with
01260   malloc_get_state().
01261 */
01262 #if __STD_C
01263 int      public_sET_STATe(Void_t*);
01264 #else
01265 int      public_sET_STATe();
01266 #endif
01267 
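/*
  Illustrative sketch of the get/set pair; how the opaque pointer is
  kept between the two calls is up to the application:

    void* state = malloc_get_state();   // snapshot of malloc variables
    // ... later ...
    malloc_set_state(state);            // restore that snapshot
*/
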
01268 #ifdef _LIBC
01269 /*
01270   posix_memalign(void **memptr, size_t alignment, size_t size);
01271 
01272   POSIX wrapper like memalign(), checking for validity of size.
01273 */
01274 int      __posix_memalign(void **, size_t, size_t);
01275 #endif
01276 
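/*
  Illustrative sketch of the POSIX-level interface that the helper
  above backs: the result is stored through the first argument, and the
  return value is an error code (0 on success) rather than a pointer
  (handle_out_of_memory is a hypothetical routine):

    void* p;
    if (posix_memalign(&p, 64, 1000) != 0)   // 64: power of two, multiple of sizeof(void*)
      handle_out_of_memory();
*/
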
01277 /* mallopt tuning options */
01278 
01279 /*
01280   M_MXFAST is the maximum request size used for "fastbins", special bins
01281   that hold returned chunks without consolidating their spaces. This
01282   enables future requests for chunks of the same size to be handled
01283   very quickly, but can increase fragmentation, and thus increase the
01284   overall memory footprint of a program.
01285 
01286   This malloc manages fastbins very conservatively yet still
01287   efficiently, so fragmentation is rarely a problem for values less
01288   than or equal to the default.  The maximum supported value of MXFAST
01289   is 80. You wouldn't want it any higher than this anyway.  Fastbins
01290   are designed especially for use with many small structs, objects or
01291   strings -- the default handles structs/objects/arrays with sizes up
01292   to 8 4byte fields, or small strings representing words, tokens,
01293   etc. Using fastbins for larger objects normally worsens
01294   fragmentation without improving speed.
01295 
01296   M_MXFAST is set in REQUEST size units. It is internally used in
01297   chunksize units, which adds padding and alignment.  You can reduce
01298   M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
01299   algorithm to be a closer approximation of fifo-best-fit in all cases,
01300   not just for larger requests, but will generally cause it to be
01301   slower.
01302 */
01303 
01304 
01305 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
01306 #ifndef M_MXFAST
01307 #define M_MXFAST            1
01308 #endif
01309 
01310 #ifndef DEFAULT_MXFAST
01311 #define DEFAULT_MXFAST     64
01312 #endif
01313 
01314 
01315 /*
01316   M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
01317   to keep before releasing via malloc_trim in free().
01318 
01319   Automatic trimming is mainly useful in long-lived programs.
01320   Because trimming via sbrk can be slow on some systems, and can
01321   sometimes be wasteful (in cases where programs immediately
01322   afterward allocate more large chunks) the value should be high
01323   enough so that your overall system performance would improve by
01324   releasing this much memory.
01325 
01326   The trim threshold and the mmap control parameters (see below)
01327   can be traded off with one another. Trimming and mmapping are
01328   two different ways of releasing unused memory back to the
01329   system. Between these two, it is often possible to keep
01330   system-level demands of a long-lived program down to a bare
01331   minimum. For example, in one test suite of sessions measuring
01332   the XF86 X server on Linux, using a trim threshold of 128K and a
01333   mmap threshold of 192K led to near-minimal long term resource
01334   consumption.
01335 
01336   If you are using this malloc in a long-lived program, it should
01337   pay to experiment with these values.  As a rough guide, you
01338   might set it to a value close to the average size of a process
01339   (program) running on your system.  Releasing this much memory
01340   would allow such a process to run in memory.  Generally, it's
01341   worth it to tune for trimming rather than memory mapping when a
01342   program undergoes phases where several large chunks are
01343   allocated and released in ways that can reuse each other's
01344   storage, perhaps mixed with phases where there are no such
01345   chunks at all.  And in well-behaved long-lived programs,
01346   controlling release of large blocks via trimming versus mapping
01347   is usually faster.
01348 
01349   However, in most programs, these parameters serve mainly as
01350   protection against the system-level effects of carrying around
01351   massive amounts of unneeded memory. Since frequent calls to
01352   sbrk, mmap, and munmap otherwise degrade performance, the default
01353   parameters are set to relatively high values that serve only as
01354   safeguards.
01355 
01356   The trim value must be greater than the page size to have any useful
01357   effect.  To disable trimming completely, you can set it to
01358   (unsigned long)(-1).
01359 
01360   Trim settings interact with fastbin (MXFAST) settings: Unless
01361   TRIM_FASTBINS is defined, automatic trimming never takes place upon
01362   freeing a chunk with size less than or equal to MXFAST. Trimming is
01363   instead delayed until subsequent freeing of larger chunks. However,
01364   you can still force an attempted trim by calling malloc_trim.
01365 
01366   Also, trimming is not generally possible in cases where
01367   the main arena is obtained via mmap.
01368 
01369   Note that the trick some people use of mallocing a huge space and
01370   then freeing it at program startup, in an attempt to reserve system
01371   memory, doesn't have the intended effect under automatic trimming,
01372   since that memory will immediately be returned to the system.
01373 */
01374 
01375 #define M_TRIM_THRESHOLD       -1
01376 
01377 #ifndef DEFAULT_TRIM_THRESHOLD
01378 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
01379 #endif
01380 
01381 /*
01382   M_TOP_PAD is the amount of extra `padding' space to allocate or
01383   retain whenever sbrk is called. It is used in two ways internally:
01384 
01385   * When sbrk is called to extend the top of the arena to satisfy
01386   a new malloc request, this much padding is added to the sbrk
01387   request.
01388 
01389   * When malloc_trim is called automatically from free(),
01390   it is used as the `pad' argument.
01391 
01392   In both cases, the actual amount of padding is rounded
01393   so that the end of the arena is always a system page boundary.
01394 
01395   The main reason for using padding is to avoid calling sbrk so
01396   often. Having even a small pad greatly reduces the likelihood
01397   that nearly every malloc request during program start-up (or
01398   after trimming) will invoke sbrk, which needlessly wastes
01399   time.
01400 
01401   Automatic rounding-up to page-size units is normally sufficient
01402   to avoid measurable overhead, so the default is 0.  However, in
01403   systems where sbrk is relatively slow, it can pay to increase
01404   this value, at the expense of carrying around more memory than
01405   the program needs.
01406 */
01407 
01408 #define M_TOP_PAD              -2
01409 
01410 #ifndef DEFAULT_TOP_PAD
01411 #define DEFAULT_TOP_PAD        (0)
01412 #endif
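
/*
  Illustrative example (not compiled): on a system where sbrk is known to
  be slow, a program can trade a little extra memory for fewer sbrk calls
  by raising M_TOP_PAD via mallopt().  The 1M figure is arbitrary and used
  only for this sketch.
*/
#if 0 /* example only */
#include <malloc.h>

static void example_tune_top_pad (void)
{
  /* Request an extra 1M of slack whenever the arena is grown via sbrk.  */
  mallopt (M_TOP_PAD, 1024 * 1024);
}
#endif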
01413 
01414 /*
01415   MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
01416   adjusted MMAP_THRESHOLD.
01417 */
01418 
01419 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
01420 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
01421 #endif
01422 
01423 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
01424   /* For 32-bit platforms we cannot increase the maximum mmap
01425      threshold much because it is also the minimum value for the
01426      maximum heap size and its alignment.  Going above 512k (i.e., 1M
01427      for new heaps) wastes too much address space.  */
01428 # if __WORDSIZE == 32
01429 #  define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
01430 # else
01431 #  define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
01432 # endif
01433 #endif
01434 
01435 /*
01436   M_MMAP_THRESHOLD is the request size threshold for using mmap()
01437   to service a request. Requests of at least this size that cannot
01438   be allocated using already-existing space will be serviced via mmap.
01439   (If enough normal freed space already exists it is used instead.)
01440 
01441   Using mmap segregates relatively large chunks of memory so that
01442   they can be individually obtained and released from the host
01443   system. A request serviced through mmap is never reused by any
01444   other request (at least not directly; the system may just so
01445   happen to remap successive requests to the same locations).
01446 
01447   Segregating space in this way has the benefits that:
01448 
01449    1. Mmapped space can ALWAYS be individually released back
01450       to the system, which helps keep the system level memory
01451       demands of a long-lived program low.
01452    2. Mapped memory can never become `locked' between
01453       other chunks, as can happen with normally allocated chunks, which
01454       means that even trimming via malloc_trim would not release them.
01455    3. On some systems with "holes" in address spaces, mmap can obtain
01456       memory that sbrk cannot.
01457 
01458   However, it has the disadvantages that:
01459 
01460    1. The space cannot be reclaimed, consolidated, and then
01461       used to service later requests, as happens with normal chunks.
01462    2. It can lead to more wastage because of mmap page alignment
01463       requirements.
01464    3. It causes malloc performance to be more dependent on host
01465       system memory management support routines which may vary in
01466       implementation quality and may impose arbitrary
01467       limitations. Generally, servicing a request via normal
01468       malloc steps is faster than going through a system's mmap.
01469 
01470   The advantages of mmap nearly always outweigh disadvantages for
01471   "large" chunks, but the value of "large" varies across systems.  The
01472   default is an empirically derived value that works well in most
01473   systems.
01474 
01475 
01476   Update in 2006:
01477   The above was written in 2001. Since then the world has changed a lot.
01478   Memory got bigger. Applications got bigger. The virtual address space
01479   layout in 32 bit linux changed.
01480 
01481   In the new situation, brk() and mmap space is shared and there are no
01482   artificial limits on brk size imposed by the kernel. What is more,
01483   applications have started using transient allocations larger than the
01484   128Kb threshold imagined in 2001.
01485 
01486   The price for mmap is also high now; each time glibc mmaps from the
01487   kernel, the kernel is forced to zero out the memory it gives to the
01488   application. Zeroing memory is expensive and eats a lot of cache and
01489   memory bandwidth. This has nothing to do with the efficiency of the
01490   virtual memory system; when memory is obtained via mmap the kernel
01491   simply has no choice but to zero it.
01492 
01493   In 2001, the kernel had a maximum size for brk() of about 800
01494   megabytes on 32 bit x86; at that point brk() would hit the first
01495   mmapped shared libraries and couldn't expand any further. With current 2.6
01496   kernels, the VA space layout is different and brk() and mmap
01497   both can span the entire heap at will.
01498 
01499   Rather than using a static threshold for the brk/mmap tradeoff,
01500   we are now using a simple dynamic one. The goal is still to avoid
01501   fragmentation. The old goals we kept are
01502   1) try to get the long lived large allocations to use mmap()
01503   2) really large allocations should always use mmap()
01504   and we're adding now:
01505   3) transient allocations should use brk() to avoid forcing the kernel
01506      to zero memory over and over again
01507 
01508   The implementation works with a sliding threshold, which is by default
01509   limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
01510   out at 128Kb as per the 2001 default.
01511 
01512   This allows us to satisfy requirement 1) under the assumption that long
01513   lived allocations are made early in the process' lifespan, before it has
01514   started doing dynamic allocations of the same size (which will
01515   increase the threshold).
01516 
01517   The upper bound on the threshold satisfies requirement 2).
01518 
01519   The threshold goes up in value when the application frees memory that was
01520   allocated with the mmap allocator. The idea is that once the application
01521   starts freeing memory of a certain size, it's highly probable that this is
01522   a size the application uses for transient allocations. This estimator
01523   is there to satisfy the new third requirement.
01524 
01525 */
01526 
01527 #define M_MMAP_THRESHOLD      -3
01528 
01529 #ifndef DEFAULT_MMAP_THRESHOLD
01530 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
01531 #endif
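
/*
  Illustrative example (not compiled): a program that knows its own
  allocation pattern can pin the mmap threshold with mallopt().  Note that
  setting it manually also disables the dynamic (sliding) threshold
  described above.  The 1M value is arbitrary and chosen only for this
  sketch.
*/
#if 0 /* example only */
#include <malloc.h>

static void example_pin_mmap_threshold (void)
{
  /* Requests of 1M and larger that cannot be met from existing free
     space will be serviced with mmap.  */
  mallopt (M_MMAP_THRESHOLD, 1024 * 1024);
}
#endif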
01532 
01533 /*
01534   M_MMAP_MAX is the maximum number of requests to simultaneously
01535   service using mmap. This parameter exists because
01536   some systems have a limited number of internal tables for
01537   use by mmap, and using more than a few of them may degrade
01538   performance.
01539 
01540   The default is set to a value that serves only as a safeguard.
01541   Setting to 0 disables use of mmap for servicing large requests.  If
01542   HAVE_MMAP is not set, the default value is 0, and attempts to set it
01543   to non-zero values in mallopt will fail.
01544 */
01545 
01546 #define M_MMAP_MAX             -4
01547 
01548 #ifndef DEFAULT_MMAP_MAX
01549 #if HAVE_MMAP
01550 #define DEFAULT_MMAP_MAX       (65536)
01551 #else
01552 #define DEFAULT_MMAP_MAX       (0)
01553 #endif
01554 #endif
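
/*
  Illustrative example (not compiled): setting M_MMAP_MAX to 0 via
  mallopt() disables the use of mmap for servicing large requests
  altogether, forcing all allocations through the arenas.
*/
#if 0 /* example only */
#include <malloc.h>

static void example_disable_mmap (void)
{
  mallopt (M_MMAP_MAX, 0);  /* never service requests with mmap */
}
#endif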
01555 
01556 #ifdef __cplusplus
01557 } /* end of extern "C" */
01558 #endif
01559 
01560 #include <malloc.h>
01561 
01562 #ifndef BOUNDED_N
01563 #define BOUNDED_N(ptr, sz) (ptr)
01564 #endif
01565 #ifndef RETURN_ADDRESS
01566 #define RETURN_ADDRESS(X_) (NULL)
01567 #endif
01568 
01569 /* On some platforms we can compile internal, not exported functions better.
01570    Let the environment provide a macro and define it to be empty if it
01571    is not available.  */
01572 #ifndef internal_function
01573 # define internal_function
01574 #endif
01575 
01576 /* Forward declarations.  */
01577 struct malloc_chunk;
01578 typedef struct malloc_chunk* mchunkptr;
01579 
01580 /* Internal routines.  */
01581 
01582 #if __STD_C
01583 
01584 Void_t*         _int_malloc(mstate, size_t);
01585 void            _int_free(mstate, Void_t*);
01586 Void_t*         _int_realloc(mstate, Void_t*, size_t);
01587 Void_t*         _int_memalign(mstate, size_t, size_t);
01588 Void_t*         _int_valloc(mstate, size_t);
01589 static Void_t*  _int_pvalloc(mstate, size_t);
01590 /*static Void_t*  cALLOc(size_t, size_t);*/
01591 #ifndef _LIBC
01592 static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
01593 static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
01594 #endif
01595 static int      mTRIm(mstate, size_t);
01596 static size_t   mUSABLe(Void_t*);
01597 static void     mSTATs(void);
01598 static int      mALLOPt(int, int);
01599 static struct mallinfo mALLINFo(mstate);
01600 static void malloc_printerr(int action, const char *str, void *ptr);
01601 
01602 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
01603 static int internal_function top_check(void);
01604 static void internal_function munmap_chunk(mchunkptr p);
01605 #if HAVE_MREMAP
01606 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
01607 #endif
01608 
01609 static Void_t*   malloc_check(size_t sz, const Void_t *caller);
01610 static void      free_check(Void_t* mem, const Void_t *caller);
01611 static Void_t*   realloc_check(Void_t* oldmem, size_t bytes,
01612                             const Void_t *caller);
01613 static Void_t*   memalign_check(size_t alignment, size_t bytes,
01614                             const Void_t *caller);
01615 #ifndef NO_THREADS
01616 # ifdef _LIBC
01617 #  if USE___THREAD || !defined SHARED
01618     /* These routines are never needed in this configuration.  */
01619 #   define NO_STARTER
01620 #  endif
01621 # endif
01622 # ifdef NO_STARTER
01623 #  undef NO_STARTER
01624 # else
01625 static Void_t*   malloc_starter(size_t sz, const Void_t *caller);
01626 static Void_t*   memalign_starter(size_t aln, size_t sz, const Void_t *caller);
01627 static void      free_starter(Void_t* mem, const Void_t *caller);
01628 # endif
01629 static Void_t*   malloc_atfork(size_t sz, const Void_t *caller);
01630 static void      free_atfork(Void_t* mem, const Void_t *caller);
01631 #endif
01632 
01633 #else
01634 
01635 Void_t*         _int_malloc();
01636 void            _int_free();
01637 Void_t*         _int_realloc();
01638 Void_t*         _int_memalign();
01639 Void_t*         _int_valloc();
01640 Void_t*         _int_pvalloc();
01641 /*static Void_t*  cALLOc();*/
01642 static Void_t** _int_icalloc();
01643 static Void_t** _int_icomalloc();
01644 static int      mTRIm();
01645 static size_t   mUSABLe();
01646 static void     mSTATs();
01647 static int      mALLOPt();
01648 static struct mallinfo mALLINFo();
01649 
01650 #endif
01651 
01652 
01653 
01654 
01655 /* ------------- Optional versions of memcopy ---------------- */
01656 
01657 
01658 #if USE_MEMCPY
01659 
01660 /*
01661   Note: memcpy is ONLY invoked with non-overlapping regions,
01662   so the (usually slower) memmove is not needed.
01663 */
01664 
01665 #define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
01666 #define MALLOC_ZERO(dest, nbytes)       memset(dest, 0,   nbytes)
01667 
01668 #else /* !USE_MEMCPY */
01669 
01670 /* Use Duff's device for good zeroing/copying performance. */
01671 
01672 #define MALLOC_ZERO(charp, nbytes)                                            \
01673 do {                                                                          \
01674   INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
01675   unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
01676   long mcn;                                                                   \
01677   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
01678   switch (mctmp) {                                                            \
01679     case 0: for(;;) { *mzp++ = 0;                                             \
01680     case 7:           *mzp++ = 0;                                             \
01681     case 6:           *mzp++ = 0;                                             \
01682     case 5:           *mzp++ = 0;                                             \
01683     case 4:           *mzp++ = 0;                                             \
01684     case 3:           *mzp++ = 0;                                             \
01685     case 2:           *mzp++ = 0;                                             \
01686     case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
01687   }                                                                           \
01688 } while(0)
01689 
01690 #define MALLOC_COPY(dest,src,nbytes)                                          \
01691 do {                                                                          \
01692   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
01693   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
01694   unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
01695   long mcn;                                                                   \
01696   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
01697   switch (mctmp) {                                                            \
01698     case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
01699     case 7:           *mcdst++ = *mcsrc++;                                    \
01700     case 6:           *mcdst++ = *mcsrc++;                                    \
01701     case 5:           *mcdst++ = *mcsrc++;                                    \
01702     case 4:           *mcdst++ = *mcsrc++;                                    \
01703     case 3:           *mcdst++ = *mcsrc++;                                    \
01704     case 2:           *mcdst++ = *mcsrc++;                                    \
01705     case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
01706   }                                                                           \
01707 } while(0)
01708 
01709 #endif
01710 
01711 /* ------------------ MMAP support ------------------  */
01712 
01713 
01714 #if HAVE_MMAP
01715 
01716 #include <fcntl.h>
01717 #ifndef LACKS_SYS_MMAN_H
01718 #include <sys/mman.h>
01719 #endif
01720 
01721 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
01722 # define MAP_ANONYMOUS MAP_ANON
01723 #endif
01724 #if !defined(MAP_FAILED)
01725 # define MAP_FAILED ((char*)-1)
01726 #endif
01727 
01728 #ifndef MAP_NORESERVE
01729 # ifdef MAP_AUTORESRV
01730 #  define MAP_NORESERVE MAP_AUTORESRV
01731 # else
01732 #  define MAP_NORESERVE 0
01733 # endif
01734 #endif
01735 
01736 /*
01737    Nearly all versions of mmap support MAP_ANONYMOUS,
01738    so the following is unlikely to be needed, but is
01739    supplied just in case.
01740 */
01741 
01742 #ifndef MAP_ANONYMOUS
01743 
01744 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
01745 
01746 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
01747  (dev_zero_fd = open("/dev/zero", O_RDWR), \
01748   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
01749    mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
01750 
01751 #else
01752 
01753 #define MMAP(addr, size, prot, flags) \
01754  (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
01755 
01756 #endif
01757 
01758 
01759 #endif /* HAVE_MMAP */
01760 
01761 
01762 /*
01763   -----------------------  Chunk representations -----------------------
01764 */
01765 
01766 
01767 /*
01768   This struct declaration is misleading (but accurate and necessary).
01769   It declares a "view" into memory allowing access to necessary
01770   fields at known offsets from a given base. See explanation below.
01771 */
01772 
01773 struct malloc_chunk {
01774 
01775   INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
01776   INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */
01777 
01778   struct malloc_chunk* fd;         /* double links -- used only if free. */
01779   struct malloc_chunk* bk;
01780 
01781   /* Only used for large blocks: pointer to next larger size.  */
01782   struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
01783   struct malloc_chunk* bk_nextsize;
01784 };
01785 
01786 
01787 /*
01788    malloc_chunk details:
01789 
01790     (The following includes lightly edited explanations by Colin Plumb.)
01791 
01792     Chunks of memory are maintained using a `boundary tag' method as
01793     described in e.g., Knuth or Standish.  (See the paper by Paul
01794     Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
01795     survey of such techniques.)  Sizes of free chunks are stored both
01796     in the front of each chunk and at the end.  This makes
01797     consolidating fragmented chunks into bigger chunks very fast.  The
01798     size fields also hold bits representing whether chunks are free or
01799     in use.
01800 
01801     An allocated chunk looks like this:
01802 
01803 
01804     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01805             |             Size of previous chunk, if allocated            | |
01806             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01807             |             Size of chunk, in bytes                       |M|P|
01808       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01809             |             User data starts here...                          .
01810             .                                                               .
01811             .             (malloc_usable_size() bytes)                      .
01812             .                                                               |
01813 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01814             |             Size of chunk                                     |
01815             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01816 
01817 
01818     Where "chunk" is the front of the chunk for the purpose of most of
01819     the malloc code, but "mem" is the pointer that is returned to the
01820     user.  "Nextchunk" is the beginning of the next contiguous chunk.
01821 
01822     Chunks always begin on even word boundaries, so the mem portion
01823     (which is returned to the user) is also on an even word boundary, and
01824     thus at least double-word aligned.
01825 
01826     Free chunks are stored in circular doubly-linked lists, and look like this:
01827 
01828     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01829             |             Size of previous chunk                            |
01830             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01831     `head:' |             Size of chunk, in bytes                         |P|
01832       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01833             |             Forward pointer to next chunk in list             |
01834             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01835             |             Back pointer to previous chunk in list            |
01836             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01837             |             Unused space (may be 0 bytes long)                .
01838             .                                                               .
01839             .                                                               |
01840 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01841     `foot:' |             Size of chunk, in bytes                           |
01842             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
01843 
01844     The P (PREV_INUSE) bit, stored in the unused low-order bit of the
01845     chunk size (which is always a multiple of two words), is an in-use
01846     bit for the *previous* chunk.  If that bit is *clear*, then the
01847     word before the current chunk size contains the previous chunk
01848     size, and can be used to find the front of the previous chunk.
01849     The very first chunk allocated always has this bit set,
01850     preventing access to non-existent (or non-owned) memory. If
01851     prev_inuse is set for any given chunk, then you CANNOT determine
01852     the size of the previous chunk, and might even get a memory
01853     addressing fault when trying to do so.
01854 
01855     Note that the `foot' of the current chunk is actually represented
01856     as the prev_size of the NEXT chunk. This makes it easier to
01857     deal with alignments etc but can be very confusing when trying
01858     to extend or adapt this code.
01859 
01860     The two exceptions to all this are
01861 
01862      1. The special chunk `top' doesn't bother using the
01863         trailing size field since there is no next contiguous chunk
01864         that would have to index off it. After initialization, `top'
01865         is forced to always exist.  If it would become less than
01866         MINSIZE bytes long, it is replenished.
01867 
01868      2. Chunks allocated via mmap, which have the second-lowest-order
01869         bit M (IS_MMAPPED) set in their size fields.  Because they are
01870         allocated one-by-one, each must contain its own trailing size field.
01871 
01872 */
01873 
01874 /*
01875   ---------- Size and alignment checks and conversions ----------
01876 */
01877 
01878 /* conversion from malloc headers to user pointers, and back */
01879 
01880 #define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
01881 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
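
/*
  Worked example (illustrative, assuming SIZE_SZ == sizeof(size_t) == 8 on
  a 64-bit system): the user pointer simply skips the two size fields, so
  chunk2mem(p) == (char*)p + 16 and mem2chunk(mem) == (char*)mem - 16;
  with 4-byte sizes the offset is 8 instead.  Note that the fd/bk pointers
  of a free chunk therefore overlay the first bytes of what was user data
  while the chunk was allocated.
*/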
01882 
01883 /* The smallest possible chunk */
01884 #define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))
01885 
01886 /* The smallest size we can malloc is an aligned minimal chunk */
01887 
01888 #define MINSIZE  \
01889   (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
01890 
01891 /* Check if m has acceptable alignment */
01892 
01893 #define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
01894 
01895 #define misaligned_chunk(p) \
01896   ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
01897    & MALLOC_ALIGN_MASK)
01898 
01899 
01900 /*
01901    Check if a request is so large that it would wrap around zero when
01902    padded and aligned. To simplify some other code, the bound is made
01903    low enough so that adding MINSIZE will also not wrap around zero.
01904 */
01905 
01906 #define REQUEST_OUT_OF_RANGE(req)                                 \
01907   ((unsigned long)(req) >=                                        \
01908    (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
01909 
01910 /* pad request bytes into a usable size -- internal version */
01911 
01912 #define request2size(req)                                         \
01913   (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
01914    MINSIZE :                                                      \
01915    ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
01916 
01917 /*  Same, except also perform argument check */
01918 
01919 #define checked_request2size(req, sz)                             \
01920   if (REQUEST_OUT_OF_RANGE(req)) {                                \
01921     MALLOC_FAILURE_ACTION;                                        \
01922     return 0;                                                     \
01923   }                                                               \
01924   (sz) = request2size(req);
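
/*
  Worked example (illustrative, assuming a 64-bit layout with SIZE_SZ == 8,
  MALLOC_ALIGNMENT == 16, MALLOC_ALIGN_MASK == 15 and MINSIZE == 32):

    request2size(20)  == (20 + 8 + 15) & ~15  == 32
    request2size(4)   -> 4 + 8 + 15 == 27 < MINSIZE, so the result is 32
    request2size(100) == (100 + 8 + 15) & ~15 == 112

  On a 32-bit layout (SIZE_SZ == 4, MALLOC_ALIGNMENT == 8, MINSIZE == 16)
  the same requests yield 24, 16 and 104 respectively.
*/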
01925 
01926 /*
01927   --------------- Physical chunk operations ---------------
01928 */
01929 
01930 
01931 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
01932 #define PREV_INUSE 0x1
01933 
01934 /* extract inuse bit of previous chunk */
01935 #define prev_inuse(p)       ((p)->size & PREV_INUSE)
01936 
01937 
01938 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
01939 #define IS_MMAPPED 0x2
01940 
01941 /* check for mmap()'ed chunk */
01942 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
01943 
01944 
01945 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
01946    from a non-main arena.  This is only set immediately before handing
01947    the chunk to the user, if necessary.  */
01948 #define NON_MAIN_ARENA 0x4
01949 
01950 /* check for chunk from non-main arena */
01951 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
01952 
01953 
01954 /*
01955   Bits to mask off when extracting size
01956 
01957   Note: IS_MMAPPED is intentionally not masked off from size field in
01958   macros for which mmapped chunks should never be seen. This should
01959   cause helpful core dumps to occur if it is tried by accident by
01960   people extending or adapting this malloc.
01961 */
01962 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
01963 
01964 /* Get size, ignoring use bits */
01965 #define chunksize(p)         ((p)->size & ~(SIZE_BITS))
01966 
01967 
01968 /* Ptr to next physical malloc_chunk. */
01969 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
01970 
01971 /* Ptr to previous physical malloc_chunk */
01972 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
01973 
01974 /* Treat space at ptr + offset as a chunk */
01975 #define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
01976 
01977 /* extract p's inuse bit */
01978 #define inuse(p)\
01979 ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
01980 
01981 /* set/clear chunk as being inuse without otherwise disturbing */
01982 #define set_inuse(p)\
01983 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
01984 
01985 #define clear_inuse(p)\
01986 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
01987 
01988 
01989 /* check/set/clear inuse bits in known places */
01990 #define inuse_bit_at_offset(p, s)\
01991  (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
01992 
01993 #define set_inuse_bit_at_offset(p, s)\
01994  (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
01995 
01996 #define clear_inuse_bit_at_offset(p, s)\
01997  (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
01998 
01999 
02000 /* Set size at head, without disturbing its use bit */
02001 #define set_head_size(p, s)  ((p)->size = (((p)->size & SIZE_BITS) | (s)))
02002 
02003 /* Set size/use field */
02004 #define set_head(p, s)       ((p)->size = (s))
02005 
02006 /* Set size at footer (only when chunk is not in use) */
02007 #define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
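
/*
  Illustrative sketch (not compiled, not the exact allocator code): the
  boundary-tag macros above are typically used together when a free chunk
  is split.  The first nb bytes become the allocated piece; the rest
  becomes a new free chunk whose size is recorded both at its head and,
  via set_foot, in the prev_size field of the chunk that follows it.
*/
#if 0 /* example only */
static void example_split (mchunkptr victim, INTERNAL_SIZE_T nb)
{
  INTERNAL_SIZE_T remainder_size = chunksize (victim) - nb;
  mchunkptr remainder = chunk_at_offset (victim, nb);

  /* The allocated piece keeps its existing flag bits; only its size
     changes.  */
  set_head_size (victim, nb);
  /* The remainder records that the piece before it is now in use...  */
  set_head (remainder, remainder_size | PREV_INUSE);
  /* ...and, being free, duplicates its size as a footer.  */
  set_foot (remainder, remainder_size);
}
#endif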
02008 
02009 
02010 /*
02011   -------------------- Internal data structures --------------------
02012 
02013    All internal state is held in an instance of malloc_state defined
02014    below. There are no other static variables, except in two optional
02015    cases:
02016    * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
02017    * If HAVE_MMAP is true, but mmap doesn't support
02018      MAP_ANONYMOUS, a dummy file descriptor for mmap.
02019 
02020    Beware of lots of tricks that minimize the total bookkeeping space
02021    requirements. The result is a little over 1K bytes (for 4-byte
02022    pointers and size_t).
02023 */
02024 
02025 /*
02026   Bins
02027 
02028     An array of bin headers for free chunks. Each bin is doubly
02029     linked.  The bins are approximately proportionally (log) spaced.
02030     There are a lot of these bins (128). This may look excessive, but
02031     works very well in practice.  Most bins hold sizes that are
02032     unusual as malloc request sizes, but are more usual for fragments
02033     and consolidated sets of chunks, which is what these bins hold, so
02034     they can be found quickly.  All procedures maintain the invariant
02035     that no consolidated chunk physically borders another one, so each
02036     chunk in a list is known to be preceded and followed by either
02037     inuse chunks or the ends of memory.
02038 
02039     Chunks in bins are kept in size order, with ties going to the
02040     approximately least recently used chunk. Ordering isn't needed
02041     for the small bins, which all contain the same-sized chunks, but
02042     facilitates best-fit allocation for larger chunks. These lists
02043     are just sequential. Keeping them in order almost never requires
02044     enough traversal to warrant using fancier ordered data
02045     structures.
02046 
02047     Chunks of the same size are linked with the most
02048     recently freed at the front, and allocations are taken from the
02049     back.  This results in LRU (FIFO) allocation order, which tends
02050     to give each chunk an equal opportunity to be consolidated with
02051     adjacent freed chunks, resulting in larger free chunks and less
02052     fragmentation.
02053 
02054     To simplify use in double-linked lists, each bin header acts
02055     as a malloc_chunk. This avoids special-casing for headers.
02056     But to conserve space and improve locality, we allocate
02057     only the fd/bk pointers of bins, and then use repositioning tricks
02058     to treat these as the fields of a malloc_chunk*.
02059 */
02060 
02061 typedef struct malloc_chunk* mbinptr;
02062 
02063 /* addressing -- note that bin_at(0) does not exist */
02064 #define bin_at(m, i) \
02065   (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))                         \
02066             - offsetof (struct malloc_chunk, fd))
02067 
02068 /* analog of ++bin */
02069 #define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
02070 
02071 /* Reminders about list directionality within bins */
02072 #define first(b)     ((b)->fd)
02073 #define last(b)      ((b)->bk)
02074 
02075 /* Take a chunk off a bin list */
02076 #define unlink(P, BK, FD) {                                            \
02077   FD = P->fd;                                                          \
02078   BK = P->bk;                                                          \
02079   if (__builtin_expect (FD->bk != P || BK->fd != P, 0))                \
02080     malloc_printerr (check_action, "corrupted double-linked list", P); \
02081   else {                                                               \
02082     FD->bk = BK;                                                       \
02083     BK->fd = FD;                                                       \
02084     if (!in_smallbin_range (P->size)                                  \
02085        && __builtin_expect (P->fd_nextsize != NULL, 0)) {             \
02086       assert (P->fd_nextsize->bk_nextsize == P);               \
02087       assert (P->bk_nextsize->fd_nextsize == P);               \
02088       if (FD->fd_nextsize == NULL) {                                  \
02089        if (P->fd_nextsize == P)                                \
02090          FD->fd_nextsize = FD->bk_nextsize = FD;               \
02091        else {                                                  \
02092          FD->fd_nextsize = P->fd_nextsize;                            \
02093          FD->bk_nextsize = P->bk_nextsize;                            \
02094          P->fd_nextsize->bk_nextsize = FD;                            \
02095          P->bk_nextsize->fd_nextsize = FD;                            \
02096        }                                                       \
02097       }       else {                                                  \
02098        P->fd_nextsize->bk_nextsize = P->bk_nextsize;                  \
02099        P->bk_nextsize->fd_nextsize = P->fd_nextsize;                  \
02100       }                                                               \
02101     }                                                          \
02102   }                                                                    \
02103 }
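
/*
  Illustrative example (standalone, not the allocator code): the check at
  the top of unlink is the classic "safe unlinking" test.  For a node P
  that really is on a circular doubly-linked list, P->fd->bk and P->bk->fd
  must both point back at P; a corrupted pointer fails the test before it
  can be written through.  A minimal sketch of the same idea:
*/
#if 0 /* example only */
#include <assert.h>

struct node { struct node *fd, *bk; };

static void safe_unlink (struct node *p)
{
  struct node *fd = p->fd;
  struct node *bk = p->bk;

  /* Refuse to unlink unless both neighbours agree that p sits between
     them; this is what triggers "corrupted double-linked list" above.  */
  assert (fd->bk == p && bk->fd == p);

  fd->bk = bk;
  bk->fd = fd;
}
#endif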
02104 
02105 /*
02106   Indexing
02107 
02108     Bins for sizes < 512 bytes contain chunks of all the same size, spaced
02109     8 bytes apart. Larger bins are approximately logarithmically spaced:
02110 
02111     64 bins of size       8
02112     32 bins of size      64
02113     16 bins of size     512
02114      8 bins of size    4096
02115      4 bins of size   32768
02116      2 bins of size  262144
02117      1 bin  of size what's left
02118 
02119     There is actually a little bit of slop in the numbers in bin_index
02120     for the sake of speed. This makes no difference elsewhere.
02121 
02122     The bins top out around 1MB because we expect to service large
02123     requests via mmap.
02124 */
02125 
02126 #define NBINS             128
02127 #define NSMALLBINS         64
02128 #define SMALLBIN_WIDTH    MALLOC_ALIGNMENT
02129 #define MIN_LARGE_SIZE    (NSMALLBINS * SMALLBIN_WIDTH)
02130 
02131 #define in_smallbin_range(sz)  \
02132   ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
02133 
02134 #define smallbin_index(sz) \
02135   (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))
02136 
02137 #define largebin_index_32(sz)                                                \
02138 (((((unsigned long)(sz)) >>  6) <= 38)?  56 + (((unsigned long)(sz)) >>  6): \
02139  ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
02140  ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
02141  ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
02142  ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
02143                                         126)
02144 
02145 // XXX It remains to be seen whether it is good to keep the widths of
02146 // XXX the buckets the same or whether it should be scaled by a factor
02147 // XXX of two as well.
02148 #define largebin_index_64(sz)                                                \
02149 (((((unsigned long)(sz)) >>  6) <= 48)?  48 + (((unsigned long)(sz)) >>  6): \
02150  ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
02151  ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
02152  ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
02153  ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
02154                                         126)
02155 
02156 #define largebin_index(sz) \
02157   (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))
02158 
02159 #define bin_index(sz) \
02160  ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
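
/*
  Worked example (illustrative, assuming a 32-bit layout with
  SMALLBIN_WIDTH == 8 and therefore MIN_LARGE_SIZE == 512):

    bin_index(48)   == smallbin_index(48)      ==  48 >> 3          ==  6
    bin_index(504)  == smallbin_index(504)     == 504 >> 3          == 63
    bin_index(512)  == largebin_index_32(512)  == 56 + (512 >> 6)   == 64
    bin_index(1024) == largebin_index_32(1024) == 56 + (1024 >> 6)  == 72
    bin_index(4096) == largebin_index_32(4096) == 91 + (4096 >> 9)  == 99

  so, for instance, chunks of 512..575 bytes all share bin 64, the first
  large bin.
*/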
02161 
02162 
02163 /*
02164   Unsorted chunks
02165 
02166     All remainders from chunk splits, as well as all returned chunks,
02167     are first placed in the "unsorted" bin. They are then placed
02168     in regular bins after malloc gives them ONE chance to be used before
02169     binning. So, basically, the unsorted_chunks list acts as a queue,
02170     with chunks being placed on it in free (and malloc_consolidate),
02171     and taken off (to be either used or placed in bins) in malloc.
02172 
02173     The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
02174     does not have to be taken into account in size comparisons.
02175 */
02176 
02177 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
02178 #define unsorted_chunks(M)          (bin_at(M, 1))
02179 
02180 /*
02181   Top
02182 
02183     The top-most available chunk (i.e., the one bordering the end of
02184     available memory) is treated specially. It is never included in
02185     any bin, is used only if no other chunk is available, and is
02186     released back to the system if it is very large (see
02187     M_TRIM_THRESHOLD).  Because top initially
02188     points to its own bin with initial zero size, thus forcing
02189     extension on the first malloc request, we avoid having any special
02190     code in malloc to check whether it even exists yet. But we still
02191     need to do so when getting memory from system, so we make
02192     initial_top treat the bin as a legal but unusable chunk during the
02193     interval between initialization and the first call to
02194     sYSMALLOc. (This is somewhat delicate, since it relies on
02195     the 2 preceding words to be zero during this interval as well.)
02196 */
02197 
02198 /* Conveniently, the unsorted bin can be used as dummy top on first call */
02199 #define initial_top(M)              (unsorted_chunks(M))
02200 
02201 /*
02202   Binmap
02203 
02204     To help compensate for the large number of bins, a one-level index
02205     structure is used for bin-by-bin searching.  `binmap' is a
02206     bitvector recording whether bins are definitely empty so they can
02207     be skipped over during traversals.  The bits are NOT always
02208     cleared as soon as bins are empty, but instead only
02209     when they are noticed to be empty during traversal in malloc.
02210 */
02211 
02212 /* Conservatively use 32 bits per map word, even if on 64bit system */
02213 #define BINMAPSHIFT      5
02214 #define BITSPERMAP       (1U << BINMAPSHIFT)
02215 #define BINMAPSIZE       (NBINS / BITSPERMAP)
02216 
02217 #define idx2block(i)     ((i) >> BINMAPSHIFT)
02218 #define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
02219 
02220 #define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
02221 #define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
02222 #define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
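
/*
  Worked example (illustrative): with BINMAPSHIFT == 5 each map word covers
  32 bins, so BINMAPSIZE == 128 / 32 == 4.  For bin 70:

    idx2block(70) == 70 >> 5        == 2
    idx2bit(70)   == 1 << (70 & 31) == 1 << 6

  i.e. mark_bin(m, 70) sets bit 6 of m->binmap[2].
*/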
02223 
02224 /*
02225   Fastbins
02226 
02227     An array of lists holding recently freed small chunks.  Fastbins
02228     are not doubly linked.  It is faster to single-link them, and
02229     since chunks are never removed from the middles of these lists,
02230     double linking is not necessary. Also, unlike regular bins, they
02231     are not even processed in FIFO order (they use faster LIFO) since
02232     ordering doesn't much matter in the transient contexts in which
02233     fastbins are normally used.
02234 
02235     Chunks in fastbins keep their inuse bit set, so they cannot
02236     be consolidated with other free chunks. malloc_consolidate
02237     releases all chunks in fastbins and consolidates them with
02238     other free chunks.
02239 */
02240 
02241 typedef struct malloc_chunk* mfastbinptr;
02242 
02243 /* offset 2 to use otherwise unindexable first 2 bins */
02244 #define fastbin_index(sz)        ((((unsigned int)(sz)) >> 3) - 2)
02245 
02246 /* The maximum fastbin request size we support */
02247 #define MAX_FAST_SIZE     80
02248 
02249 #define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
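
/*
  Worked example (illustrative, assuming a 32-bit layout with SIZE_SZ == 4
  and MALLOC_ALIGNMENT == 8): chunk sizes 16, 24, 32, ... map to fastbin
  indices 0, 1, 2, ...:

    fastbin_index(16) == (16 >> 3) - 2 == 0
    fastbin_index(24) == (24 >> 3) - 2 == 1

  and since request2size(MAX_FAST_SIZE) == request2size(80) == 88,
  NFASTBINS == fastbin_index(88) + 1 == 10.
*/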
02250 
02251 /*
02252   FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
02253   that triggers automatic consolidation of possibly-surrounding
02254   fastbin chunks. This is a heuristic, so the exact value should not
02255   matter too much. It is defined at half the default trim threshold as a
02256   compromise heuristic to only attempt consolidation if it is likely
02257   to lead to trimming. However, it is not dynamically tunable, since
02258   consolidation reduces fragmentation surrounding large chunks even
02259   if trimming is not used.
02260 */
02261 
02262 #define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
02263 
02264 /*
02265   Since the lowest 2 bits in max_fast don't matter in size comparisons,
02266   they are used as flags.
02267 */
02268 
02269 /*
02270   FASTCHUNKS_BIT held in max_fast indicates that there are probably
02271   some fastbin chunks. It is set true on entering a chunk into any
02272   fastbin, and cleared only in malloc_consolidate.
02273 
02274   The truth value is inverted so that have_fastchunks will be true
02275   upon startup (since statics are zero-filled), simplifying
02276   initialization checks.
02277 */
02278 
02279 #define FASTCHUNKS_BIT        (1U)
02280 
02281 #define have_fastchunks(M)     (((M)->flags &  FASTCHUNKS_BIT) == 0)
02282 #define clear_fastchunks(M)    ((M)->flags |=  FASTCHUNKS_BIT)
02283 #define set_fastchunks(M)      ((M)->flags &= ~FASTCHUNKS_BIT)
02284 
02285 /*
02286   NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
02287   regions.  Otherwise, contiguity is exploited in merging together,
02288   when possible, results from consecutive MORECORE calls.
02289 
02290   The initial value comes from MORECORE_CONTIGUOUS, but is
02291   changed dynamically if mmap is ever used as an sbrk substitute.
02292 */
02293 
02294 #define NONCONTIGUOUS_BIT     (2U)
02295 
02296 #define contiguous(M)          (((M)->flags &  NONCONTIGUOUS_BIT) == 0)
02297 #define noncontiguous(M)       (((M)->flags &  NONCONTIGUOUS_BIT) != 0)
02298 #define set_noncontiguous(M)   ((M)->flags |=  NONCONTIGUOUS_BIT)
02299 #define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
02300 
02301 /*
02302    Set value of max_fast.
02303    Use impossibly small value if 0.
02304    Precondition: there are no existing fastbin chunks.
02305    Setting the value clears fastchunk bit but preserves noncontiguous bit.
02306 */
02307 
02308 #define set_max_fast(s) \
02309   global_max_fast = ((s) == 0)? SMALLBIN_WIDTH: request2size(s)
02310 #define get_max_fast() global_max_fast
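
/*
  Illustrative example (not compiled): the limit controlled by
  set_max_fast is exposed to applications as the M_MXFAST mallopt
  parameter.  Setting it to 0 effectively disables fastbins; raising it
  (up to MAX_FAST_SIZE) makes more small requests eligible for the fast,
  non-consolidating path.
*/
#if 0 /* example only */
#include <malloc.h>

static void example_tune_fastbins (void)
{
  mallopt (M_MXFAST, 0);   /* disable fastbin caching */
  mallopt (M_MXFAST, 80);  /* or allow requests up to 80 bytes to use it */
}
#endif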
02311 
02312 
02313 /*
02314    ----------- Internal state representation and initialization -----------
02315 */
02316 
02317 struct malloc_state {
02318   /* Serialize access.  */
02319   mutex_t mutex;
02320 
02321   /* Flags (formerly in max_fast).  */
02322   int flags;
02323 
02324 #if THREAD_STATS
02325   /* Statistics for locking.  Only used if THREAD_STATS is defined.  */
02326   long stat_lock_direct, stat_lock_loop, stat_lock_wait;
02327 #endif
02328 
02329   /* Fastbins */
02330   mfastbinptr      fastbins[NFASTBINS];
02331 
02332   /* Base of the topmost chunk -- not otherwise kept in a bin */
02333   mchunkptr        top;
02334 
02335   /* The remainder from the most recent split of a small request */
02336   mchunkptr        last_remainder;
02337 
02338   /* Normal bins packed as described above */
02339   mchunkptr        bins[NBINS * 2 - 2];
02340 
02341   /* Bitmap of bins */
02342   unsigned int     binmap[BINMAPSIZE];
02343 
02344   /* Linked list */
02345   struct malloc_state *next;
02346 
02347   /* Memory allocated from the system in this arena.  */
02348   INTERNAL_SIZE_T system_mem;
02349   INTERNAL_SIZE_T max_system_mem;
02350 };
02351 
02352 struct malloc_par {
02353   /* Tunable parameters */
02354   unsigned long    trim_threshold;
02355   INTERNAL_SIZE_T  top_pad;
02356   INTERNAL_SIZE_T  mmap_threshold;
02357 
02358   /* Memory map support */
02359   int              n_mmaps;
02360   int              n_mmaps_max;
02361   int              max_n_mmaps;
02362   /* the mmap_threshold is dynamic, until the user sets
02363      it manually, at which point we need to disable any
02364      dynamic behavior. */
02365   int              no_dyn_threshold;
02366 
02367   /* Cache malloc_getpagesize */
02368   unsigned int     pagesize;
02369 
02370   /* Statistics */
02371   INTERNAL_SIZE_T  mmapped_mem;
02372   /*INTERNAL_SIZE_T  sbrked_mem;*/
02373   /*INTERNAL_SIZE_T  max_sbrked_mem;*/
02374   INTERNAL_SIZE_T  max_mmapped_mem;
02375   INTERNAL_SIZE_T  max_total_mem; /* only kept for NO_THREADS */
02376 
02377   /* First address handed out by MORECORE/sbrk.  */
02378   char*            sbrk_base;
02379 };
02380 
02381 /* There are several instances of this struct ("arenas") in this
02382    malloc.  If you are adapting this malloc in a way that does NOT use
02383    a static or mmapped malloc_state, you MUST explicitly zero-fill it
02384    before using. This malloc relies on the property that malloc_state
02385    is initialized to all zeroes (as is true of C statics).  */
02386 
02387 static struct malloc_state main_arena;
02388 
02389 /* There is only one instance of the malloc parameters.  */
02390 
02391 static struct malloc_par mp_;
02392 
02393 
02394 /* Maximum size of memory handled in fastbins.  */
02395 static INTERNAL_SIZE_T global_max_fast;
02396 
02397 /*
02398   Initialize a malloc_state struct.
02399 
02400   This is called only from within malloc_consolidate, which needs
02401   be called in the same contexts anyway.  It is never called directly
02402   outside of malloc_consolidate because some optimizing compilers try
02403   to inline it at all call points, which turns out not to be an
02404   optimization at all. (Inlining it in malloc_consolidate is fine though.)
02405 */
02406 
02407 #if __STD_C
02408 static void malloc_init_state(mstate av)
02409 #else
02410 static void malloc_init_state(av) mstate av;
02411 #endif
02412 {
02413   int     i;
02414   mbinptr bin;
02415 
02416   /* Establish circular links for normal bins */
02417   for (i = 1; i < NBINS; ++i) {
02418     bin = bin_at(av,i);
02419     bin->fd = bin->bk = bin;
02420   }
02421 
02422 #if MORECORE_CONTIGUOUS
02423   if (av != &main_arena)
02424 #endif
02425     set_noncontiguous(av);
02426   if (av == &main_arena)
02427     set_max_fast(DEFAULT_MXFAST);
02428   av->flags |= FASTCHUNKS_BIT;
02429 
02430   av->top            = initial_top(av);
02431 }
02432 
02433 /*
02434    Other internal utilities operating on mstates
02435 */
02436 
02437 #if __STD_C
02438 static Void_t*  sYSMALLOc(INTERNAL_SIZE_T, mstate);
02439 static int      sYSTRIm(size_t, mstate);
02440 static void     malloc_consolidate(mstate);
02441 #ifndef _LIBC
02442 static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
02443 #endif
02444 #else
02445 static Void_t*  sYSMALLOc();
02446 static int      sYSTRIm();
02447 static void     malloc_consolidate();
02448 static Void_t** iALLOc();
02449 #endif
02450 
02451 
02452 /* -------------- Early definitions for debugging hooks ---------------- */
02453 
02454 /* Define and initialize the hook variables.  These weak definitions must
02455    appear before any use of the variables in a function (arena.c uses one).  */
02456 #ifndef weak_variable
02457 #ifndef _LIBC
02458 #define weak_variable 
02459 #else
02460 /* In GNU libc we want the hook variables to be weak definitions to
02461    avoid a problem with Emacs.  */
02462 #define weak_variable weak_function
02463 #endif
02464 #endif
02465 
02466 /* Forward declarations.  */
02467 static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz,
02468                                        const __malloc_ptr_t caller));
02469 static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz,
02470                                         const __malloc_ptr_t caller));
02471 static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
02472                                          const __malloc_ptr_t caller));
02473 
02474 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
02475 void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
02476                                const __malloc_ptr_t) = NULL;
02477 __malloc_ptr_t weak_variable (*__malloc_hook)
02478      (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
02479 __malloc_ptr_t weak_variable (*__realloc_hook)
02480      (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
02481      = realloc_hook_ini;
02482 __malloc_ptr_t weak_variable (*__memalign_hook)
02483      (size_t __alignment, size_t __size, const __malloc_ptr_t)
02484      = memalign_hook_ini;
02485 void weak_variable (*__after_morecore_hook) (void) = NULL;
02486 
02487 
02488 /* ---------------- Error behavior ------------------------------------ */
02489 
02490 #ifndef DEFAULT_CHECK_ACTION
02491 #define DEFAULT_CHECK_ACTION 3
02492 #endif
02493 
02494 static int check_action = DEFAULT_CHECK_ACTION;
02495 
02496 
02497 /* ------------------ Testing support ----------------------------------*/
02498 
02499 static int perturb_byte;
02500 
02501 #define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
02502 #define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
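
/*
  Illustrative example (not compiled): perturb_byte is the debugging aid
  behind the MALLOC_PERTURB_ environment variable and the M_PERTURB
  mallopt parameter.  When it is non-zero, newly allocated memory is
  filled with perturb_byte ^ 0xff and freed memory with perturb_byte,
  which helps expose reads of uninitialized or already-freed memory.
*/
#if 0 /* example only */
#include <malloc.h>

static void example_enable_perturbing (void)
{
  /* Roughly equivalent to running with MALLOC_PERTURB_=165 in the
     environment.  */
  mallopt (M_PERTURB, 0xa5);
}
#endif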
02503 
02504 
02505 /* ------------------- Support for multiple arenas -------------------- */
02506 #include "arena.c"
02507 
02508 /*
02509   Debugging support
02510 
02511   These routines make a number of assertions about the states
02512   of data structures that should be true at all times. If any
02513   are not true, it's very likely that a user program has somehow
02514   trashed memory. (It's also possible that there is a coding error
02515   in malloc. In which case, please report it!)
02516 */
02517 
02518 #if ! MALLOC_DEBUG
02519 
02520 #define check_chunk(A,P)
02521 #define check_free_chunk(A,P)
02522 #define check_inuse_chunk(A,P)
02523 #define check_remalloced_chunk(A,P,N)
02524 #define check_malloced_chunk(A,P,N)
02525 #define check_malloc_state(A)
02526 
02527 #else
02528 
02529 #define check_chunk(A,P)              do_check_chunk(A,P)
02530 #define check_free_chunk(A,P)         do_check_free_chunk(A,P)
02531 #define check_inuse_chunk(A,P)        do_check_inuse_chunk(A,P)
02532 #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
02533 #define check_malloced_chunk(A,P,N)   do_check_malloced_chunk(A,P,N)
02534 #define check_malloc_state(A)         do_check_malloc_state(A)
02535 
02536 /*
02537   Properties of all chunks
02538 */
02539 
02540 #if __STD_C
02541 static void do_check_chunk(mstate av, mchunkptr p)
02542 #else
02543 static void do_check_chunk(av, p) mstate av; mchunkptr p;
02544 #endif
02545 {
02546   unsigned long sz = chunksize(p);
02547   /* min and max possible addresses assuming contiguous allocation */
02548   char* max_address = (char*)(av->top) + chunksize(av->top);
02549   char* min_address = max_address - av->system_mem;
02550 
02551   if (!chunk_is_mmapped(p)) {
02552 
02553     /* Has legal address ... */
02554     if (p != av->top) {
02555       if (contiguous(av)) {
02556         assert(((char*)p) >= min_address);
02557         assert(((char*)p + sz) <= ((char*)(av->top)));
02558       }
02559     }
02560     else {
02561       /* top size is always at least MINSIZE */
02562       assert((unsigned long)(sz) >= MINSIZE);
02563       /* top predecessor always marked inuse */
02564       assert(prev_inuse(p));
02565     }
02566 
02567   }
02568   else {
02569 #if HAVE_MMAP
02570     /* address is outside main heap  */
02571     if (contiguous(av) && av->top != initial_top(av)) {
02572       assert(((char*)p) < min_address || ((char*)p) >= max_address);
02573     }
02574     /* chunk is page-aligned */
02575     assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
02576     /* mem is aligned */
02577     assert(aligned_OK(chunk2mem(p)));
02578 #else
02579     /* force an appropriate assert violation if debug set */
02580     assert(!chunk_is_mmapped(p));
02581 #endif
02582   }
02583 }
02584 
02585 /*
02586   Properties of free chunks
02587 */
02588 
02589 #if __STD_C
02590 static void do_check_free_chunk(mstate av, mchunkptr p)
02591 #else
02592 static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
02593 #endif
02594 {
02595   INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
02596   mchunkptr next = chunk_at_offset(p, sz);
02597 
02598   do_check_chunk(av, p);
02599 
02600   /* Chunk must claim to be free ... */
02601   assert(!inuse(p));
02602   assert (!chunk_is_mmapped(p));
02603 
02604   /* Unless a special marker, must have OK fields */
02605   if ((unsigned long)(sz) >= MINSIZE)
02606   {
02607     assert((sz & MALLOC_ALIGN_MASK) == 0);
02608     assert(aligned_OK(chunk2mem(p)));
02609     /* ... matching footer field */
02610     assert(next->prev_size == sz);
02611     /* ... and is fully consolidated */
02612     assert(prev_inuse(p));
02613     assert (next == av->top || inuse(next));
02614 
02615     /* ... and has minimally sane links */
02616     assert(p->fd->bk == p);
02617     assert(p->bk->fd == p);
02618   }
02619   else /* markers are always of size SIZE_SZ */
02620     assert(sz == SIZE_SZ);
02621 }
02622 
02623 /*
02624   Properties of inuse chunks
02625 */
02626 
02627 #if __STD_C
02628 static void do_check_inuse_chunk(mstate av, mchunkptr p)
02629 #else
02630 static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
02631 #endif
02632 {
02633   mchunkptr next;
02634 
02635   do_check_chunk(av, p);
02636 
02637   if (chunk_is_mmapped(p))
02638     return; /* mmapped chunks have no next/prev */
02639 
02640   /* Check whether it claims to be in use ... */
02641   assert(inuse(p));
02642 
02643   next = next_chunk(p);
02644 
02645   /* ... and is surrounded by OK chunks.
02646     Since more things can be checked with free chunks than inuse ones,
02647     if an inuse chunk borders them and debug is on, it's worth doing them.
02648   */
02649   if (!prev_inuse(p))  {
02650     /* Note that we cannot even look at prev unless it is not inuse */
02651     mchunkptr prv = prev_chunk(p);
02652     assert(next_chunk(prv) == p);
02653     do_check_free_chunk(av, prv);
02654   }
02655 
02656   if (next == av->top) {
02657     assert(prev_inuse(next));
02658     assert(chunksize(next) >= MINSIZE);
02659   }
02660   else if (!inuse(next))
02661     do_check_free_chunk(av, next);
02662 }
02663 
02664 /*
02665   Properties of chunks recycled from fastbins
02666 */
02667 
02668 #if __STD_C
02669 static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
02670 #else
02671 static void do_check_remalloced_chunk(av, p, s)
02672 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
02673 #endif
02674 {
02675   INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
02676 
02677   if (!chunk_is_mmapped(p)) {
02678     assert(av == arena_for_chunk(p));
02679     if (chunk_non_main_arena(p))
02680       assert(av != &main_arena);
02681     else
02682       assert(av == &main_arena);
02683   }
02684 
02685   do_check_inuse_chunk(av, p);
02686 
02687   /* Legal size ... */
02688   assert((sz & MALLOC_ALIGN_MASK) == 0);
02689   assert((unsigned long)(sz) >= MINSIZE);
02690   /* ... and alignment */
02691   assert(aligned_OK(chunk2mem(p)));
02692   /* chunk is less than MINSIZE more than request */
02693   assert((long)(sz) - (long)(s) >= 0);
02694   assert((long)(sz) - (long)(s + MINSIZE) < 0);
02695 }
02696 
02697 /*
02698   Properties of nonrecycled chunks at the point they are malloced
02699 */
02700 
02701 #if __STD_C
02702 static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
02703 #else
02704 static void do_check_malloced_chunk(av, p, s)
02705 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
02706 #endif
02707 {
02708   /* same as recycled case ... */
02709   do_check_remalloced_chunk(av, p, s);
02710 
02711   /*
02712     ... plus,  must obey implementation invariant that prev_inuse is
02713     always true of any allocated chunk; i.e., that each allocated
02714     chunk borders either a previously allocated and still in-use
02715     chunk, or the base of its memory arena. This is ensured
02716     by making all allocations from the `lowest' part of any found
02717     chunk.  This does not necessarily hold however for chunks
02718     recycled via fastbins.
02719   */
02720 
02721   assert(prev_inuse(p));
02722 }
02723 
02724 
02725 /*
02726   Properties of malloc_state.
02727 
02728   This may be useful for debugging malloc, as well as detecting user
02729   programmer errors that somehow write into malloc_state.
02730 
02731   If you are extending or experimenting with this malloc, you can
02732   probably figure out how to hack this routine to print out or
02733   display chunk addresses, sizes, bins, and other instrumentation.
02734 */
02735 
02736 static void do_check_malloc_state(mstate av)
02737 {
02738   int i;
02739   mchunkptr p;
02740   mchunkptr q;
02741   mbinptr b;
02742   unsigned int idx;
02743   INTERNAL_SIZE_T size;
02744   unsigned long total = 0;
02745   int max_fast_bin;
02746 
02747   /* internal size_t must be no wider than pointer type */
02748   assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
02749 
02750   /* alignment is a power of 2 */
02751   assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
02752 
02753   /* cannot run remaining checks until fully initialized */
02754   if (av->top == 0 || av->top == initial_top(av))
02755     return;
02756 
02757   /* pagesize is a power of 2 */
02758   assert((mp_.pagesize & (mp_.pagesize-1)) == 0);
02759 
02760   /* A contiguous main_arena is consistent with sbrk_base.  */
02761   if (av == &main_arena && contiguous(av))
02762     assert((char*)mp_.sbrk_base + av->system_mem ==
02763           (char*)av->top + chunksize(av->top));
02764 
02765   /* properties of fastbins */
02766 
02767   /* max_fast is in allowed range */
02768   assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));
02769 
02770   max_fast_bin = fastbin_index(get_max_fast ());
02771 
02772   for (i = 0; i < NFASTBINS; ++i) {
02773     p = av->fastbins[i];
02774 
02775     /* The following test can only be performed for the main arena.
02776        While mallopt calls malloc_consolidate to get rid of all fast
02777        bins (especially those larger than the new maximum) this only
02778        happens for the main arena.  Trying to do this for any
02779        other arena would mean those arenas have to be locked and
02780        malloc_consolidate be called for them.  This is excessive.  And
02781        even if this is acceptable to somebody it still cannot solve
02782        the problem completely since if the arena is locked a
02783        concurrent malloc call might create a new arena which then
02784        could use the newly invalid fast bins.  */
02785 
02786     /* all bins past max_fast are empty */
02787     if (av == &main_arena && i > max_fast_bin)
02788       assert(p == 0);
02789 
02790     while (p != 0) {
02791       /* each chunk claims to be inuse */
02792       do_check_inuse_chunk(av, p);
02793       total += chunksize(p);
02794       /* chunk belongs in this bin */
02795       assert(fastbin_index(chunksize(p)) == i);
02796       p = p->fd;
02797     }
02798   }
02799 
02800   if (total != 0)
02801     assert(have_fastchunks(av));
02802   else if (!have_fastchunks(av))
02803     assert(total == 0);
02804 
02805   /* check normal bins */
02806   for (i = 1; i < NBINS; ++i) {
02807     b = bin_at(av,i);
02808 
02809     /* binmap is accurate (except for bin 1 == unsorted_chunks) */
02810     if (i >= 2) {
02811       unsigned int binbit = get_binmap(av,i);
02812       int empty = last(b) == b;
02813       if (!binbit)
02814         assert(empty);
02815       else if (!empty)
02816         assert(binbit);
02817     }
02818 
02819     for (p = last(b); p != b; p = p->bk) {
02820       /* each chunk claims to be free */
02821       do_check_free_chunk(av, p);
02822       size = chunksize(p);
02823       total += size;
02824       if (i >= 2) {
02825         /* chunk belongs in bin */
02826         idx = bin_index(size);
02827         assert(idx == i);
02828         /* lists are sorted */
02829         assert(p->bk == b ||
02830                (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
02831 
02832        if (!in_smallbin_range(size))
02833          {
02834            if (p->fd_nextsize != NULL)
02835              {
02836               if (p->fd_nextsize == p)
02837                 assert (p->bk_nextsize == p);
02838               else
02839                 {
02840                   if (p->fd_nextsize == first (b))
02841                     assert (chunksize (p) < chunksize (p->fd_nextsize));
02842                   else
02843                     assert (chunksize (p) > chunksize (p->fd_nextsize));
02844 
02845                   if (p == first (b))
02846                     assert (chunksize (p) > chunksize (p->bk_nextsize));
02847                   else
02848                     assert (chunksize (p) < chunksize (p->bk_nextsize));
02849                 }
02850              }
02851            else
02852              assert (p->bk_nextsize == NULL);
02853          }
02854       } else if (!in_smallbin_range(size))
02855        assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
02856       /* chunk is followed by a legal chain of inuse chunks */
02857       for (q = next_chunk(p);
02858            (q != av->top && inuse(q) &&
02859              (unsigned long)(chunksize(q)) >= MINSIZE);
02860            q = next_chunk(q))
02861         do_check_inuse_chunk(av, q);
02862     }
02863   }
02864 
02865   /* top chunk is OK */
02866   check_chunk(av, av->top);
02867 
02868   /* sanity checks for statistics */
02869 
02870 #ifdef NO_THREADS
02871   assert(total <= (unsigned long)(mp_.max_total_mem));
02872   assert(mp_.n_mmaps >= 0);
02873 #endif
02874   assert(mp_.n_mmaps <= mp_.max_n_mmaps);
02875 
02876   assert((unsigned long)(av->system_mem) <=
02877          (unsigned long)(av->max_system_mem));
02878 
02879   assert((unsigned long)(mp_.mmapped_mem) <=
02880          (unsigned long)(mp_.max_mmapped_mem));
02881 
02882 #ifdef NO_THREADS
02883   assert((unsigned long)(mp_.max_total_mem) >=
02884          (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
02885 #endif
02886 }
02887 #endif
02888 
02889 
02890 /* ----------------- Support for debugging hooks -------------------- */
02891 #include "hooks.c"
02892 
02893 
02894 /* ----------- Routines dealing with system allocation -------------- */
02895 
02896 /*
02897   sysmalloc handles malloc cases requiring more memory from the system.
02898   On entry, it is assumed that av->top does not have enough
02899   space to service request for nb bytes, thus requiring that av->top
02900   be extended or replaced.
02901 */
02902 
02903 #if __STD_C
02904 static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
02905 #else
02906 static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
02907 #endif
02908 {
02909   mchunkptr       old_top;        /* incoming value of av->top */
02910   INTERNAL_SIZE_T old_size;       /* its size */
02911   char*           old_end;        /* its end address */
02912 
02913   long            size;           /* arg to first MORECORE or mmap call */
02914   char*           brk;            /* return value from MORECORE */
02915 
02916   long            correction;     /* arg to 2nd MORECORE call */
02917   char*           snd_brk;        /* 2nd return val */
02918 
02919   INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
02920   INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
02921   char*           aligned_brk;    /* aligned offset into brk */
02922 
02923   mchunkptr       p;              /* the allocated/returned chunk */
02924   mchunkptr       remainder;      /* remainder from allocation */
02925   unsigned long   remainder_size; /* its size */
02926 
02927   unsigned long   sum;            /* for updating stats */
02928 
02929   size_t          pagemask  = mp_.pagesize - 1;
02930   bool            tried_mmap = false;
02931 
02932 
02933 #if HAVE_MMAP
02934 
02935   /*
02936     If have mmap, and the request size meets the mmap threshold, and
02937     the system supports mmap, and there are few enough currently
02938     allocated mmapped regions, try to directly map this request
02939     rather than expanding top.
02940   */
02941 
02942   if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
02943       (mp_.n_mmaps < mp_.n_mmaps_max)) {
02944 
02945     char* mm;             /* return value from mmap call*/
02946 
02947   try_mmap:
02948     /*
02949       Round up size to nearest page.  For mmapped chunks, the overhead
02950       is one SIZE_SZ unit larger than for normal chunks, because there
02951       is no following chunk whose prev_size field could be used.
02952     */
02953 #if 1
02954     /* See the front_misalign handling below; for glibc there is no
02955        need for further alignments.  */
02956     size = (nb + SIZE_SZ + pagemask) & ~pagemask;
02957 #else
02958     size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
02959 #endif
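    /* Editorial example (illustrative values only): with nb == 200000,
       SIZE_SZ == 8 and a 4096-byte page, the rounding above yields
       size == 200704, i.e. 49 whole pages.  */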
02960     tried_mmap = true;
02961 
02962     /* Don't try if size wraps around 0 */
02963     if ((unsigned long)(size) > (unsigned long)(nb)) {
02964 
02965       mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
02966 
02967       if (mm != MAP_FAILED) {
02968 
02969         /*
02970           The offset to the start of the mmapped region is stored
02971           in the prev_size field of the chunk. This allows us to adjust
02972           returned start address to meet alignment requirements here
02973           and in memalign(), and still be able to compute proper
02974           address argument for later munmap in free() and realloc().
02975         */
02976 
02977 #if 1
02978        /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
02979           MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
02980           aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
02981         assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
02982 #else
02983         front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
02984         if (front_misalign > 0) {
02985           correction = MALLOC_ALIGNMENT - front_misalign;
02986           p = (mchunkptr)(mm + correction);
02987           p->prev_size = correction;
02988           set_head(p, (size - correction) |IS_MMAPPED);
02989         }
02990         else
02991 #endif
02992          {
02993            p = (mchunkptr)mm;
02994            set_head(p, size|IS_MMAPPED);
02995          }
02996 
02997         /* update statistics */
02998 
02999         if (++mp_.n_mmaps > mp_.max_n_mmaps)
03000           mp_.max_n_mmaps = mp_.n_mmaps;
03001 
03002         sum = mp_.mmapped_mem += size;
03003         if (sum > (unsigned long)(mp_.max_mmapped_mem))
03004           mp_.max_mmapped_mem = sum;
03005 #ifdef NO_THREADS
03006         sum += av->system_mem;
03007         if (sum > (unsigned long)(mp_.max_total_mem))
03008           mp_.max_total_mem = sum;
03009 #endif
03010 
03011         check_chunk(av, p);
03012 
03013         return chunk2mem(p);
03014       }
03015     }
03016   }
03017 #endif
03018 
03019   /* Record incoming configuration of top */
03020 
03021   old_top  = av->top;
03022   old_size = chunksize(old_top);
03023   old_end  = (char*)(chunk_at_offset(old_top, old_size));
03024 
03025   brk = snd_brk = (char*)(MORECORE_FAILURE);
03026 
03027   /*
03028      If not the first time through, we require old_size to be
03029      at least MINSIZE and to have prev_inuse set.
03030   */
03031 
03032   assert((old_top == initial_top(av) && old_size == 0) ||
03033          ((unsigned long) (old_size) >= MINSIZE &&
03034           prev_inuse(old_top) &&
03035          ((unsigned long)old_end & pagemask) == 0));
03036 
03037   /* Precondition: not enough current space to satisfy nb request */
03038   assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
03039 
03040   /* Precondition: all fastbins are consolidated */
03041   assert(!have_fastchunks(av));
03042 
03043 
03044   if (av != &main_arena) {
03045 
03046     heap_info *old_heap, *heap;
03047     size_t old_heap_size;
03048 
03049     /* First try to extend the current heap. */
03050     old_heap = heap_for_ptr(old_top);
03051     old_heap_size = old_heap->size;
03052     if ((long) (MINSIZE + nb - old_size) > 0
03053        && grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
03054       av->system_mem += old_heap->size - old_heap_size;
03055       arena_mem += old_heap->size - old_heap_size;
03056 #if 0
03057       if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
03058         max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
03059 #endif
03060       set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
03061               | PREV_INUSE);
03062     }
03063     else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
03064       /* Use a newly allocated heap.  */
03065       heap->ar_ptr = av;
03066       heap->prev = old_heap;
03067       av->system_mem += heap->size;
03068       arena_mem += heap->size;
03069 #if 0
03070       if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
03071        max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
03072 #endif
03073       /* Set up the new top.  */
03074       top(av) = chunk_at_offset(heap, sizeof(*heap));
03075       set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
03076 
03077       /* Setup fencepost and free the old top chunk. */
03078       /* The fencepost takes at least MINSIZE bytes, because it might
03079         become the top chunk again later.  Note that a footer is set
03080         up, too, although the chunk is marked in use. */
03081       old_size -= MINSIZE;
03082       set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
03083       if (old_size >= MINSIZE) {
03084        set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
03085        set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
03086        set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
03087        _int_free(av, chunk2mem(old_top));
03088       } else {
03089        set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
03090        set_foot(old_top, (old_size + 2*SIZE_SZ));
03091       }
03092     }
03093     else if (!tried_mmap)
03094       /* We can at least try to use mmap.  */
03095       goto try_mmap;
03096 
03097   } else { /* av == main_arena */
03098 
03099 
03100   /* Request enough space for nb + pad + overhead */
03101 
03102   size = nb + mp_.top_pad + MINSIZE;
03103 
03104   /*
03105     If contiguous, we can subtract out existing space that we hope to
03106     combine with new space. We add it back later only if
03107     we don't actually get contiguous space.
03108   */
03109 
03110   if (contiguous(av))
03111     size -= old_size;
03112 
03113   /*
03114     Round to a multiple of page size.
03115     If MORECORE is not contiguous, this ensures that we only call it
03116     with whole-page arguments.  And if MORECORE is contiguous and
03117     this is not first time through, this preserves page-alignment of
03118     previous calls. Otherwise, we correct to page-align below.
03119   */
03120 
03121   size = (size + pagemask) & ~pagemask;
03122 
03123   /*
03124     Don't try to call MORECORE if argument is so big as to appear
03125     negative. Note that since mmap takes size_t arg, it may succeed
03126     below even if we cannot call MORECORE.
03127   */
03128 
03129   if (size > 0)
03130     brk = (char*)(MORECORE(size));
03131 
03132   if (brk != (char*)(MORECORE_FAILURE)) {
03133     /* Call the `morecore' hook if necessary.  */
03134     if (__after_morecore_hook)
03135       (*__after_morecore_hook) ();
03136   } else {
03137   /*
03138     If have mmap, try using it as a backup when MORECORE fails or
03139     cannot be used. This is worth doing on systems that have "holes" in
03140     address space, so sbrk cannot extend to give contiguous space, but
03141     space is available elsewhere.  Note that we ignore mmap max count
03142     and threshold limits, since the space will not be used as a
03143     segregated mmap region.
03144   */
03145 
03146 #if HAVE_MMAP
03147     /* Cannot merge with old top, so add its size back in */
03148     if (contiguous(av))
03149       size = (size + old_size + pagemask) & ~pagemask;
03150 
03151     /* If we are relying on mmap as backup, then use larger units */
03152     if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
03153       size = MMAP_AS_MORECORE_SIZE;
03154 
03155     /* Don't try if size wraps around 0 */
03156     if ((unsigned long)(size) > (unsigned long)(nb)) {
03157 
03158       char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
03159 
03160       if (mbrk != MAP_FAILED) {
03161 
03162         /* We do not need, and cannot use, another sbrk call to find end */
03163         brk = mbrk;
03164         snd_brk = brk + size;
03165 
03166         /*
03167            Record that we no longer have a contiguous sbrk region.
03168            After the first time mmap is used as backup, we do not
03169            ever rely on contiguous space since this could incorrectly
03170            bridge regions.
03171         */
03172         set_noncontiguous(av);
03173       }
03174     }
03175 #endif
03176   }
03177 
03178   if (brk != (char*)(MORECORE_FAILURE)) {
03179     if (mp_.sbrk_base == 0)
03180       mp_.sbrk_base = brk;
03181     av->system_mem += size;
03182 
03183     /*
03184       If MORECORE extends previous space, we can likewise extend top size.
03185     */
03186 
03187     if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
03188       set_head(old_top, (size + old_size) | PREV_INUSE);
03189 
03190     else if (contiguous(av) && old_size && brk < old_end) {
03191       /* Oops!  Someone else killed our space.  Can't touch anything.  */
03192       assert(0);
03193     }
03194 
03195     /*
03196       Otherwise, make adjustments:
03197 
03198       * If the first time through or noncontiguous, we need to call sbrk
03199         just to find out where the end of memory lies.
03200 
03201       * We need to ensure that all returned chunks from malloc will meet
03202         MALLOC_ALIGNMENT
03203 
03204       * If there was an intervening foreign sbrk, we need to adjust sbrk
03205         request size to account for fact that we will not be able to
03206         combine new space with existing space in old_top.
03207 
03208       * Almost all systems internally allocate whole pages at a time, in
03209         which case we might as well use the whole last page of request.
03210         So we allocate enough more memory to hit a page boundary now,
03211         which in turn causes future contiguous calls to page-align.
03212     */
03213 
03214     else {
03215       front_misalign = 0;
03216       end_misalign = 0;
03217       correction = 0;
03218       aligned_brk = brk;
03219 
03220       /* handle contiguous cases */
03221       if (contiguous(av)) {
03222 
03223        /* Count foreign sbrk as system_mem.  */
03224        if (old_size)
03225          av->system_mem += brk - old_end;
03226 
03227         /* Guarantee alignment of first new chunk made from this space */
03228 
03229         front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
03230         if (front_misalign > 0) {
03231 
03232           /*
03233             Skip over some bytes to arrive at an aligned position.
03234             We don't need to specially mark these wasted front bytes.
03235             They will never be accessed anyway because
03236             prev_inuse of av->top (and any chunk created from its start)
03237             is always true after initialization.
03238           */
03239 
03240           correction = MALLOC_ALIGNMENT - front_misalign;
03241           aligned_brk += correction;
03242         }
03243 
03244         /*
03245           If this isn't adjacent to existing space, then we will not
03246           be able to merge with old_top space, so must add to 2nd request.
03247         */
03248 
03249         correction += old_size;
03250 
03251         /* Extend the end address to hit a page boundary */
03252         end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
03253         correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
03254 
03255         assert(correction >= 0);
03256         snd_brk = (char*)(MORECORE(correction));
03257 
03258         /*
03259           If we can't allocate the correction, try to at least find out the current
03260           brk.  It might be enough to proceed without failing.
03261 
03262           Note that if second sbrk did NOT fail, we assume that space
03263           is contiguous with first sbrk. This is a safe assumption unless
03264           program is multithreaded but doesn't use locks and a foreign sbrk
03265           occurred between our first and second calls.
03266         */
03267 
03268         if (snd_brk == (char*)(MORECORE_FAILURE)) {
03269           correction = 0;
03270           snd_brk = (char*)(MORECORE(0));
03271         } else
03272          /* Call the `morecore' hook if necessary.  */
03273          if (__after_morecore_hook)
03274            (*__after_morecore_hook) ();
03275       }
03276 
03277       /* handle non-contiguous cases */
03278       else {
03279         /* MORECORE/mmap must correctly align */
03280         assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
03281 
03282         /* Find out current end of memory */
03283         if (snd_brk == (char*)(MORECORE_FAILURE)) {
03284           snd_brk = (char*)(MORECORE(0));
03285         }
03286       }
03287 
03288       /* Adjust top based on results of second sbrk */
03289       if (snd_brk != (char*)(MORECORE_FAILURE)) {
03290         av->top = (mchunkptr)aligned_brk;
03291         set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
03292         av->system_mem += correction;
03293 
03294         /*
03295           If not the first time through, we either have a
03296           gap due to foreign sbrk or a non-contiguous region.  Insert a
03297           double fencepost at old_top to prevent consolidation with space
03298           we don't own. These fenceposts are artificial chunks that are
03299           marked as inuse and are in any case too small to use.  We need
03300           two to make sizes and alignments work out.
03301         */
03302 
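        /* Editorial illustration of the layout the code below creates:

             old_top                       old_top+old_size  +2*SIZE_SZ
             |<-------- old_size --------->|<- fencepost 1 ->|<- fencepost 2 ->|
               (freed below if >= MINSIZE)     2*SIZE_SZ          2*SIZE_SZ

           Both fenceposts carry PREV_INUSE, so later frees never coalesce
           across them into space we don't own.  */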
03303         if (old_size != 0) {
03304           /*
03305              Shrink old_top to insert fenceposts, keeping size a
03306              multiple of MALLOC_ALIGNMENT. We know there is at least
03307              enough space in old_top to do this.
03308           */
03309           old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
03310           set_head(old_top, old_size | PREV_INUSE);
03311 
03312           /*
03313             Note that the following assignments completely overwrite
03314             old_top when old_size was previously MINSIZE.  This is
03315             intentional. We need the fencepost, even if old_top otherwise gets
03316             lost.
03317           */
03318           chunk_at_offset(old_top, old_size            )->size =
03319             (2*SIZE_SZ)|PREV_INUSE;
03320 
03321           chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
03322             (2*SIZE_SZ)|PREV_INUSE;
03323 
03324           /* If possible, release the rest. */
03325           if (old_size >= MINSIZE) {
03326             _int_free(av, chunk2mem(old_top));
03327           }
03328 
03329         }
03330       }
03331     }
03332 
03333     /* Update statistics */
03334 #ifdef NO_THREADS
03335     sum = av->system_mem + mp_.mmapped_mem;
03336     if (sum > (unsigned long)(mp_.max_total_mem))
03337       mp_.max_total_mem = sum;
03338 #endif
03339 
03340   }
03341 
03342   } /* if (av !=  &main_arena) */
03343 
03344   if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
03345     av->max_system_mem = av->system_mem;
03346   check_malloc_state(av);
03347 
03348   /* finally, do the allocation */
03349   p = av->top;
03350   size = chunksize(p);
03351 
03352   /* check that one of the above allocation paths succeeded */
03353   if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
03354     remainder_size = size - nb;
03355     remainder = chunk_at_offset(p, nb);
03356     av->top = remainder;
03357     set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
03358     set_head(remainder, remainder_size | PREV_INUSE);
03359     check_malloced_chunk(av, p, nb);
03360     return chunk2mem(p);
03361   }
03362 
03363   /* catch all failure paths */
03364   MALLOC_FAILURE_ACTION;
03365   return 0;
03366 }
03367 
03368 
03369 /*
03370   sYSTRIm is an inverse of sorts to sYSMALLOc.  It gives memory back
03371   to the system (via negative arguments to sbrk) if there is unused
03372   memory at the `high' end of the malloc pool. It is called
03373   automatically by free() when top space exceeds the trim
03374   threshold. It is also called by the public malloc_trim routine.  It
03375   returns 1 if it actually released any memory, else 0.
03376 */
03377 
03378 #if __STD_C
03379 static int sYSTRIm(size_t pad, mstate av)
03380 #else
03381 static int sYSTRIm(pad, av) size_t pad; mstate av;
03382 #endif
03383 {
03384   long  top_size;        /* Amount of top-most memory */
03385   long  extra;           /* Amount to release */
03386   long  released;        /* Amount actually released */
03387   char* current_brk;     /* address returned by pre-check sbrk call */
03388   char* new_brk;         /* address returned by post-check sbrk call */
03389   size_t pagesz;
03390 
03391   pagesz = mp_.pagesize;
03392   top_size = chunksize(av->top);
03393 
03394   /* Release in pagesize units, keeping at least one page */
03395   extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
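  /* Editorial example (assuming a 4096-byte page, pad == 0 and MINSIZE == 32):
     if top_size is exactly 300 pages (1228800 bytes), extra comes out as
     299 pages (1224704 bytes), leaving one page in the top chunk.  */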
03396 
03397   if (extra > 0) {
03398 
03399     /*
03400       Only proceed if end of memory is where we last set it.
03401       This avoids problems if there were foreign sbrk calls.
03402     */
03403     current_brk = (char*)(MORECORE(0));
03404     if (current_brk == (char*)(av->top) + top_size) {
03405 
03406       /*
03407         Attempt to release memory. We ignore MORECORE return value,
03408         and instead call again to find out where new end of memory is.
03409         This avoids problems if first call releases less than we asked,
03410       or if failure somehow altered brk value. (We could still
03411         encounter problems if it altered brk in some very bad way,
03412         but the only thing we can do is adjust anyway, which will cause
03413         some downstream failure.)
03414       */
03415 
03416       MORECORE(-extra);
03417       /* Call the `morecore' hook if necessary.  */
03418       if (__after_morecore_hook)
03419        (*__after_morecore_hook) ();
03420       new_brk = (char*)(MORECORE(0));
03421 
03422       if (new_brk != (char*)MORECORE_FAILURE) {
03423         released = (long)(current_brk - new_brk);
03424 
03425         if (released != 0) {
03426           /* Success. Adjust top. */
03427           av->system_mem -= released;
03428           set_head(av->top, (top_size - released) | PREV_INUSE);
03429           check_malloc_state(av);
03430           return 1;
03431         }
03432       }
03433     }
03434   }
03435   return 0;
03436 }
03437 
03438 #ifdef HAVE_MMAP
03439 
03440 static void
03441 internal_function
03442 #if __STD_C
03443 munmap_chunk(mchunkptr p)
03444 #else
03445 munmap_chunk(p) mchunkptr p;
03446 #endif
03447 {
03448   INTERNAL_SIZE_T size = chunksize(p);
03449 
03450   assert (chunk_is_mmapped(p));
03451 #if 0
03452   assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
03453   assert((mp_.n_mmaps > 0));
03454 #endif
03455 
03456   uintptr_t block = (uintptr_t) p - p->prev_size;
03457   size_t total_size = p->prev_size + size;
03458   /* Unfortunately we have to do the compiler's job by hand here.  Normally
03459      we would test BLOCK and TOTAL-SIZE separately for compliance with the
03460      page size.  But gcc does not recognize the optimization possibility
03461      (at the moment, at least) so we combine the two values into one before
03462      the bit test.  */
03463   if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
03464     {
03465       malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
03466                      chunk2mem (p));
03467       return;
03468     }
03469 
03470   mp_.n_mmaps--;
03471   mp_.mmapped_mem -= total_size;
03472 
03473   int ret __attribute__ ((unused)) = munmap((char *)block, total_size);
03474 
03475   /* munmap returns non-zero on failure */
03476   assert(ret == 0);
03477 }
03478 
03479 #if HAVE_MREMAP
03480 
03481 static mchunkptr
03482 internal_function
03483 #if __STD_C
03484 mremap_chunk(mchunkptr p, size_t new_size)
03485 #else
03486 mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
03487 #endif
03488 {
03489   size_t page_mask = mp_.pagesize - 1;
03490   INTERNAL_SIZE_T offset = p->prev_size;
03491   INTERNAL_SIZE_T size = chunksize(p);
03492   char *cp;
03493 
03494   assert (chunk_is_mmapped(p));
03495 #if 0
03496   assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
03497   assert((mp_.n_mmaps > 0));
03498 #endif
03499   assert(((size + offset) & (mp_.pagesize-1)) == 0);
03500 
03501   /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
03502   new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
03503 
03504   /* No need to remap if the number of pages does not change.  */
03505   if (size + offset == new_size)
03506     return p;
03507 
03508   cp = (char *)mremap((char *)p - offset, size + offset, new_size,
03509                       MREMAP_MAYMOVE);
03510 
03511   if (cp == MAP_FAILED) return 0;
03512 
03513   p = (mchunkptr)(cp + offset);
03514 
03515   assert(aligned_OK(chunk2mem(p)));
03516 
03517   assert((p->prev_size == offset));
03518   set_head(p, (new_size - offset)|IS_MMAPPED);
03519 
03520   mp_.mmapped_mem -= size + offset;
03521   mp_.mmapped_mem += new_size;
03522   if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
03523     mp_.max_mmapped_mem = mp_.mmapped_mem;
03524 #ifdef NO_THREADS
03525   if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
03526       mp_.max_total_mem)
03527     mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
03528 #endif
03529   return p;
03530 }
03531 
03532 #endif /* HAVE_MREMAP */
03533 
03534 #endif /* HAVE_MMAP */
03535 
03536 /*------------------------ Public wrappers. --------------------------------*/
03537 
03538 Void_t*
03539 public_mALLOc(size_t bytes)
03540 {
03541   mstate ar_ptr;
03542   Void_t *victim;
03543 
03544   __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t) = __malloc_hook;
03545   if (hook != NULL)
03546     return (*hook)(bytes, RETURN_ADDRESS (0));
03547 
03548   arena_get(ar_ptr, bytes);
03549   if(!ar_ptr)
03550     return 0;
03551   victim = _int_malloc(ar_ptr, bytes);
03552   if(!victim) {
03553     /* Maybe the failure is due to running out of mmapped areas. */
03554     if(ar_ptr != &main_arena) {
03555       (void)mutex_unlock(&ar_ptr->mutex);
03556       ar_ptr = &main_arena;
03557       (void)mutex_lock(&ar_ptr->mutex);
03558       victim = _int_malloc(ar_ptr, bytes);
03559       (void)mutex_unlock(&ar_ptr->mutex);
03560     } else {
03561 #if USE_ARENAS
03562       /* ... or sbrk() has failed and there is still a chance to mmap() */
03563       ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
03564       (void)mutex_unlock(&main_arena.mutex);
03565       if(ar_ptr) {
03566         victim = _int_malloc(ar_ptr, bytes);
03567         (void)mutex_unlock(&ar_ptr->mutex);
03568       }
03569 #endif
03570     }
03571   } else
03572     (void)mutex_unlock(&ar_ptr->mutex);
03573   assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
03574         ar_ptr == arena_for_chunk(mem2chunk(victim)));
03575   return victim;
03576 }
03577 #ifdef libc_hidden_def
03578 libc_hidden_def(public_mALLOc)
03579 #endif
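/* Editorial sketch (not part of malloc.c): how an application of this era
   might interpose the __malloc_hook that public_mALLOc consults above.  The
   hook names and the tracing format are purely illustrative; the
   save/restore dance follows the pattern documented for these hooks and is
   not thread-safe.  Shown disabled in the file's usual #if 0 style.  */
#if 0
#include <malloc.h>
#include <stdio.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
tracing_malloc_hook (size_t size, const void *caller)
{
  void *result;
  /* Restore the previous hook so the malloc call below is not recursive.  */
  __malloc_hook = old_malloc_hook;
  result = malloc (size);
  /* The hook may have been changed by malloc itself; save it again.  */
  old_malloc_hook = __malloc_hook;
  fprintf (stderr, "malloc(%lu) from %p returns %p\n",
           (unsigned long) size, caller, result);
  /* Re-install ourselves for the next call.  */
  __malloc_hook = tracing_malloc_hook;
  return result;
}

static void
install_tracing_hook (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = tracing_malloc_hook;
}
#endif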
03580 
03581 void
03582 public_fREe(Void_t* mem)
03583 {
03584   mstate ar_ptr;
03585   mchunkptr p;                          /* chunk corresponding to mem */
03586 
03587   void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) = __free_hook;
03588   if (hook != NULL) {
03589     (*hook)(mem, RETURN_ADDRESS (0));
03590     return;
03591   }
03592 
03593   if (mem == 0)                              /* free(0) has no effect */
03594     return;
03595 
03596   p = mem2chunk(mem);
03597 
03598 #if HAVE_MMAP
03599   if (chunk_is_mmapped(p))                       /* release mmapped memory. */
03600   {
03601     /* see if the dynamic brk/mmap threshold needs adjusting */
03602     if (!mp_.no_dyn_threshold
03603        && p->size > mp_.mmap_threshold
03604         && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
03605       {
03606        mp_.mmap_threshold = chunksize (p);
03607        mp_.trim_threshold = 2 * mp_.mmap_threshold;
03608       }
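    /* Editorial example: with the default 128 KB mmap threshold, freeing an
       mmapped chunk of 262144 bytes raises mmap_threshold to 262144 and
       trim_threshold to 524288, so future requests of that size are served
       from the heap instead of fresh mmap/munmap pairs.  */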
03609     munmap_chunk(p);
03610     return;
03611   }
03612 #endif
03613 
03614   ar_ptr = arena_for_chunk(p);
03615 #if THREAD_STATS
03616   if(!mutex_trylock(&ar_ptr->mutex))
03617     ++(ar_ptr->stat_lock_direct);
03618   else {
03619     (void)mutex_lock(&ar_ptr->mutex);
03620     ++(ar_ptr->stat_lock_wait);
03621   }
03622 #else
03623   (void)mutex_lock(&ar_ptr->mutex);
03624 #endif
03625   _int_free(ar_ptr, mem);
03626   (void)mutex_unlock(&ar_ptr->mutex);
03627 }
03628 #ifdef libc_hidden_def
03629 libc_hidden_def (public_fREe)
03630 #endif
03631 
03632 Void_t*
03633 public_rEALLOc(Void_t* oldmem, size_t bytes)
03634 {
03635   mstate ar_ptr;
03636   INTERNAL_SIZE_T    nb;      /* padded request size */
03637 
03638   mchunkptr oldp;             /* chunk corresponding to oldmem */
03639   INTERNAL_SIZE_T    oldsize; /* its size */
03640 
03641   Void_t* newp;             /* chunk to return */
03642 
03643   __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
03644     __realloc_hook;
03645   if (hook != NULL)
03646     return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
03647 
03648 #if REALLOC_ZERO_BYTES_FREES
03649   if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
03650 #endif
03651 
03652   /* realloc of null is supposed to be same as malloc */
03653   if (oldmem == 0) return public_mALLOc(bytes);
03654 
03655   oldp    = mem2chunk(oldmem);
03656   oldsize = chunksize(oldp);
03657 
03658   /* A little security check which won't hurt performance: the
03659      allocator never wraps around at the end of the address space.
03660      Therefore we can exclude some size values which might appear
03661      here by accident or by "design" from some intruder.  */
03662   if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
03663       || __builtin_expect (misaligned_chunk (oldp), 0))
03664     {
03665       malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
03666       return NULL;
03667     }
03668 
03669   checked_request2size(bytes, nb);
03670 
03671 #if HAVE_MMAP
03672   if (chunk_is_mmapped(oldp))
03673   {
03674     Void_t* newmem;
03675 
03676 #if HAVE_MREMAP
03677     newp = mremap_chunk(oldp, nb);
03678     if(newp) return chunk2mem(newp);
03679 #endif
03680     /* Note the extra SIZE_SZ overhead. */
03681     if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
03682     /* Must alloc, copy, free. */
03683     newmem = public_mALLOc(bytes);
03684     if (newmem == 0) return 0; /* propagate failure */
03685     MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
03686     munmap_chunk(oldp);
03687     return newmem;
03688   }
03689 #endif
03690 
03691   ar_ptr = arena_for_chunk(oldp);
03692 #if THREAD_STATS
03693   if(!mutex_trylock(&ar_ptr->mutex))
03694     ++(ar_ptr->stat_lock_direct);
03695   else {
03696     (void)mutex_lock(&ar_ptr->mutex);
03697     ++(ar_ptr->stat_lock_wait);
03698   }
03699 #else
03700   (void)mutex_lock(&ar_ptr->mutex);
03701 #endif
03702 
03703 #ifndef NO_THREADS
03704   /* As in malloc(), remember this arena for the next allocation. */
03705   tsd_setspecific(arena_key, (Void_t *)ar_ptr);
03706 #endif
03707 
03708   newp = _int_realloc(ar_ptr, oldmem, bytes);
03709 
03710   (void)mutex_unlock(&ar_ptr->mutex);
03711   assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
03712         ar_ptr == arena_for_chunk(mem2chunk(newp)));
03713 
03714   if (newp == NULL)
03715     {
03716       /* Try harder to allocate memory in other arenas.  */
03717       newp = public_mALLOc(bytes);
03718       if (newp != NULL)
03719        {
03720          MALLOC_COPY (newp, oldmem, oldsize - SIZE_SZ);
03721 #if THREAD_STATS
03722          if(!mutex_trylock(&ar_ptr->mutex))
03723            ++(ar_ptr->stat_lock_direct);
03724          else {
03725            (void)mutex_lock(&ar_ptr->mutex);
03726            ++(ar_ptr->stat_lock_wait);
03727          }
03728 #else
03729          (void)mutex_lock(&ar_ptr->mutex);
03730 #endif
03731          _int_free(ar_ptr, oldmem);
03732          (void)mutex_unlock(&ar_ptr->mutex);
03733        }
03734     }
03735 
03736   return newp;
03737 }
03738 #ifdef libc_hidden_def
03739 libc_hidden_def (public_rEALLOc)
03740 #endif
03741 
03742 Void_t*
03743 public_mEMALIGn(size_t alignment, size_t bytes)
03744 {
03745   mstate ar_ptr;
03746   Void_t *p;
03747 
03748   __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
03749                                    __const __malloc_ptr_t)) =
03750     __memalign_hook;
03751   if (hook != NULL)
03752     return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
03753 
03754   /* If need less alignment than we give anyway, just relay to malloc */
03755   if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
03756 
03757   /* Otherwise, ensure that it is at least a minimum chunk size */
03758   if (alignment <  MINSIZE) alignment = MINSIZE;
03759 
03760   arena_get(ar_ptr, bytes + alignment + MINSIZE);
03761   if(!ar_ptr)
03762     return 0;
03763   p = _int_memalign(ar_ptr, alignment, bytes);
03764   if(!p) {
03765     /* Maybe the failure is due to running out of mmapped areas. */
03766     if(ar_ptr != &main_arena) {
03767       (void)mutex_unlock(&ar_ptr->mutex);
03768       ar_ptr = &main_arena;
03769       (void)mutex_lock(&ar_ptr->mutex);
03770       p = _int_memalign(ar_ptr, alignment, bytes);
03771       (void)mutex_unlock(&ar_ptr->mutex);
03772     } else {
03773 #if USE_ARENAS
03774       /* ... or sbrk() has failed and there is still a chance to mmap() */
03775       mstate prev = ar_ptr->next ? ar_ptr : 0;
03776       (void)mutex_unlock(&ar_ptr->mutex);
03777       ar_ptr = arena_get2(prev, bytes);
03778       if(ar_ptr) {
03779         p = _int_memalign(ar_ptr, alignment, bytes);
03780         (void)mutex_unlock(&ar_ptr->mutex);
03781       }
03782 #endif
03783     }
03784   } else
03785     (void)mutex_unlock(&ar_ptr->mutex);
03786   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
03787         ar_ptr == arena_for_chunk(mem2chunk(p)));
03788   return p;
03789 }
03790 #ifdef libc_hidden_def
03791 libc_hidden_def (public_mEMALIGn)
03792 #endif
03793 
03794 Void_t*
03795 public_vALLOc(size_t bytes)
03796 {
03797   mstate ar_ptr;
03798   Void_t *p;
03799 
03800   if(__malloc_initialized < 0)
03801     ptmalloc_init ();
03802 
03803   size_t pagesz = mp_.pagesize;
03804 
03805   __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
03806                                    __const __malloc_ptr_t)) =
03807     __memalign_hook;
03808   if (hook != NULL)
03809     return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
03810 
03811   arena_get(ar_ptr, bytes + pagesz + MINSIZE);
03812   if(!ar_ptr)
03813     return 0;
03814   p = _int_valloc(ar_ptr, bytes);
03815   (void)mutex_unlock(&ar_ptr->mutex);
03816   if(!p) {
03817     /* Maybe the failure is due to running out of mmapped areas. */
03818     if(ar_ptr != &main_arena) {
03819       (void)mutex_lock(&main_arena.mutex);
03820       p = _int_memalign(&main_arena, pagesz, bytes);
03821       (void)mutex_unlock(&main_arena.mutex);
03822     } else {
03823 #if USE_ARENAS
03824       /* ... or sbrk() has failed and there is still a chance to mmap() */
03825       ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
03826       if(ar_ptr) {
03827         p = _int_memalign(ar_ptr, pagesz, bytes);
03828         (void)mutex_unlock(&ar_ptr->mutex);
03829       }
03830 #endif
03831     }
03832   }
03833   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
03834         ar_ptr == arena_for_chunk(mem2chunk(p)));
03835 
03836   return p;
03837 }
03838 
03839 Void_t*
03840 public_pVALLOc(size_t bytes)
03841 {
03842   mstate ar_ptr;
03843   Void_t *p;
03844 
03845   if(__malloc_initialized < 0)
03846     ptmalloc_init ();
03847 
03848   size_t pagesz = mp_.pagesize;
03849   size_t page_mask = mp_.pagesize - 1;
03850   size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
03851 
03852   __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
03853                                    __const __malloc_ptr_t)) =
03854     __memalign_hook;
03855   if (hook != NULL)
03856     return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
03857 
03858   arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
03859   p = _int_pvalloc(ar_ptr, bytes);
03860   (void)mutex_unlock(&ar_ptr->mutex);
03861   if(!p) {
03862     /* Maybe the failure is due to running out of mmapped areas. */
03863     if(ar_ptr != &main_arena) {
03864       (void)mutex_lock(&main_arena.mutex);
03865       p = _int_memalign(&main_arena, pagesz, rounded_bytes);
03866       (void)mutex_unlock(&main_arena.mutex);
03867     } else {
03868 #if USE_ARENAS
03869       /* ... or sbrk() has failed and there is still a chance to mmap() */
03870       ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
03871                        bytes + 2*pagesz + MINSIZE);
03872       if(ar_ptr) {
03873         p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
03874         (void)mutex_unlock(&ar_ptr->mutex);
03875       }
03876 #endif
03877     }
03878   }
03879   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
03880         ar_ptr == arena_for_chunk(mem2chunk(p)));
03881 
03882   return p;
03883 }
03884 
03885 Void_t*
03886 public_cALLOc(size_t n, size_t elem_size)
03887 {
03888   mstate av;
03889   mchunkptr oldtop, p;
03890   INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
03891   Void_t* mem;
03892   unsigned long clearsize;
03893   unsigned long nclears;
03894   INTERNAL_SIZE_T* d;
03895   __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
03896     __malloc_hook;
03897 
03898   /* size_t is unsigned so the behavior on overflow is defined.  */
03899   bytes = n * elem_size;
03900 #define HALF_INTERNAL_SIZE_T \
03901   (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
03902   if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
03903     if (elem_size != 0 && bytes / elem_size != n) {
03904       MALLOC_FAILURE_ACTION;
03905       return 0;
03906     }
03907   }
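  /* Editorial example (assuming a 32-bit INTERNAL_SIZE_T, so
     HALF_INTERNAL_SIZE_T == 65536): calloc(70000, 70000) has both arguments
     above that limit, and 70000 * 70000 wraps to 605032704, which divided by
     70000 no longer yields 70000, so the overflow is caught here.  */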
03908 
03909   if (hook != NULL) {
03910     sz = bytes;
03911     mem = (*hook)(sz, RETURN_ADDRESS (0));
03912     if(mem == 0)
03913       return 0;
03914 #ifdef HAVE_MEMCPY
03915     return memset(mem, 0, sz);
03916 #else
03917     while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
03918     return mem;
03919 #endif
03920   }
03921 
03922   sz = bytes;
03923 
03924   arena_get(av, sz);
03925   if(!av)
03926     return 0;
03927 
03928   /* Check if we hand out the top chunk, in which case there may be no
03929      need to clear. */
03930 #if MORECORE_CLEARS
03931   oldtop = top(av);
03932   oldtopsize = chunksize(top(av));
03933 #if MORECORE_CLEARS < 2
03934   /* Only newly allocated memory is guaranteed to be cleared.  */
03935   if (av == &main_arena &&
03936       oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
03937     oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
03938 #endif
03939   if (av != &main_arena)
03940     {
03941       heap_info *heap = heap_for_ptr (oldtop);
03942       if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
03943        oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
03944     }
03945 #endif
03946   mem = _int_malloc(av, sz);
03947 
03948   /* Only clearing follows, so we can unlock early. */
03949   (void)mutex_unlock(&av->mutex);
03950 
03951   assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
03952         av == arena_for_chunk(mem2chunk(mem)));
03953 
03954   if (mem == 0) {
03955     /* Maybe the failure is due to running out of mmapped areas. */
03956     if(av != &main_arena) {
03957       (void)mutex_lock(&main_arena.mutex);
03958       mem = _int_malloc(&main_arena, sz);
03959       (void)mutex_unlock(&main_arena.mutex);
03960     } else {
03961 #if USE_ARENAS
03962       /* ... or sbrk() has failed and there is still a chance to mmap() */
03963       (void)mutex_lock(&main_arena.mutex);
03964       av = arena_get2(av->next ? av : 0, sz);
03965       (void)mutex_unlock(&main_arena.mutex);
03966       if(av) {
03967         mem = _int_malloc(av, sz);
03968         (void)mutex_unlock(&av->mutex);
03969       }
03970 #endif
03971     }
03972     if (mem == 0) return 0;
03973   }
03974   p = mem2chunk(mem);
03975 
03976   /* Two optional cases in which clearing not necessary */
03977 #if HAVE_MMAP
03978   if (chunk_is_mmapped (p))
03979     {
03980       if (__builtin_expect (perturb_byte, 0))
03981        MALLOC_ZERO (mem, sz);
03982       return mem;
03983     }
03984 #endif
03985 
03986   csz = chunksize(p);
03987 
03988 #if MORECORE_CLEARS
03989   if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
03990     /* clear only the bytes from non-freshly-sbrked memory */
03991     csz = oldtopsize;
03992   }
03993 #endif
03994 
03995   /* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
03996      contents have an odd number of INTERNAL_SIZE_T-sized words;
03997      minimally 3.  */
03998   d = (INTERNAL_SIZE_T*)mem;
03999   clearsize = csz - SIZE_SZ;
04000   nclears = clearsize / sizeof(INTERNAL_SIZE_T);
04001   assert(nclears >= 3);
04002 
04003   if (nclears > 9)
04004     MALLOC_ZERO(d, clearsize);
04005 
04006   else {
04007     *(d+0) = 0;
04008     *(d+1) = 0;
04009     *(d+2) = 0;
04010     if (nclears > 4) {
04011       *(d+3) = 0;
04012       *(d+4) = 0;
04013       if (nclears > 6) {
04014        *(d+5) = 0;
04015        *(d+6) = 0;
04016        if (nclears > 8) {
04017          *(d+7) = 0;
04018          *(d+8) = 0;
04019        }
04020       }
04021     }
04022   }
04023 
04024   return mem;
04025 }
04026 
04027 #ifndef _LIBC
04028 
04029 Void_t**
04030 public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
04031 {
04032   mstate ar_ptr;
04033   Void_t** m;
04034 
04035   arena_get(ar_ptr, n*elem_size);
04036   if(!ar_ptr)
04037     return 0;
04038 
04039   m = _int_icalloc(ar_ptr, n, elem_size, chunks);
04040   (void)mutex_unlock(&ar_ptr->mutex);
04041   return m;
04042 }
04043 
04044 Void_t**
04045 public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
04046 {
04047   mstate ar_ptr;
04048   Void_t** m;
04049 
04050   arena_get(ar_ptr, 0);
04051   if(!ar_ptr)
04052     return 0;
04053 
04054   m = _int_icomalloc(ar_ptr, n, sizes, chunks);
04055   (void)mutex_unlock(&ar_ptr->mutex);
04056   return m;
04057 }
04058 
04059 void
04060 public_cFREe(Void_t* m)
04061 {
04062   public_fREe(m);
04063 }
04064 
04065 #endif /* _LIBC */
04066 
04067 int
04068 public_mTRIm(size_t s)
04069 {
04070   int result = 0;
04071 
04072   if(__malloc_initialized < 0)
04073     ptmalloc_init ();
04074 
04075   mstate ar_ptr = &main_arena;
04076   do
04077     {
04078       (void) mutex_lock (&ar_ptr->mutex);
04079       result |= mTRIm (ar_ptr, s);
04080       (void) mutex_unlock (&ar_ptr->mutex);
04081 
04082       ar_ptr = ar_ptr->next;
04083     }
04084   while (ar_ptr != &main_arena);
04085 
04086   return result;
04087 }
04088 
04089 size_t
04090 public_mUSABLe(Void_t* m)
04091 {
04092   size_t result;
04093 
04094   result = mUSABLe(m);
04095   return result;
04096 }
04097 
04098 void
04099 public_mSTATs()
04100 {
04101   mSTATs();
04102 }
04103 
04104 struct mallinfo public_mALLINFo()
04105 {
04106   struct mallinfo m;
04107 
04108   if(__malloc_initialized < 0)
04109     ptmalloc_init ();
04110   (void)mutex_lock(&main_arena.mutex);
04111   m = mALLINFo(&main_arena);
04112   (void)mutex_unlock(&main_arena.mutex);
04113   return m;
04114 }
04115 
04116 int
04117 public_mALLOPt(int p, int v)
04118 {
04119   int result;
04120   result = mALLOPt(p, v);
04121   return result;
04122 }
04123 
04124 /*
04125   ------------------------------ malloc ------------------------------
04126 */
04127 
04128 Void_t*
04129 _int_malloc(mstate av, size_t bytes)
04130 {
04131   INTERNAL_SIZE_T nb;               /* normalized request size */
04132   unsigned int    idx;              /* associated bin index */
04133   mbinptr         bin;              /* associated bin */
04134   mfastbinptr*    fb;               /* associated fastbin */
04135 
04136   mchunkptr       victim;           /* inspected/selected chunk */
04137   INTERNAL_SIZE_T size;             /* its size */
04138   int             victim_index;     /* its bin index */
04139 
04140   mchunkptr       remainder;        /* remainder from a split */
04141   unsigned long   remainder_size;   /* its size */
04142 
04143   unsigned int    block;            /* bit map traverser */
04144   unsigned int    bit;              /* bit map traverser */
04145   unsigned int    map;              /* current word of binmap */
04146 
04147   mchunkptr       fwd;              /* misc temp for linking */
04148   mchunkptr       bck;              /* misc temp for linking */
04149 
04150   /*
04151     Convert request size to internal form by adding SIZE_SZ bytes
04152     overhead plus possibly more to obtain necessary alignment and/or
04153     to obtain a size of at least MINSIZE, the smallest allocatable
04154     size. Also, checked_request2size traps (returning 0) request sizes
04155     that are so large that they wrap around zero when padded and
04156     aligned.
04157   */
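  /* Editorial example (assuming SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and
     hence MINSIZE == 32): a request of 20 bytes becomes nb == 32, and a
     request of 100 bytes becomes nb == 112.  */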
04158 
04159   checked_request2size(bytes, nb);
04160 
04161   /*
04162     If the size qualifies as a fastbin, first check corresponding bin.
04163     This code is safe to execute even if av is not yet initialized, so we
04164     can try it without checking, which saves some time on this fast path.
04165   */
04166 
04167   if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
04168     long int idx = fastbin_index(nb);
04169     fb = &(av->fastbins[idx]);
04170     if ( (victim = *fb) != 0) {
04171       if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
04172        malloc_printerr (check_action, "malloc(): memory corruption (fast)",
04173                       chunk2mem (victim));
04174       *fb = victim->fd;
04175       check_remalloced_chunk(av, victim, nb);
04176       void *p = chunk2mem(victim);
04177       if (__builtin_expect (perturb_byte, 0))
04178        alloc_perturb (p, bytes);
04179       return p;
04180     }
04181   }
04182 
04183   /*
04184     If a small request, check regular bin.  Since these "smallbins"
04185     hold one size each, no searching within bins is necessary.
04186     (For a large request, we need to wait until unsorted chunks are
04187     processed to find best fit. But for small ones, fits are exact
04188     anyway, so we can check now, which is faster.)
04189   */
04190 
04191   if (in_smallbin_range(nb)) {
04192     idx = smallbin_index(nb);
04193     bin = bin_at(av,idx);
04194 
04195     if ( (victim = last(bin)) != bin) {
04196       if (victim == 0) /* initialization check */
04197         malloc_consolidate(av);
04198       else {
04199         bck = victim->bk;
04200         set_inuse_bit_at_offset(victim, nb);
04201         bin->bk = bck;
04202         bck->fd = bin;
04203 
04204         if (av != &main_arena)
04205          victim->size |= NON_MAIN_ARENA;
04206         check_malloced_chunk(av, victim, nb);
04207        void *p = chunk2mem(victim);
04208        if (__builtin_expect (perturb_byte, 0))
04209          alloc_perturb (p, bytes);
04210        return p;
04211       }
04212     }
04213   }
04214 
04215   /*
04216      If this is a large request, consolidate fastbins before continuing.
04217      While it might look excessive to kill all fastbins before
04218      even seeing if there is space available, this avoids
04219      fragmentation problems normally associated with fastbins.
04220      Also, in practice, programs tend to have runs of either small or
04221      large requests, but less often mixtures, so consolidation is not
04222      invoked all that often in most programs. And the programs in which
04223      it is called frequently would otherwise tend to fragment.
04224   */
04225 
04226   else {
04227     idx = largebin_index(nb);
04228     if (have_fastchunks(av))
04229       malloc_consolidate(av);
04230   }
04231 
04232   /*
04233     Process recently freed or remaindered chunks, taking one only if
04234     it is an exact fit, or, if this is a small request, the chunk is the
04235     remainder from the most recent non-exact fit.  Place other traversed chunks in
04236     bins.  Note that this step is the only place in any routine where
04237     chunks are placed in bins.
04238 
04239     The outer loop here is needed because we might not realize until
04240     near the end of malloc that we should have consolidated, so must
04241     do so and retry. This happens at most once, and only when we would
04242     otherwise need to expand memory to service a "small" request.
04243   */
04244 
04245   for(;;) {
04246 
04247     int iters = 0;
04248     while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
04249       bck = victim->bk;
04250       if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
04251          || __builtin_expect (victim->size > av->system_mem, 0))
04252        malloc_printerr (check_action, "malloc(): memory corruption",
04253                       chunk2mem (victim));
04254       size = chunksize(victim);
04255 
04256       /*
04257          If a small request, try to use last remainder if it is the
04258          only chunk in unsorted bin.  This helps promote locality for
04259          runs of consecutive small requests. This is the only
04260          exception to best-fit, and applies only when there is
04261          no exact fit for a small chunk.
04262       */
04263 
04264       if (in_smallbin_range(nb) &&
04265           bck == unsorted_chunks(av) &&
04266           victim == av->last_remainder &&
04267           (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
04268 
04269         /* split and reattach remainder */
04270         remainder_size = size - nb;
04271         remainder = chunk_at_offset(victim, nb);
04272         unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
04273         av->last_remainder = remainder;
04274         remainder->bk = remainder->fd = unsorted_chunks(av);
04275        if (!in_smallbin_range(remainder_size))
04276          {
04277            remainder->fd_nextsize = NULL;
04278            remainder->bk_nextsize = NULL;
04279          }
04280 
04281         set_head(victim, nb | PREV_INUSE |
04282                (av != &main_arena ? NON_MAIN_ARENA : 0));
04283         set_head(remainder, remainder_size | PREV_INUSE);
04284         set_foot(remainder, remainder_size);
04285 
04286         check_malloced_chunk(av, victim, nb);
04287        void *p = chunk2mem(victim);
04288        if (__builtin_expect (perturb_byte, 0))
04289          alloc_perturb (p, bytes);
04290        return p;
04291       }
04292 
04293       /* remove from unsorted list */
04294       unsorted_chunks(av)->bk = bck;
04295       bck->fd = unsorted_chunks(av);
04296 
04297       /* Take now instead of binning if exact fit */
04298 
04299       if (size == nb) {
04300         set_inuse_bit_at_offset(victim, size);
04301        if (av != &main_arena)
04302          victim->size |= NON_MAIN_ARENA;
04303         check_malloced_chunk(av, victim, nb);
04304        void *p = chunk2mem(victim);
04305        if (__builtin_expect (perturb_byte, 0))
04306          alloc_perturb (p, bytes);
04307        return p;
04308       }
04309 
04310       /* place chunk in bin */
04311 
04312       if (in_smallbin_range(size)) {
04313         victim_index = smallbin_index(size);
04314         bck = bin_at(av, victim_index);
04315         fwd = bck->fd;
04316       }
04317       else {
04318         victim_index = largebin_index(size);
04319         bck = bin_at(av, victim_index);
04320         fwd = bck->fd;
04321 
04322         /* maintain large bins in sorted order */
04323         if (fwd != bck) {
04324          /* Or with inuse bit to speed comparisons */
04325           size |= PREV_INUSE;
04326           /* if smaller than smallest, bypass loop below */
04327          assert((bck->bk->size & NON_MAIN_ARENA) == 0);
04328          if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
04329             fwd = bck;
04330             bck = bck->bk;
04331 
04332            victim->fd_nextsize = fwd->fd;
04333            victim->bk_nextsize = fwd->fd->bk_nextsize;
04334            fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
04335           }
04336           else {
04337            assert((fwd->size & NON_MAIN_ARENA) == 0);
04338            while ((unsigned long) size < fwd->size)
04339              {
04340               fwd = fwd->fd_nextsize;
04341               assert((fwd->size & NON_MAIN_ARENA) == 0);
04342              }
04343 
04344            if ((unsigned long) size == (unsigned long) fwd->size)
04345              /* Always insert in the second position.  */
04346              fwd = fwd->fd;
04347            else
04348              {
04349               victim->fd_nextsize = fwd;
04350               victim->bk_nextsize = fwd->bk_nextsize;
04351               fwd->bk_nextsize = victim;
04352               victim->bk_nextsize->fd_nextsize = victim;
04353              }
04354            bck = fwd->bk;
04355           }
04356        } else
04357          victim->fd_nextsize = victim->bk_nextsize = victim;
04358       }
04359 
04360       mark_bin(av, victim_index);
04361       victim->bk = bck;
04362       victim->fd = fwd;
04363       fwd->bk = victim;
04364       bck->fd = victim;
04365 
04366 #define MAX_ITERS    10000
04367       if (++iters >= MAX_ITERS)
04368        break;
04369     }
04370 
04371     /*
04372       If a large request, scan through the chunks of current bin in
04373       sorted order to find smallest that fits.  Use the skip list for this.
04374     */
04375 
04376     if (!in_smallbin_range(nb)) {
04377       bin = bin_at(av, idx);
04378 
04379       /* skip scan if empty or largest chunk is too small */
04380       if ((victim = first(bin)) != bin &&
04381           (unsigned long)(victim->size) >= (unsigned long)(nb)) {
04382 
04383        victim = victim->bk_nextsize;
04384         while (((unsigned long)(size = chunksize(victim)) <
04385                 (unsigned long)(nb)))
04386           victim = victim->bk_nextsize;
04387 
04388        /* Avoid removing the first entry for a size so that the skip
04389           list does not have to be rerouted.  */
04390        if (victim != last(bin) && victim->size == victim->fd->size)
04391          victim = victim->fd;
04392 
04393         remainder_size = size - nb;
04394         unlink(victim, bck, fwd);
04395 
04396         /* Exhaust */
04397         if (remainder_size < MINSIZE)  {
04398           set_inuse_bit_at_offset(victim, size);
04399          if (av != &main_arena)
04400            victim->size |= NON_MAIN_ARENA;
04401         }
04402         /* Split */
04403         else {
04404           remainder = chunk_at_offset(victim, nb);
04405           /* We cannot assume the unsorted list is empty and therefore
04406              have to perform a complete insert here.  */
04407          bck = unsorted_chunks(av);
04408          fwd = bck->fd;
04409          remainder->bk = bck;
04410          remainder->fd = fwd;
04411          bck->fd = remainder;
04412          fwd->bk = remainder;
04413          if (!in_smallbin_range(remainder_size))
04414            {
04415              remainder->fd_nextsize = NULL;
04416              remainder->bk_nextsize = NULL;
04417            }
04418           set_head(victim, nb | PREV_INUSE |
04419                  (av != &main_arena ? NON_MAIN_ARENA : 0));
04420           set_head(remainder, remainder_size | PREV_INUSE);
04421           set_foot(remainder, remainder_size);
04422         }
04423        check_malloced_chunk(av, victim, nb);
04424        void *p = chunk2mem(victim);
04425        if (__builtin_expect (perturb_byte, 0))
04426          alloc_perturb (p, bytes);
04427        return p;
04428       }
04429     }
04430 
04431     /*
04432       Search for a chunk by scanning bins, starting with next largest
04433       bin. This search is strictly by best-fit; i.e., the smallest
04434       (with ties going to approximately the least recently used) chunk
04435       that fits is selected.
04436 
04437       The bitmap avoids having to inspect most bins just to find them empty.
04438       The particular case of skipping all bins during warm-up phases, when
04439       no chunks have been returned yet, is faster than it might look.
04440     */
04441 
04442     ++idx;
04443     bin = bin_at(av,idx);
04444     block = idx2block(idx);
04445     map = av->binmap[block];
04446     bit = idx2bit(idx);
04447 
04448     for (;;) {
04449 
04450       /* Skip rest of block if there are no more set bits in this block.  */
04451       if (bit > map || bit == 0) {
04452         do {
04453           if (++block >= BINMAPSIZE)  /* out of bins */
04454             goto use_top;
04455         } while ( (map = av->binmap[block]) == 0);
04456 
04457         bin = bin_at(av, (block << BINMAPSHIFT));
04458         bit = 1;
04459       }
04460 
04461       /* Advance to bin with set bit. There must be one. */
04462       while ((bit & map) == 0) {
04463         bin = next_bin(bin);
04464         bit <<= 1;
04465         assert(bit != 0);
04466       }
04467 
04468       /* Inspect the bin. It is likely to be non-empty */
04469       victim = last(bin);
04470 
04471       /*  If a false alarm (empty bin), clear the bit. */
04472       if (victim == bin) {
04473         av->binmap[block] = map &= ~bit; /* Write through */
04474         bin = next_bin(bin);
04475         bit <<= 1;
04476       }
04477 
04478       else {
04479         size = chunksize(victim);
04480 
04481         /*  We know the first chunk in this bin is big enough to use. */
04482         assert((unsigned long)(size) >= (unsigned long)(nb));
04483 
04484         remainder_size = size - nb;
04485 
04486         /* unlink */
04487         unlink(victim, bck, fwd);
04488 
04489         /* Exhaust */
04490         if (remainder_size < MINSIZE) {
04491           set_inuse_bit_at_offset(victim, size);
04492          if (av != &main_arena)
04493            victim->size |= NON_MAIN_ARENA;
04494         }
04495 
04496         /* Split */
04497         else {
04498           remainder = chunk_at_offset(victim, nb);
04499 
04500          /* We cannot assume the unsorted list is empty and therefore
04501             have to perform a complete insert here.  */
04502          bck = unsorted_chunks(av);
04503          fwd = bck->fd;
04504          remainder->bk = bck;
04505          remainder->fd = fwd;
04506          bck->fd = remainder;
04507          fwd->bk = remainder;
04508 
04509           /* advertise as last remainder */
04510           if (in_smallbin_range(nb))
04511             av->last_remainder = remainder;
04512          if (!in_smallbin_range(remainder_size))
04513            {
04514              remainder->fd_nextsize = NULL;
04515              remainder->bk_nextsize = NULL;
04516            }
04517           set_head(victim, nb | PREV_INUSE |
04518                  (av != &main_arena ? NON_MAIN_ARENA : 0));
04519           set_head(remainder, remainder_size | PREV_INUSE);
04520           set_foot(remainder, remainder_size);
04521         }
04522        check_malloced_chunk(av, victim, nb);
04523        void *p = chunk2mem(victim);
04524        if (__builtin_expect (perturb_byte, 0))
04525          alloc_perturb (p, bytes);
04526        return p;
04527       }
04528     }
04529 
04530   use_top:
04531     /*
04532       If large enough, split off the chunk bordering the end of memory
04533       (held in av->top). Note that this is in accord with the best-fit
04534       search rule.  In effect, av->top is treated as larger (and thus
04535       less well fitting) than any other available chunk since it can
04536       be extended to be as large as necessary (up to system
04537       limitations).
04538 
04539       We require that av->top always exists (i.e., has size >=
04540       MINSIZE) after initialization, so if it would otherwise be
04541       exhausted by current request, it is replenished. (The main
04542       reason for ensuring it exists is that we may need MINSIZE space
04543       to put in fenceposts in sysmalloc.)
04544     */
04545 
04546     victim = av->top;
04547     size = chunksize(victim);
04548 
04549     if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
04550       remainder_size = size - nb;
04551       remainder = chunk_at_offset(victim, nb);
04552       av->top = remainder;
04553       set_head(victim, nb | PREV_INUSE |
04554               (av != &main_arena ? NON_MAIN_ARENA : 0));
04555       set_head(remainder, remainder_size | PREV_INUSE);
04556 
04557       check_malloced_chunk(av, victim, nb);
04558       void *p = chunk2mem(victim);
04559       if (__builtin_expect (perturb_byte, 0))
04560        alloc_perturb (p, bytes);
04561       return p;
04562     }
04563 
04564     /*
04565       If there is space available in fastbins, consolidate and retry,
04566       to possibly avoid expanding memory.  This can only happen if nb is
04567       in smallbin range, since for larger requests we consolidated upon entry.
04568     */
04569 
04570     else if (have_fastchunks(av)) {
04571       assert(in_smallbin_range(nb));
04572       malloc_consolidate(av);
04573       idx = smallbin_index(nb); /* restore original bin index */
04574     }
04575 
04576     /*
04577        Otherwise, relay to handle system-dependent cases
04578     */
04579     else {
04580       void *p = sYSMALLOc(nb, av);
04581       if (p != NULL && __builtin_expect (perturb_byte, 0))
04582        alloc_perturb (p, bytes);
04583       return p;
04584     }
04585   }
04586 }
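
/* An illustrative sketch of the perturb support used above: when the
   MALLOC_PERTURB_ environment variable is set to a nonzero value (for
   example MALLOC_PERTURB_=165), or mallopt(M_PERTURB, ...) is used,
   alloc_perturb() fills newly allocated memory with a fixed byte
   pattern, making reads of uninitialized heap memory easy to spot.
   The exact pattern is an implementation detail; this program merely
   dumps whatever malloc handed back.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  unsigned char *p = malloc (16);
  int i;

  if (p == NULL)
    return 1;
  for (i = 0; i < 16; i++)
    printf ("%02x ", p[i]);      /* perturbed pattern if MALLOC_PERTURB_ is set */
  putchar ('\n');
  free (p);
  return 0;
}
#endif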
04587 
04588 /*
04589   ------------------------------ free ------------------------------
04590 */
04591 
04592 void
04593 _int_free(mstate av, Void_t* mem)
04594 {
04595   mchunkptr       p;           /* chunk corresponding to mem */
04596   INTERNAL_SIZE_T size;        /* its size */
04597   mfastbinptr*    fb;          /* associated fastbin */
04598   mchunkptr       nextchunk;   /* next contiguous chunk */
04599   INTERNAL_SIZE_T nextsize;    /* its size */
04600   int             nextinuse;   /* true if nextchunk is used */
04601   INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
04602   mchunkptr       bck;         /* misc temp for linking */
04603   mchunkptr       fwd;         /* misc temp for linking */
04604 
04605   const char *errstr = NULL;
04606 
04607   p = mem2chunk(mem);
04608   size = chunksize(p);
04609 
04610   /* Little security check which won't hurt performance: the
04611      allocator never wraps around at the end of the address space.
04612      Therefore we can exclude some size values which might appear
04613      here by accident or by "design" from some intruder.  */
04614   if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
04615       || __builtin_expect (misaligned_chunk (p), 0))
04616     {
04617       errstr = "free(): invalid pointer";
04618     errout:
04619       malloc_printerr (check_action, errstr, mem);
04620       return;
04621     }
04622   /* We know that each chunk is at least MINSIZE bytes in size.  */
04623   if (__builtin_expect (size < MINSIZE, 0))
04624     {
04625       errstr = "free(): invalid size";
04626       goto errout;
04627     }
04628 
04629   check_inuse_chunk(av, p);
04630 
04631   /*
04632     If eligible, place chunk on a fastbin so it can be found
04633     and used quickly in malloc.
04634   */
04635 
04636   if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
04637 
04638 #if TRIM_FASTBINS
04639       /*
04640        If TRIM_FASTBINS set, don't place chunks
04641        bordering top into fastbins
04642       */
04643       && (chunk_at_offset(p, size) != av->top)
04644 #endif
04645       ) {
04646 
04647     if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
04648        || __builtin_expect (chunksize (chunk_at_offset (p, size))
04649                           >= av->system_mem, 0))
04650       {
04651        errstr = "free(): invalid next size (fast)";
04652        goto errout;
04653       }
04654 
04655     set_fastchunks(av);
04656     fb = &(av->fastbins[fastbin_index(size)]);
04657     /* Another simple check: make sure the top of the bin is not the
04658        record we are going to add (i.e., double free).  */
04659     if (__builtin_expect (*fb == p, 0))
04660       {
04661        errstr = "double free or corruption (fasttop)";
04662        goto errout;
04663       }
04664 
04665     if (__builtin_expect (perturb_byte, 0))
04666       free_perturb (mem, size - SIZE_SZ);
04667 
04668     p->fd = *fb;
04669     *fb = p;
04670   }
04671 
04672   /*
04673     Consolidate other non-mmapped chunks as they arrive.
04674   */
04675 
04676   else if (!chunk_is_mmapped(p)) {
04677     nextchunk = chunk_at_offset(p, size);
04678 
04679     /* Lightweight tests: check whether the block is already the
04680        top block.  */
04681     if (__builtin_expect (p == av->top, 0))
04682       {
04683        errstr = "double free or corruption (top)";
04684        goto errout;
04685       }
04686     /* Or whether the next chunk is beyond the boundaries of the arena.  */
04687     if (__builtin_expect (contiguous (av)
04688                        && (char *) nextchunk
04689                        >= ((char *) av->top + chunksize(av->top)), 0))
04690       {
04691        errstr = "double free or corruption (out)";
04692        goto errout;
04693       }
04694     /* Or whether the block is actually not marked used.  */
04695     if (__builtin_expect (!prev_inuse(nextchunk), 0))
04696       {
04697        errstr = "double free or corruption (!prev)";
04698        goto errout;
04699       }
04700 
04701     nextsize = chunksize(nextchunk);
04702     if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
04703        || __builtin_expect (nextsize >= av->system_mem, 0))
04704       {
04705        errstr = "free(): invalid next size (normal)";
04706        goto errout;
04707       }
04708 
04709     if (__builtin_expect (perturb_byte, 0))
04710       free_perturb (mem, size - SIZE_SZ);
04711 
04712     /* consolidate backward */
04713     if (!prev_inuse(p)) {
04714       prevsize = p->prev_size;
04715       size += prevsize;
04716       p = chunk_at_offset(p, -((long) prevsize));
04717       unlink(p, bck, fwd);
04718     }
04719 
04720     if (nextchunk != av->top) {
04721       /* get and clear inuse bit */
04722       nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
04723 
04724       /* consolidate forward */
04725       if (!nextinuse) {
04726        unlink(nextchunk, bck, fwd);
04727        size += nextsize;
04728       } else
04729        clear_inuse_bit_at_offset(nextchunk, 0);
04730 
04731       /*
04732        Place the chunk in unsorted chunk list. Chunks are
04733        not placed into regular bins until after they have
04734        been given one chance to be used in malloc.
04735       */
04736 
04737       bck = unsorted_chunks(av);
04738       fwd = bck->fd;
04739       p->fd = fwd;
04740       p->bk = bck;
04741       if (!in_smallbin_range(size))
04742        {
04743          p->fd_nextsize = NULL;
04744          p->bk_nextsize = NULL;
04745        }
04746       bck->fd = p;
04747       fwd->bk = p;
04748 
04749       set_head(p, size | PREV_INUSE);
04750       set_foot(p, size);
04751 
04752       check_free_chunk(av, p);
04753     }
04754 
04755     /*
04756       If the chunk borders the current high end of memory,
04757       consolidate into top
04758     */
04759 
04760     else {
04761       size += nextsize;
04762       set_head(p, size | PREV_INUSE);
04763       av->top = p;
04764       check_chunk(av, p);
04765     }
04766 
04767     /*
04768       If freeing a large space, consolidate possibly-surrounding
04769       chunks. Then, if the total unused topmost memory exceeds trim
04770       threshold, ask malloc_trim to reduce top.
04771 
04772       Unless max_fast is 0, we don't know if there are fastbins
04773       bordering top, so we cannot tell for sure whether threshold
04774       has been reached unless fastbins are consolidated.  But we
04775       don't want to consolidate on each free.  As a compromise,
04776       consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
04777       is reached.
04778     */
04779 
04780     if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
04781       if (have_fastchunks(av))
04782        malloc_consolidate(av);
04783 
04784       if (av == &main_arena) {
04785 #ifndef MORECORE_CANNOT_TRIM
04786        if ((unsigned long)(chunksize(av->top)) >=
04787            (unsigned long)(mp_.trim_threshold))
04788          sYSTRIm(mp_.top_pad, av);
04789 #endif
04790       } else {
04791        /* Always try heap_trim(), even if the top chunk is not
04792           large, because the corresponding heap might go away.  */
04793        heap_info *heap = heap_for_ptr(top(av));
04794 
04795        assert(heap->ar_ptr == av);
04796        heap_trim(heap, mp_.top_pad);
04797       }
04798     }
04799 
04800   }
04801   /*
04802     If the chunk was allocated via mmap, release via munmap(). Note
04803     that if HAVE_MMAP is false but chunk_is_mmapped is true, then
04804     the user must have overwritten memory. There's nothing we can do to
04805     catch this error unless MALLOC_DEBUG is set, in which case
04806     check_inuse_chunk (above) will have triggered an error.
04807   */
04808 
04809   else {
04810 #if HAVE_MMAP
04811     munmap_chunk (p);
04812 #endif
04813   }
04814 }
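
/* An illustrative sketch of the fastbin path above: a small freed chunk
   is typically handed straight back by the next malloc of the same
   size.  Reuse of the same address is an implementation detail, not a
   guarantee, so this only demonstrates the common case.  */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main (void)
{
  void *a, *b;
  uintptr_t old;

  a = malloc (32);
  old = (uintptr_t) a;
  free (a);                      /* goes into the fastbin for this size */
  b = malloc (32);               /* usually the very same chunk again */
  printf ("%s\n", (uintptr_t) b == old ? "reused" : "not reused");
  free (b);
  return 0;
}
#endif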
04815 
04816 /*
04817   ------------------------- malloc_consolidate -------------------------
04818 
04819   malloc_consolidate is a specialized version of free() that tears
04820   down chunks held in fastbins.  Free itself cannot be used for this
04821   purpose since, among other things, it might place chunks back onto
04822   fastbins.  So, instead, we need to use a minor variant of the same
04823   code.
04824 
04825   Also, because this routine needs to be called the first time through
04826   malloc anyway, it turns out to be the perfect place to trigger
04827   initialization code.
04828 */
04829 
04830 #if __STD_C
04831 static void malloc_consolidate(mstate av)
04832 #else
04833 static void malloc_consolidate(av) mstate av;
04834 #endif
04835 {
04836   mfastbinptr*    fb;                 /* current fastbin being consolidated */
04837   mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
04838   mchunkptr       p;                  /* current chunk being consolidated */
04839   mchunkptr       nextp;              /* next chunk to consolidate */
04840   mchunkptr       unsorted_bin;       /* bin header */
04841   mchunkptr       first_unsorted;     /* chunk to link to */
04842 
04843   /* These have same use as in free() */
04844   mchunkptr       nextchunk;
04845   INTERNAL_SIZE_T size;
04846   INTERNAL_SIZE_T nextsize;
04847   INTERNAL_SIZE_T prevsize;
04848   int             nextinuse;
04849   mchunkptr       bck;
04850   mchunkptr       fwd;
04851 
04852   /*
04853     If max_fast is 0, we know that av hasn't
04854     yet been initialized, in which case do so below
04855   */
04856 
04857   if (get_max_fast () != 0) {
04858     clear_fastchunks(av);
04859 
04860     unsorted_bin = unsorted_chunks(av);
04861 
04862     /*
04863       Remove each chunk from its fast bin, consolidate it, and then place
04864       it in the unsorted bin. Among other reasons for doing this, placing
04865       chunks in the unsorted bin avoids needing to calculate their actual
04866       bins until malloc is sure that they aren't immediately going to be
04867       reused anyway.
04868     */
04869 
04870 #if 0
04871     /* It is wrong to limit the fast bins to search using get_max_fast
04872        because, except for the main arena, all the others might have
04873        blocks in the high fast bins.  It's not worth it anyway, just
04874        search all bins all the time.  */
04875     maxfb = &(av->fastbins[fastbin_index(get_max_fast ())]);
04876 #else
04877     maxfb = &(av->fastbins[NFASTBINS - 1]);
04878 #endif
04879     fb = &(av->fastbins[0]);
04880     do {
04881       if ( (p = *fb) != 0) {
04882         *fb = 0;
04883 
04884         do {
04885           check_inuse_chunk(av, p);
04886           nextp = p->fd;
04887 
04888           /* Slightly streamlined version of consolidation code in free() */
04889           size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
04890           nextchunk = chunk_at_offset(p, size);
04891           nextsize = chunksize(nextchunk);
04892 
04893           if (!prev_inuse(p)) {
04894             prevsize = p->prev_size;
04895             size += prevsize;
04896             p = chunk_at_offset(p, -((long) prevsize));
04897             unlink(p, bck, fwd);
04898           }
04899 
04900           if (nextchunk != av->top) {
04901             nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
04902 
04903             if (!nextinuse) {
04904               size += nextsize;
04905               unlink(nextchunk, bck, fwd);
04906             } else
04907              clear_inuse_bit_at_offset(nextchunk, 0);
04908 
04909             first_unsorted = unsorted_bin->fd;
04910             unsorted_bin->fd = p;
04911             first_unsorted->bk = p;
04912 
04913             if (!in_smallbin_range (size)) {
04914              p->fd_nextsize = NULL;
04915              p->bk_nextsize = NULL;
04916            }
04917 
04918             set_head(p, size | PREV_INUSE);
04919             p->bk = unsorted_bin;
04920             p->fd = first_unsorted;
04921             set_foot(p, size);
04922           }
04923 
04924           else {
04925             size += nextsize;
04926             set_head(p, size | PREV_INUSE);
04927             av->top = p;
04928           }
04929 
04930         } while ( (p = nextp) != 0);
04931 
04932       }
04933     } while (fb++ != maxfb);
04934   }
04935   else {
04936     malloc_init_state(av);
04937     check_malloc_state(av);
04938   }
04939 }
04940 
04941 /*
04942   ------------------------------ realloc ------------------------------
04943 */
04944 
04945 Void_t*
04946 _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
04947 {
04948   INTERNAL_SIZE_T  nb;              /* padded request size */
04949 
04950   mchunkptr        oldp;            /* chunk corresponding to oldmem */
04951   INTERNAL_SIZE_T  oldsize;         /* its size */
04952 
04953   mchunkptr        newp;            /* chunk to return */
04954   INTERNAL_SIZE_T  newsize;         /* its size */
04955   Void_t*          newmem;          /* corresponding user mem */
04956 
04957   mchunkptr        next;            /* next contiguous chunk after oldp */
04958 
04959   mchunkptr        remainder;       /* extra space at end of newp */
04960   unsigned long    remainder_size;  /* its size */
04961 
04962   mchunkptr        bck;             /* misc temp for linking */
04963   mchunkptr        fwd;             /* misc temp for linking */
04964 
04965   unsigned long    copysize;        /* bytes to copy */
04966   unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
04967   INTERNAL_SIZE_T* s;               /* copy source */
04968   INTERNAL_SIZE_T* d;               /* copy destination */
04969 
04970   const char *errstr = NULL;
04971 
04972 
04973   checked_request2size(bytes, nb);
04974 
04975   oldp    = mem2chunk(oldmem);
04976   oldsize = chunksize(oldp);
04977 
04978   /* Simple tests for old block integrity.  */
04979   if (__builtin_expect (misaligned_chunk (oldp), 0))
04980     {
04981       errstr = "realloc(): invalid pointer";
04982     errout:
04983       malloc_printerr (check_action, errstr, oldmem);
04984       return NULL;
04985     }
04986   if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
04987       || __builtin_expect (oldsize >= av->system_mem, 0))
04988     {
04989       errstr = "realloc(): invalid old size";
04990       goto errout;
04991     }
04992 
04993   check_inuse_chunk(av, oldp);
04994 
04995   if (!chunk_is_mmapped(oldp)) {
04996 
04997     next = chunk_at_offset(oldp, oldsize);
04998     INTERNAL_SIZE_T nextsize = chunksize(next);
04999     if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
05000        || __builtin_expect (nextsize >= av->system_mem, 0))
05001       {
05002        errstr = "realloc(): invalid next size";
05003        goto errout;
05004       }
05005 
05006     if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
05007       /* already big enough; split below */
05008       newp = oldp;
05009       newsize = oldsize;
05010     }
05011 
05012     else {
05013       /* Try to expand forward into top */
05014       if (next == av->top &&
05015           (unsigned long)(newsize = oldsize + nextsize) >=
05016           (unsigned long)(nb + MINSIZE)) {
05017         set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
05018         av->top = chunk_at_offset(oldp, nb);
05019         set_head(av->top, (newsize - nb) | PREV_INUSE);
05020        check_inuse_chunk(av, oldp);
05021         return chunk2mem(oldp);
05022       }
05023 
05024       /* Try to expand forward into next chunk;  split off remainder below */
05025       else if (next != av->top &&
05026                !inuse(next) &&
05027                (unsigned long)(newsize = oldsize + nextsize) >=
05028                (unsigned long)(nb)) {
05029         newp = oldp;
05030         unlink(next, bck, fwd);
05031       }
05032 
05033       /* allocate, copy, free */
05034       else {
05035         newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
05036         if (newmem == 0)
05037           return 0; /* propagate failure */
05038 
05039         newp = mem2chunk(newmem);
05040         newsize = chunksize(newp);
05041 
05042         /*
05043           Avoid copy if newp is next chunk after oldp.
05044         */
05045         if (newp == next) {
05046           newsize += oldsize;
05047           newp = oldp;
05048         }
05049         else {
05050           /*
05051             Unroll copy of <= 36 bytes (72 with 8-byte sizes).
05052             We know that contents have an odd number of
05053             INTERNAL_SIZE_T-sized words; minimally 3.
05054           */
05055 
05056           copysize = oldsize - SIZE_SZ;
05057           s = (INTERNAL_SIZE_T*)(oldmem);
05058           d = (INTERNAL_SIZE_T*)(newmem);
05059           ncopies = copysize / sizeof(INTERNAL_SIZE_T);
05060           assert(ncopies >= 3);
05061 
05062           if (ncopies > 9)
05063             MALLOC_COPY(d, s, copysize);
05064 
05065           else {
05066             *(d+0) = *(s+0);
05067             *(d+1) = *(s+1);
05068             *(d+2) = *(s+2);
05069             if (ncopies > 4) {
05070               *(d+3) = *(s+3);
05071               *(d+4) = *(s+4);
05072               if (ncopies > 6) {
05073                 *(d+5) = *(s+5);
05074                 *(d+6) = *(s+6);
05075                 if (ncopies > 8) {
05076                   *(d+7) = *(s+7);
05077                   *(d+8) = *(s+8);
05078                 }
05079               }
05080             }
05081           }
05082 
05083           _int_free(av, oldmem);
05084           check_inuse_chunk(av, newp);
05085           return chunk2mem(newp);
05086         }
05087       }
05088     }
05089 
05090     /* If possible, free extra space in old or extended chunk */
05091 
05092     assert((unsigned long)(newsize) >= (unsigned long)(nb));
05093 
05094     remainder_size = newsize - nb;
05095 
05096     if (remainder_size < MINSIZE) { /* not enough extra to split off */
05097       set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
05098       set_inuse_bit_at_offset(newp, newsize);
05099     }
05100     else { /* split remainder */
05101       remainder = chunk_at_offset(newp, nb);
05102       set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
05103       set_head(remainder, remainder_size | PREV_INUSE |
05104               (av != &main_arena ? NON_MAIN_ARENA : 0));
05105       /* Mark remainder as inuse so free() won't complain */
05106       set_inuse_bit_at_offset(remainder, remainder_size);
05107       _int_free(av, chunk2mem(remainder));
05108     }
05109 
05110     check_inuse_chunk(av, newp);
05111     return chunk2mem(newp);
05112   }
05113 
05114   /*
05115     Handle mmap cases
05116   */
05117 
05118   else {
05119 #if HAVE_MMAP
05120 
05121 #if HAVE_MREMAP
05122     INTERNAL_SIZE_T offset = oldp->prev_size;
05123     size_t pagemask = mp_.pagesize - 1;
05124     char *cp;
05125     unsigned long sum;
05126 
05127     /* Note the extra SIZE_SZ overhead */
05128     newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
05129 
05130     /* don't need to remap if still within same page */
05131     if (oldsize == newsize - offset)
05132       return oldmem;
05133 
05134     cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
05135 
05136     if (cp != MAP_FAILED) {
05137 
05138       newp = (mchunkptr)(cp + offset);
05139       set_head(newp, (newsize - offset)|IS_MMAPPED);
05140 
05141       assert(aligned_OK(chunk2mem(newp)));
05142       assert((newp->prev_size == offset));
05143 
05144       /* update statistics */
05145       sum = mp_.mmapped_mem += newsize - oldsize;
05146       if (sum > (unsigned long)(mp_.max_mmapped_mem))
05147         mp_.max_mmapped_mem = sum;
05148 #ifdef NO_THREADS
05149       sum += main_arena.system_mem;
05150       if (sum > (unsigned long)(mp_.max_total_mem))
05151         mp_.max_total_mem = sum;
05152 #endif
05153 
05154       return chunk2mem(newp);
05155     }
05156 #endif
05157 
05158     /* Note the extra SIZE_SZ overhead. */
05159     if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
05160       newmem = oldmem; /* do nothing */
05161     else {
05162       /* Must alloc, copy, free. */
05163       newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
05164       if (newmem != 0) {
05165         MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
05166         _int_free(av, oldmem);
05167       }
05168     }
05169     return newmem;
05170 
05171 #else
05172     /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
05173     check_malloc_state(av);
05174     MALLOC_FAILURE_ACTION;
05175     return 0;
05176 #endif
05177   }
05178 }
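
/* An illustrative sketch of the realloc paths above: a request may be
   satisfied in place (splitting, or extending into the top chunk or an
   adjacent free chunk) or by allocate-copy-free, in which case the
   pointer changes.  Callers must always use the returned pointer.  */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

int main (void)
{
  char *buf, *bigger;
  uintptr_t old;

  buf = malloc (64);
  if (buf == NULL)
    return 1;
  strcpy (buf, "hello");
  old = (uintptr_t) buf;

  bigger = realloc (buf, 4096);
  if (bigger == NULL)
    {
      free (buf);                /* the old block is still valid on failure */
      return 1;
    }
  printf ("moved: %s, contents: %s\n",
          (uintptr_t) bigger == old ? "no" : "yes", bigger);
  free (bigger);
  return 0;
}
#endif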
05179 
05180 /*
05181   ------------------------------ memalign ------------------------------
05182 */
05183 
05184 Void_t*
05185 _int_memalign(mstate av, size_t alignment, size_t bytes)
05186 {
05187   INTERNAL_SIZE_T nb;             /* padded  request size */
05188   char*           m;              /* memory returned by malloc call */
05189   mchunkptr       p;              /* corresponding chunk */
05190   char*           brk;            /* alignment point within p */
05191   mchunkptr       newp;           /* chunk to return */
05192   INTERNAL_SIZE_T newsize;        /* its size */
05193   INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
05194   mchunkptr       remainder;      /* spare room at end to split off */
05195   unsigned long   remainder_size; /* its size */
05196   INTERNAL_SIZE_T size;
05197 
05198   /* If the request needs less alignment than we provide anyway, just relay to malloc */
05199 
05200   if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
05201 
05202   /* Otherwise, ensure that it is at least a minimum chunk size */
05203 
05204   if (alignment <  MINSIZE) alignment = MINSIZE;
05205 
05206   /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
05207   if ((alignment & (alignment - 1)) != 0) {
05208     size_t a = MALLOC_ALIGNMENT * 2;
05209     while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
05210     alignment = a;
05211   }
05212 
05213   checked_request2size(bytes, nb);
05214 
05215   /*
05216     Strategy: allocate with enough slack, find an aligned spot within that
05217     chunk, and then possibly free the leading and trailing space.
05218   */
05219 
05220 
05221   /* Call malloc with worst case padding to hit alignment. */
05222 
05223   m  = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
05224 
05225   if (m == 0) return 0; /* propagate failure */
05226 
05227   p = mem2chunk(m);
05228 
05229   if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
05230 
05231     /*
05232       Find an aligned spot inside chunk.  Since we need to give back
05233       leading space in a chunk of at least MINSIZE, if the first
05234       calculation places us at a spot with less than MINSIZE leader,
05235       we can move to the next aligned spot -- we've allocated enough
05236       total room so that this is always possible.
05237     */
05238 
05239     brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
05240                            -((signed long) alignment));
05241     if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
05242       brk += alignment;
05243 
05244     newp = (mchunkptr)brk;
05245     leadsize = brk - (char*)(p);
05246     newsize = chunksize(p) - leadsize;
05247 
05248     /* For mmapped chunks, just adjust offset */
05249     if (chunk_is_mmapped(p)) {
05250       newp->prev_size = p->prev_size + leadsize;
05251       set_head(newp, newsize|IS_MMAPPED);
05252       return chunk2mem(newp);
05253     }
05254 
05255     /* Otherwise, give back leader, use the rest */
05256     set_head(newp, newsize | PREV_INUSE |
05257             (av != &main_arena ? NON_MAIN_ARENA : 0));
05258     set_inuse_bit_at_offset(newp, newsize);
05259     set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
05260     _int_free(av, chunk2mem(p));
05261     p = newp;
05262 
05263     assert (newsize >= nb &&
05264             (((unsigned long)(chunk2mem(p))) % alignment) == 0);
05265   }
05266 
05267   /* Also give back spare room at the end */
05268   if (!chunk_is_mmapped(p)) {
05269     size = chunksize(p);
05270     if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
05271       remainder_size = size - nb;
05272       remainder = chunk_at_offset(p, nb);
05273       set_head(remainder, remainder_size | PREV_INUSE |
05274               (av != &main_arena ? NON_MAIN_ARENA : 0));
05275       set_head_size(p, nb);
05276       _int_free(av, chunk2mem(remainder));
05277     }
05278   }
05279 
05280   check_inuse_chunk(av, p);
05281   return chunk2mem(p);
05282 }
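
/* An illustrative sketch of the strategy above, through the public
   memalign() interface declared in <malloc.h>: the returned block is
   aligned as requested, and any leading or trailing slack is given
   back to the allocator.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

int main (void)
{
  void *p = memalign (64, 1000);          /* 64-byte-aligned block */

  if (p == NULL)
    return 1;
  printf ("aligned to 64: %d\n", (int) (((unsigned long) p % 64) == 0));
  free (p);
  return 0;
}
#endif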
05283 
05284 #if 0
05285 /*
05286   ------------------------------ calloc ------------------------------
05287 */
05288 
05289 #if __STD_C
05290 Void_t* cALLOc(size_t n_elements, size_t elem_size)
05291 #else
05292 Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
05293 #endif
05294 {
05295   mchunkptr p;
05296   unsigned long clearsize;
05297   unsigned long nclears;
05298   INTERNAL_SIZE_T* d;
05299 
05300   Void_t* mem = mALLOc(n_elements * elem_size);
05301 
05302   if (mem != 0) {
05303     p = mem2chunk(mem);
05304 
05305 #if MMAP_CLEARS
05306     if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */
05307 #endif
05308     {
05309       /*
05310         Unroll clear of <= 36 bytes (72 with 8-byte sizes).
05311         We know that contents have an odd number of
05312         INTERNAL_SIZE_T-sized words; minimally 3.
05313       */
05314 
05315       d = (INTERNAL_SIZE_T*)mem;
05316       clearsize = chunksize(p) - SIZE_SZ;
05317       nclears = clearsize / sizeof(INTERNAL_SIZE_T);
05318       assert(nclears >= 3);
05319 
05320       if (nclears > 9)
05321         MALLOC_ZERO(d, clearsize);
05322 
05323       else {
05324         *(d+0) = 0;
05325         *(d+1) = 0;
05326         *(d+2) = 0;
05327         if (nclears > 4) {
05328           *(d+3) = 0;
05329           *(d+4) = 0;
05330           if (nclears > 6) {
05331             *(d+5) = 0;
05332             *(d+6) = 0;
05333             if (nclears > 8) {
05334               *(d+7) = 0;
05335               *(d+8) = 0;
05336             }
05337           }
05338         }
05339       }
05340     }
05341   }
05342   return mem;
05343 }
05344 #endif /* 0 */
05345 
05346 #ifndef _LIBC
05347 /*
05348   ------------------------- independent_calloc -------------------------
05349 */
05350 
05351 Void_t**
05352 #if __STD_C
05353 _int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
05354 #else
05355 _int_icalloc(av, n_elements, elem_size, chunks)
05356 mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[];
05357 #endif
05358 {
05359   size_t sz = elem_size; /* serves as 1-element array */
05360   /* opts arg of 3 means all elements are same size, and should be cleared */
05361   return iALLOc(av, n_elements, &sz, 3, chunks);
05362 }
05363 
05364 /*
05365   ------------------------- independent_comalloc -------------------------
05366 */
05367 
05368 Void_t**
05369 #if __STD_C
05370 _int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[])
05371 #else
05372 _int_icomalloc(av, n_elements, sizes, chunks)
05373 mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[];
05374 #endif
05375 {
05376   return iALLOc(av, n_elements, sizes, 0, chunks);
05377 }
05378 
05379 
05380 /*
05381   ------------------------------ ialloc ------------------------------
05382   ialloc provides common support for independent_X routines, handling all of
05383   the combinations that can result.
05384 
05385   The opts arg has:
05386     bit 0 set if all elements are same size (using sizes[0])
05387     bit 1 set if elements should be zeroed
05388 */
05389 
05390 
05391 static Void_t**
05392 #if __STD_C
05393 iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
05394 #else
05395 iALLOc(av, n_elements, sizes, opts, chunks)
05396 mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
05397 #endif
05398 {
05399   INTERNAL_SIZE_T element_size;   /* chunksize of each element, if all same */
05400   INTERNAL_SIZE_T contents_size;  /* total size of elements */
05401   INTERNAL_SIZE_T array_size;     /* request size of pointer array */
05402   Void_t*         mem;            /* malloced aggregate space */
05403   mchunkptr       p;              /* corresponding chunk */
05404   INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
05405   Void_t**        marray;         /* either "chunks" or malloced ptr array */
05406   mchunkptr       array_chunk;    /* chunk for malloced ptr array */
05407   int             mmx;            /* to disable mmap */
05408   INTERNAL_SIZE_T size;
05409   INTERNAL_SIZE_T size_flags;
05410   size_t          i;
05411 
05412   /* Ensure initialization/consolidation */
05413   if (have_fastchunks(av)) malloc_consolidate(av);
05414 
05415   /* compute array length, if needed */
05416   if (chunks != 0) {
05417     if (n_elements == 0)
05418       return chunks; /* nothing to do */
05419     marray = chunks;
05420     array_size = 0;
05421   }
05422   else {
05423     /* if empty req, must still return chunk representing empty array */
05424     if (n_elements == 0)
05425       return (Void_t**) _int_malloc(av, 0);
05426     marray = 0;
05427     array_size = request2size(n_elements * (sizeof(Void_t*)));
05428   }
05429 
05430   /* compute total element size */
05431   if (opts & 0x1) { /* all-same-size */
05432     element_size = request2size(*sizes);
05433     contents_size = n_elements * element_size;
05434   }
05435   else { /* add up all the sizes */
05436     element_size = 0;
05437     contents_size = 0;
05438     for (i = 0; i != n_elements; ++i)
05439       contents_size += request2size(sizes[i]);
05440   }
05441 
05442   /* subtract out alignment bytes from total to minimize overallocation */
05443   size = contents_size + array_size - MALLOC_ALIGN_MASK;
05444 
05445   /*
05446      Allocate the aggregate chunk.
05447      But first disable mmap so malloc won't use it, since
05448      we would not be able to later free/realloc space internal
05449      to a segregated mmap region.
05450   */
05451   mmx = mp_.n_mmaps_max;   /* disable mmap */
05452   mp_.n_mmaps_max = 0;
05453   mem = _int_malloc(av, size);
05454   mp_.n_mmaps_max = mmx;   /* reset mmap */
05455   if (mem == 0)
05456     return 0;
05457 
05458   p = mem2chunk(mem);
05459   assert(!chunk_is_mmapped(p));
05460   remainder_size = chunksize(p);
05461 
05462   if (opts & 0x2) {       /* optionally clear the elements */
05463     MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
05464   }
05465 
05466   size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);
05467 
05468   /* If not provided, allocate the pointer array as final part of chunk */
05469   if (marray == 0) {
05470     array_chunk = chunk_at_offset(p, contents_size);
05471     marray = (Void_t**) (chunk2mem(array_chunk));
05472     set_head(array_chunk, (remainder_size - contents_size) | size_flags);
05473     remainder_size = contents_size;
05474   }
05475 
05476   /* split out elements */
05477   for (i = 0; ; ++i) {
05478     marray[i] = chunk2mem(p);
05479     if (i != n_elements-1) {
05480       if (element_size != 0)
05481         size = element_size;
05482       else
05483         size = request2size(sizes[i]);
05484       remainder_size -= size;
05485       set_head(p, size | size_flags);
05486       p = chunk_at_offset(p, size);
05487     }
05488     else { /* the final element absorbs any overallocation slop */
05489       set_head(p, remainder_size | size_flags);
05490       break;
05491     }
05492   }
05493 
05494 #if MALLOC_DEBUG
05495   if (marray != chunks) {
05496     /* final element must have exactly exhausted chunk */
05497     if (element_size != 0)
05498       assert(remainder_size == element_size);
05499     else
05500       assert(remainder_size == request2size(sizes[i]));
05501     check_inuse_chunk(av, mem2chunk(marray));
05502   }
05503 
05504   for (i = 0; i != n_elements; ++i)
05505     check_inuse_chunk(av, mem2chunk(marray[i]));
05506 #endif
05507 
05508   return marray;
05509 }
05510 #endif /* _LIBC */
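
/* An illustrative sketch of the iALLOc() interface above, assuming a
   standalone (non-libc) build where an independent_comalloc() wrapper
   is exposed with the usual dlmalloc/ptmalloc2 signature; both the
   wrapper name and its availability are assumptions about that build.
   One aggregate chunk is carved into individually freeable pieces of
   the requested sizes.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

/* Prototype as documented for dlmalloc/ptmalloc2 builds (assumed).  */
extern void **independent_comalloc (size_t, size_t *, void **);

int main (void)
{
  size_t sizes[3] = { 16, 100, 40 };
  void *pieces[3];

  if (independent_comalloc (3, sizes, pieces) == NULL)
    return 1;
  printf ("%p %p %p\n", pieces[0], pieces[1], pieces[2]);
  /* Each piece may later be freed separately, in any order.  */
  free (pieces[1]);
  free (pieces[0]);
  free (pieces[2]);
  return 0;
}
#endif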
05511 
05512 
05513 /*
05514   ------------------------------ valloc ------------------------------
05515 */
05516 
05517 Void_t*
05518 #if __STD_C
05519 _int_valloc(mstate av, size_t bytes)
05520 #else
05521 _int_valloc(av, bytes) mstate av; size_t bytes;
05522 #endif
05523 {
05524   /* Ensure initialization/consolidation */
05525   if (have_fastchunks(av)) malloc_consolidate(av);
05526   return _int_memalign(av, mp_.pagesize, bytes);
05527 }
05528 
05529 /*
05530   ------------------------------ pvalloc ------------------------------
05531 */
05532 
05533 
05534 Void_t*
05535 #if __STD_C
05536 _int_pvalloc(mstate av, size_t bytes)
05537 #else
05538 _int_pvalloc(av, bytes) mstate av; size_t bytes;
05539 #endif
05540 {
05541   size_t pagesz;
05542 
05543   /* Ensure initialization/consolidation */
05544   if (have_fastchunks(av)) malloc_consolidate(av);
05545   pagesz = mp_.pagesize;
05546   return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
05547 }
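
/* An illustrative sketch of the two wrappers above: valloc() returns
   page-aligned memory, and pvalloc() additionally rounds the request
   up to a whole number of pages before aligning.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <unistd.h>

int main (void)
{
  long pagesize = sysconf (_SC_PAGESIZE);
  void *v = valloc (100);
  void *pv = pvalloc (100);

  if (v == NULL || pv == NULL)
    return 1;
  printf ("valloc aligned:  %d\n", (int) (((unsigned long) v % pagesize) == 0));
  printf ("pvalloc aligned: %d\n", (int) (((unsigned long) pv % pagesize) == 0));
  free (v);
  free (pv);
  return 0;
}
#endif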
05548 
05549 
05550 /*
05551   ------------------------------ malloc_trim ------------------------------
05552 */
05553 
05554 #if __STD_C
05555 static int mTRIm(mstate av, size_t pad)
05556 #else
05557 static int mTRIm(av, pad) mstate av; size_t pad;
05558 #endif
05559 {
05560   /* Ensure initialization/consolidation */
05561   malloc_consolidate (av);
05562 
05563   const size_t ps = mp_.pagesize;
05564   int psindex = bin_index (ps);
05565   const size_t psm1 = ps - 1;
05566 
05567   int result = 0;
05568   for (int i = 1; i < NBINS; ++i)
05569     if (i == 1 || i >= psindex)
05570       {
05571         mbinptr bin = bin_at (av, i);
05572 
05573         for (mchunkptr p = last (bin); p != bin; p = p->bk)
05574          {
05575            INTERNAL_SIZE_T size = chunksize (p);
05576 
05577            if (size > psm1 + sizeof (struct malloc_chunk))
05578              {
05579               /* See whether the chunk contains at least one unused page.  */
05580               char *paligned_mem = (char *) (((uintptr_t) p
05581                                           + sizeof (struct malloc_chunk)
05582                                           + psm1) & ~psm1);
05583 
05584               assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
05585               assert ((char *) p + size > paligned_mem);
05586 
05587               /* This is the size we could potentially free.  */
05588               size -= paligned_mem - (char *) p;
05589 
05590               if (size > psm1)
05591                 {
05592 #ifdef MALLOC_DEBUG
05593                   /* When debugging we simulate destroying the memory
05594                      content.  */
05595                   memset (paligned_mem, 0x89, size & ~psm1);
05596 #endif
05597                   madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
05598 
05599                   result = 1;
05600                 }
05601              }
05602          }
05603       }
05604 
05605 #ifndef MORECORE_CANNOT_TRIM
05606   return result | (av == &main_arena ? sYSTRIm (pad, av) : 0);
05607 #else
05608   return result;
05609 #endif
05610 }
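
/* An illustrative sketch of the trimming above through the public
   malloc_trim() entry point: after a large amount of memory has been
   freed, it can return unused pages to the system, both from the top
   of the heap and (via madvise) from inside large free chunks.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

int main (void)
{
  char *blocks[256];
  int i;

  for (i = 0; i < 256; i++)
    blocks[i] = malloc (64 * 1024);
  for (i = 0; i < 256; i++)
    free (blocks[i]);

  /* Keep no extra padding at the top; the return value is 1 if any
     memory was actually released back to the system.  */
  printf ("trimmed: %d\n", malloc_trim (0));
  return 0;
}
#endif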
05611 
05612 
05613 /*
05614   ------------------------- malloc_usable_size -------------------------
05615 */
05616 
05617 #if __STD_C
05618 size_t mUSABLe(Void_t* mem)
05619 #else
05620 size_t mUSABLe(mem) Void_t* mem;
05621 #endif
05622 {
05623   mchunkptr p;
05624   if (mem != 0) {
05625     p = mem2chunk(mem);
05626     if (chunk_is_mmapped(p))
05627       return chunksize(p) - 2*SIZE_SZ;
05628     else if (inuse(p))
05629       return chunksize(p) - SIZE_SZ;
05630   }
05631   return 0;
05632 }
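
/* An illustrative sketch of the calculation above, via the public
   malloc_usable_size(): the usable size of a chunk may exceed the
   requested size because of alignment and minimum-chunk rounding.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

int main (void)
{
  void *p = malloc (5);

  if (p == NULL)
    return 1;
  /* Typically prints a value larger than 5 (e.g. 24 on 64-bit glibc),
     but the exact number is an implementation detail.  */
  printf ("requested 5, usable %zu\n", malloc_usable_size (p));
  free (p);
  return 0;
}
#endif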
05633 
05634 /*
05635   ------------------------------ mallinfo ------------------------------
05636 */
05637 
05638 struct mallinfo mALLINFo(mstate av)
05639 {
05640   struct mallinfo mi;
05641   size_t i;
05642   mbinptr b;
05643   mchunkptr p;
05644   INTERNAL_SIZE_T avail;
05645   INTERNAL_SIZE_T fastavail;
05646   int nblocks;
05647   int nfastblocks;
05648 
05649   /* Ensure initialization */
05650   if (av->top == 0)  malloc_consolidate(av);
05651 
05652   check_malloc_state(av);
05653 
05654   /* Account for top */
05655   avail = chunksize(av->top);
05656   nblocks = 1;  /* top always exists */
05657 
05658   /* traverse fastbins */
05659   nfastblocks = 0;
05660   fastavail = 0;
05661 
05662   for (i = 0; i < NFASTBINS; ++i) {
05663     for (p = av->fastbins[i]; p != 0; p = p->fd) {
05664       ++nfastblocks;
05665       fastavail += chunksize(p);
05666     }
05667   }
05668 
05669   avail += fastavail;
05670 
05671   /* traverse regular bins */
05672   for (i = 1; i < NBINS; ++i) {
05673     b = bin_at(av, i);
05674     for (p = last(b); p != b; p = p->bk) {
05675       ++nblocks;
05676       avail += chunksize(p);
05677     }
05678   }
05679 
05680   mi.smblks = nfastblocks;
05681   mi.ordblks = nblocks;
05682   mi.fordblks = avail;
05683   mi.uordblks = av->system_mem - avail;
05684   mi.arena = av->system_mem;
05685   mi.hblks = mp_.n_mmaps;
05686   mi.hblkhd = mp_.mmapped_mem;
05687   mi.fsmblks = fastavail;
05688   mi.keepcost = chunksize(av->top);
05689   mi.usmblks = mp_.max_total_mem;
05690   return mi;
05691 }
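
/* An illustrative sketch of the statistics gathered above, via the
   public mallinfo() call.  Field meanings follow the conventions used
   in mALLINFo: arena = bytes obtained from the system for this arena,
   uordblks = bytes in use, fordblks = free bytes, keepcost = size of
   the releasable top chunk.  */
#if 0
#include <stdio.h>
#include <malloc.h>

int main (void)
{
  struct mallinfo mi = mallinfo ();

  printf ("arena    = %d\n", mi.arena);
  printf ("uordblks = %d\n", mi.uordblks);
  printf ("fordblks = %d\n", mi.fordblks);
  printf ("keepcost = %d\n", mi.keepcost);
  return 0;
}
#endif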
05692 
05693 /*
05694   ------------------------------ malloc_stats ------------------------------
05695 */
05696 
05697 void mSTATs()
05698 {
05699   int i;
05700   mstate ar_ptr;
05701   struct mallinfo mi;
05702   unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
05703 #if THREAD_STATS
05704   long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
05705 #endif
05706 
05707   if(__malloc_initialized < 0)
05708     ptmalloc_init ();
05709 #ifdef _LIBC
05710   _IO_flockfile (stderr);
05711   int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
05712   ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
05713 #endif
05714   for (i=0, ar_ptr = &main_arena;; i++) {
05715     (void)mutex_lock(&ar_ptr->mutex);
05716     mi = mALLINFo(ar_ptr);
05717     fprintf(stderr, "Arena %d:\n", i);
05718     fprintf(stderr, "system bytes     = %10u\n", (unsigned int)mi.arena);
05719     fprintf(stderr, "in use bytes     = %10u\n", (unsigned int)mi.uordblks);
05720 #if MALLOC_DEBUG > 1
05721     if (i > 0)
05722       dump_heap(heap_for_ptr(top(ar_ptr)));
05723 #endif
05724     system_b += mi.arena;
05725     in_use_b += mi.uordblks;
05726 #if THREAD_STATS
05727     stat_lock_direct += ar_ptr->stat_lock_direct;
05728     stat_lock_loop += ar_ptr->stat_lock_loop;
05729     stat_lock_wait += ar_ptr->stat_lock_wait;
05730 #endif
05731     (void)mutex_unlock(&ar_ptr->mutex);
05732     ar_ptr = ar_ptr->next;
05733     if(ar_ptr == &main_arena) break;
05734   }
05735 #if HAVE_MMAP
05736   fprintf(stderr, "Total (incl. mmap):\n");
05737 #else
05738   fprintf(stderr, "Total:\n");
05739 #endif
05740   fprintf(stderr, "system bytes     = %10u\n", system_b);
05741   fprintf(stderr, "in use bytes     = %10u\n", in_use_b);
05742 #ifdef NO_THREADS
05743   fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem);
05744 #endif
05745 #if HAVE_MMAP
05746   fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
05747   fprintf(stderr, "max mmap bytes   = %10lu\n",
05748          (unsigned long)mp_.max_mmapped_mem);
05749 #endif
05750 #if THREAD_STATS
05751   fprintf(stderr, "heaps created    = %10d\n",  stat_n_heaps);
05752   fprintf(stderr, "locked directly  = %10ld\n", stat_lock_direct);
05753   fprintf(stderr, "locked in loop   = %10ld\n", stat_lock_loop);
05754   fprintf(stderr, "locked waiting   = %10ld\n", stat_lock_wait);
05755   fprintf(stderr, "locked total     = %10ld\n",
05756           stat_lock_direct + stat_lock_loop + stat_lock_wait);
05757 #endif
05758 #ifdef _LIBC
05759   ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
05760   _IO_funlockfile (stderr);
05761 #endif
05762 }
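
/* An illustrative sketch of the reporting above: the public
   malloc_stats() call prints the per-arena and total figures computed
   here to stderr.  */
#if 0
#include <stdlib.h>
#include <malloc.h>

int main (void)
{
  void *p = malloc (1000);

  malloc_stats ();              /* "Arena 0:", system/in-use bytes, ... */
  free (p);
  return 0;
}
#endif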
05763 
05764 
05765 /*
05766   ------------------------------ mallopt ------------------------------
05767 */
05768 
05769 #if __STD_C
05770 int mALLOPt(int param_number, int value)
05771 #else
05772 int mALLOPt(param_number, value) int param_number; int value;
05773 #endif
05774 {
05775   mstate av = &main_arena;
05776   int res = 1;
05777 
05778   if(__malloc_initialized < 0)
05779     ptmalloc_init ();
05780   (void)mutex_lock(&av->mutex);
05781   /* Ensure initialization/consolidation */
05782   malloc_consolidate(av);
05783 
05784   switch(param_number) {
05785   case M_MXFAST:
05786     if (value >= 0 && value <= MAX_FAST_SIZE) {
05787       set_max_fast(value);
05788     }
05789     else
05790       res = 0;
05791     break;
05792 
05793   case M_TRIM_THRESHOLD:
05794     mp_.trim_threshold = value;
05795     mp_.no_dyn_threshold = 1;
05796     break;
05797 
05798   case M_TOP_PAD:
05799     mp_.top_pad = value;
05800     mp_.no_dyn_threshold = 1;
05801     break;
05802 
05803   case M_MMAP_THRESHOLD:
05804 #if USE_ARENAS
05805     /* Forbid setting the threshold too high. */
05806     if((unsigned long)value > HEAP_MAX_SIZE/2)
05807       res = 0;
05808     else
05809 #endif
05810       mp_.mmap_threshold = value;
05811       mp_.no_dyn_threshold = 1;
05812     break;
05813 
05814   case M_MMAP_MAX:
05815 #if !HAVE_MMAP
05816     if (value != 0)
05817       res = 0;
05818     else
05819 #endif
05820       mp_.n_mmaps_max = value;
05821       mp_.no_dyn_threshold = 1;
05822     break;
05823 
05824   case M_CHECK_ACTION:
05825     check_action = value;
05826     break;
05827 
05828   case M_PERTURB:
05829     perturb_byte = value;
05830     break;
05831   }
05832   (void)mutex_unlock(&av->mutex);
05833   return res;
05834 }
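
/* An illustrative sketch of the tuning interface above, via the public
   mallopt() call with parameters declared in <malloc.h>.  As the cases
   above show, explicitly setting either threshold also disables the
   dynamic mmap-threshold adjustment (no_dyn_threshold).  */
#if 0
#include <stdio.h>
#include <malloc.h>

int main (void)
{
  /* Ask for mmap to be used for requests of 1 MiB and larger, and keep
     at most 4 MiB of trailing free space before trimming.  mallopt()
     returns 1 on success and 0 on failure.  */
  int ok1 = mallopt (M_MMAP_THRESHOLD, 1024 * 1024);
  int ok2 = mallopt (M_TRIM_THRESHOLD, 4 * 1024 * 1024);

  printf ("mallopt results: %d %d\n", ok1, ok2);
  return 0;
}
#endif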
05835 
05836 
05837 /*
05838   -------------------- Alternative MORECORE functions --------------------
05839 */
05840 
05841 
05842 /*
05843   General Requirements for MORECORE.
05844 
05845   The MORECORE function must have the following properties:
05846 
05847   If MORECORE_CONTIGUOUS is false:
05848 
05849     * MORECORE must allocate in multiples of pagesize. It will
05850       only be called with arguments that are multiples of pagesize.
05851 
05852     * MORECORE(0) must return an address that is at least
05853       MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
05854 
05855   else (i.e. If MORECORE_CONTIGUOUS is true):
05856 
05857     * Consecutive calls to MORECORE with positive arguments
05858       return increasing addresses, indicating that space has been
05859       contiguously extended.
05860 
05861     * MORECORE need not allocate in multiples of pagesize.
05862       Calls to MORECORE need not have args of multiples of pagesize.
05863 
05864     * MORECORE need not page-align.
05865 
05866   In either case:
05867 
05868     * MORECORE may allocate more memory than requested. (Or even less,
05869       but this will generally result in a malloc failure.)
05870 
05871     * MORECORE must not allocate memory when given argument zero, but
05872       instead return one past the end address of memory from previous
05873       nonzero call. This malloc does NOT call MORECORE(0)
05874       until at least one call with positive arguments is made, so
05875       the initial value returned is not important.
05876 
05877     * Even though consecutive calls to MORECORE need not return contiguous
05878       addresses, it must be OK for malloc'ed chunks to span multiple
05879       regions in those cases where they do happen to be contiguous.
05880 
05881     * MORECORE need not handle negative arguments -- it may instead
05882       just return MORECORE_FAILURE when given negative arguments.
05883       Negative arguments are always multiples of pagesize. MORECORE
05884       must not misinterpret negative args as large positive unsigned
05885       args. You can suppress all such calls from ever occurring by defining
05886       MORECORE_CANNOT_TRIM.
05887 
05888   There is some variation across systems about the type of the
05889   argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
05890   actually be size_t, because sbrk supports negative args, so it is
05891   normally the signed type of the same width as size_t (sometimes
05892   declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
05893   matter though. Internally, we use "long" as arguments, which should
05894   work across all reasonable possibilities.
05895 
05896   Additionally, if MORECORE ever returns failure for a positive
05897   request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
05898   system allocator. This is a useful backup strategy for systems with
05899   holes in address spaces -- in this case sbrk cannot contiguously
05900   expand the heap, but mmap may be able to map noncontiguous space.
05901 
05902   If you'd like mmap to ALWAYS be used, you can define MORECORE to be
05903   a function that always returns MORECORE_FAILURE.
05904 
05905   If you are using this malloc with something other than sbrk (or its
05906   emulation) to supply memory regions, you probably want to set
05907   MORECORE_CONTIGUOUS as false.  As an example, here is a custom
05908   allocator kindly contributed for pre-OSX macOS.  It uses virtually
05909   but not necessarily physically contiguous non-paged memory (locked
05910   in, present and won't get swapped out).  You can use it by
05911   uncommenting this section, adding some #includes, and setting up the
05912   appropriate defines above:
05913 
05914       #define MORECORE osMoreCore
05915       #define MORECORE_CONTIGUOUS 0
05916 
05917   There is also a shutdown routine that should somehow be called for
05918   cleanup upon program exit.
05919 
05920   #define MAX_POOL_ENTRIES 100
05921   #define MINIMUM_MORECORE_SIZE  (64 * 1024)
05922   static int next_os_pool;
05923   void *our_os_pools[MAX_POOL_ENTRIES];
05924 
05925   void *osMoreCore(int size)
05926   {
05927     void *ptr = 0;
05928     static void *sbrk_top = 0;
05929 
05930     if (size > 0)
05931     {
05932       if (size < MINIMUM_MORECORE_SIZE)
05933          size = MINIMUM_MORECORE_SIZE;
05934       if (CurrentExecutionLevel() == kTaskLevel)
05935          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
05936       if (ptr == 0)
05937       {
05938         return (void *) MORECORE_FAILURE;
05939       }
05940       // save ptrs so they can be freed during cleanup
05941       our_os_pools[next_os_pool] = ptr;
05942       next_os_pool++;
05943       ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
05944       sbrk_top = (char *) ptr + size;
05945       return ptr;
05946     }
05947     else if (size < 0)
05948     {
05949       // we don't currently support shrink behavior
05950       return (void *) MORECORE_FAILURE;
05951     }
05952     else
05953     {
05954       return sbrk_top;
05955     }
05956   }
05957 
05958   // cleanup any allocated memory pools
05959   // called as last thing before shutting down driver
05960 
05961   void osCleanupMem(void)
05962   {
05963     void **ptr;
05964 
05965     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
05966       if (*ptr)
05967       {
05968          PoolDeallocate(*ptr);
05969          *ptr = 0;
05970       }
05971   }
05972 
05973 */
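
/* An illustrative sketch of the option mentioned above: defining
   MORECORE as a function that always fails makes the allocator fall
   back to mmap for every system request.  The parameter type of
   MORECORE depends on the configuration (a signed integer type, as
   discussed above), so "long" here is an assumption.

       #define MORECORE fail_morecore
       #define MORECORE_CONTIGUOUS 0
*/
#if 0
static void *fail_morecore (long increment)
{
  (void) increment;                 /* never extend the brk segment */
  return (void *) MORECORE_FAILURE; /* force sysmalloc to use mmap */
}
#endif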
05974 
05975 
05976 /* Helper code.  */
05977 
05978 extern char **__libc_argv attribute_hidden;
05979 
05980 static void
05981 malloc_printerr(int action, const char *str, void *ptr)
05982 {
05983   if ((action & 5) == 5)
05984     __libc_message (action & 2, "%s\n", str);
05985   else if (action & 1)
05986     {
05987       char buf[2 * sizeof (uintptr_t) + 1];
05988 
05989       buf[sizeof (buf) - 1] = '\0';
05990       char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
05991       while (cp > buf)
05992        *--cp = '0';
05993 
05994       __libc_message (action & 2,
05995                     "*** glibc detected *** %s: %s: 0x%s ***\n",
05996                     __libc_argv[0] ?: "<unknown>", str, cp);
05997     }
05998   else if (action & 2)
05999     abort ();
06000 }
06001 
06002 #ifdef _LIBC
06003 # include <sys/param.h>
06004 
06005 /* We need a wrapper function for one of the additions of POSIX.  */
06006 int
06007 __posix_memalign (void **memptr, size_t alignment, size_t size)
06008 {
06009   void *mem;
06010   __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
06011                                    __const __malloc_ptr_t)) =
06012     __memalign_hook;
06013 
06014   /* Test whether the ALIGNMENT argument is valid.  It must be a power of
06015      two multiple of sizeof (void *).  */
06016   if (alignment % sizeof (void *) != 0
06017       || !powerof2 (alignment / sizeof (void *))
06018       || alignment == 0)
06019     return EINVAL;
06020 
06021   /* Call the hook here, so that caller is posix_memalign's caller
06022      and not posix_memalign itself.  */
06023   if (hook != NULL)
06024     mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
06025   else
06026     mem = public_mEMALIGn (alignment, size);
06027 
06028   if (mem != NULL) {
06029     *memptr = mem;
06030     return 0;
06031   }
06032 
06033   return ENOMEM;
06034 }
06035 weak_alias (__posix_memalign, posix_memalign)
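
/* An illustrative sketch of the POSIX wrapper above: posix_memalign()
   reports failure through its return value rather than via NULL/errno,
   so the result is checked before *memptr is used.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *p;
  int err;

  /* The alignment must be a power-of-two multiple of sizeof (void *).  */
  err = posix_memalign (&p, 64, 1000);
  if (err != 0)
    {
      printf ("posix_memalign failed: %d\n", err);
      return 1;
    }
  printf ("aligned to 64: %d\n", (int) (((unsigned long) p % 64) == 0));
  free (p);
  return 0;
}
#endif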
06036 
06037 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
06038 strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
06039 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
06040 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
06041 strong_alias (__libc_memalign, __memalign)
06042 weak_alias (__libc_memalign, memalign)
06043 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
06044 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
06045 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
06046 strong_alias (__libc_mallinfo, __mallinfo)
06047 weak_alias (__libc_mallinfo, mallinfo)
06048 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
06049 
06050 weak_alias (__malloc_stats, malloc_stats)
06051 weak_alias (__malloc_usable_size, malloc_usable_size)
06052 weak_alias (__malloc_trim, malloc_trim)
06053 weak_alias (__malloc_get_state, malloc_get_state)
06054 weak_alias (__malloc_set_state, malloc_set_state)
06055 
06056 #endif /* _LIBC */
06057 
06058 /* ------------------------------------------------------------
06059 History:
06060 
06061 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
06062 
06063 */
06064 /*
06065  * Local variables:
06066  * c-basic-offset: 2
06067  * End:
06068  */