"Fossies" - the Fresh Open Source Software Archive


    1 /* Malloc implementation for multiple threads without lock contention.
    2    Copyright (C) 1996-2018 Free Software Foundation, Inc.
    3    This file is part of the GNU C Library.
    4    Contributed by Wolfram Gloger <wg@malloc.de>
    5    and Doug Lea <dl@cs.oswego.edu>, 2001.
    6 
    7    The GNU C Library is free software; you can redistribute it and/or
    8    modify it under the terms of the GNU Lesser General Public License as
    9    published by the Free Software Foundation; either version 2.1 of the
   10    License, or (at your option) any later version.
   11 
   12    The GNU C Library is distributed in the hope that it will be useful,
   13    but WITHOUT ANY WARRANTY; without even the implied warranty of
   14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   15    Lesser General Public License for more details.
   16 
   17    You should have received a copy of the GNU Lesser General Public
   18    License along with the GNU C Library; see the file COPYING.LIB.  If
   19    not, see <http://www.gnu.org/licenses/>.  */
   20 
   21 /*
   22   This is a version (aka ptmalloc2) of malloc/free/realloc written by
   23   Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
   24 
   25   There have been substantial changes made after the integration into
   26   glibc in all parts of the code.  Do not look for much commonality
   27   with the ptmalloc2 version.
   28 
   29 * Version ptmalloc2-20011215
   30   based on:
   31   VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
   32 
   33 * Quickstart
   34 
   35   In order to compile this implementation, a Makefile is provided with
   36   the ptmalloc2 distribution, which has pre-defined targets for some
   37   popular systems (e.g. "make posix" for Posix threads).  All that is
   38   typically required with regard to compiler flags is the selection of
   39   the thread package via defining one out of USE_PTHREADS, USE_THR or
   40   USE_SPROC.  Check the thread-m.h file for what effects this has.
   41   Many/most systems will additionally require USE_TSD_DATA_HACK to be
   42   defined, so this is the default for "make posix".
   43 
   44 * Why use this malloc?
   45 
   46   This is not the fastest, most space-conserving, most portable, or
   47   most tunable malloc ever written. However it is among the fastest
   48   while also being among the most space-conserving, portable and tunable.
   49   Consistent balance across these factors results in a good general-purpose
   50   allocator for malloc-intensive programs.
   51 
   52   The main properties of the algorithms are:
   53   * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
   54     with ties normally decided via FIFO (i.e. least recently used).
   55   * For small (<= 64 bytes by default) requests, it is a caching
   56     allocator, that maintains pools of quickly recycled chunks.
   57   * In between, and for combinations of large and small requests, it does
   58     the best it can trying to meet both goals at once.
   59   * For very large requests (>= 128KB by default), it relies on system
   60     memory mapping facilities, if supported.
   61 
   62   For a longer but slightly out of date high-level description, see
   63      http://gee.cs.oswego.edu/dl/html/malloc.html
   64 
   65   You may already by default be using a C library containing a malloc
   66   that is  based on some version of this malloc (for example in
   67   linux). You might still want to use the one in this file in order to
   68   customize settings or to avoid overheads associated with library
   69   versions.
   70 
   71 * Contents, described in more detail in "description of public routines" below.
   72 
   73   Standard (ANSI/SVID/...)  functions:
   74     malloc(size_t n);
   75     calloc(size_t n_elements, size_t element_size);
   76     free(void* p);
   77     realloc(void* p, size_t n);
   78     memalign(size_t alignment, size_t n);
   79     valloc(size_t n);
   80     mallinfo()
   81     mallopt(int parameter_number, int parameter_value)
   82 
   83   Additional functions:
   84     independent_calloc(size_t n_elements, size_t size, void* chunks[]);
   85     independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   86     pvalloc(size_t n);
   87     malloc_trim(size_t pad);
   88     malloc_usable_size(void* p);
   89     malloc_stats();
   90 
   91 * Vital statistics:
   92 
   93   Supported pointer representation:       4 or 8 bytes
   94   Supported size_t  representation:       4 or 8 bytes
   95        Note that size_t is allowed to be 4 bytes even if pointers are 8.
   96        You can adjust this by defining INTERNAL_SIZE_T
   97 
   98   Alignment:                              2 * sizeof(size_t) (default)
   99        (i.e., 8 byte alignment with 4byte size_t). This suffices for
  100        nearly all current machines and C compilers. However, you can
  101        define MALLOC_ALIGNMENT to be wider than this if necessary.
  102 
  103   Minimum overhead per allocated chunk:   4 or 8 bytes
  104        Each malloced chunk has a hidden word of overhead holding size
  105        and status information.
  106 
  107   Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
   108               8-byte ptrs:  24/32 bytes (including 4/8 overhead)
  109 
  110        When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
  111        ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
  112        needed; 4 (8) for a trailing size field and 8 (16) bytes for
  113        free list pointers. Thus, the minimum allocatable size is
  114        16/24/32 bytes.
  115 
  116        Even a request for zero bytes (i.e., malloc(0)) returns a
  117        pointer to something of the minimum allocatable size.
  118 
  119        The maximum overhead wastage (i.e., number of extra bytes
   120        allocated beyond those requested in malloc) is less than or equal
  121        to the minimum size, except for requests >= mmap_threshold that
  122        are serviced via mmap(), where the worst case wastage is 2 *
  123        sizeof(size_t) bytes plus the remainder from a system page (the
  124        minimal mmap unit); typically 4096 or 8192 bytes.
  125 
  126   Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
  127                8-byte size_t: 2^64 minus about two pages
  128 
  129        It is assumed that (possibly signed) size_t values suffice to
  130        represent chunk sizes. `Possibly signed' is due to the fact
  131        that `size_t' may be defined on a system as either a signed or
  132        an unsigned type. The ISO C standard says that it must be
  133        unsigned, but a few systems are known not to adhere to this.
  134        Additionally, even when size_t is unsigned, sbrk (which is by
  135        default used to obtain memory from system) accepts signed
  136        arguments, and may not be able to handle size_t-wide arguments
  137        with negative sign bit.  Generally, values that would
  138        appear as negative after accounting for overhead and alignment
  139        are supported only via mmap(), which does not have this
  140        limitation.
  141 
  142        Requests for sizes outside the allowed range will perform an optional
  143        failure action and then return null. (Requests may also
   144        fail because a system is out of memory.)
  145 
  146   Thread-safety: thread-safe
  147 
  148   Compliance: I believe it is compliant with the 1997 Single Unix Specification
  149        Also SVID/XPG, ANSI C, and probably others as well.
  150 
  151 * Synopsis of compile-time options:
  152 
  153     People have reported using previous versions of this malloc on all
  154     versions of Unix, sometimes by tweaking some of the defines
  155     below. It has been tested most extensively on Solaris and Linux.
  156     People also report using it in stand-alone embedded systems.
  157 
  158     The implementation is in straight, hand-tuned ANSI C.  It is not
  159     at all modular. (Sorry!)  It uses a lot of macros.  To be at all
  160     usable, this code should be compiled using an optimizing compiler
  161     (for example gcc -O3) that can simplify expressions and control
  162     paths. (FAQ: some macros import variables as arguments rather than
  163     declare locals because people reported that some debuggers
  164     otherwise get confused.)
  165 
  166     OPTION                     DEFAULT VALUE
  167 
  168     Compilation Environment options:
  169 
  170     HAVE_MREMAP                0
  171 
  172     Changing default word sizes:
  173 
  174     INTERNAL_SIZE_T            size_t
  175 
  176     Configuration and functionality options:
  177 
  178     USE_PUBLIC_MALLOC_WRAPPERS NOT defined
  179     USE_MALLOC_LOCK            NOT defined
  180     MALLOC_DEBUG               NOT defined
  181     REALLOC_ZERO_BYTES_FREES   1
  182     TRIM_FASTBINS              0
  183 
  184     Options for customizing MORECORE:
  185 
  186     MORECORE                   sbrk
  187     MORECORE_FAILURE           -1
  188     MORECORE_CONTIGUOUS        1
  189     MORECORE_CANNOT_TRIM       NOT defined
  190     MORECORE_CLEARS            1
  191     MMAP_AS_MORECORE_SIZE      (1024 * 1024)
  192 
  193     Tuning options that are also dynamically changeable via mallopt:
  194 
  195     DEFAULT_MXFAST             64 (for 32bit), 128 (for 64bit)
  196     DEFAULT_TRIM_THRESHOLD     128 * 1024
  197     DEFAULT_TOP_PAD            0
  198     DEFAULT_MMAP_THRESHOLD     128 * 1024
  199     DEFAULT_MMAP_MAX           65536
  200 
  201     There are several other #defined constants and macros that you
  202     probably don't want to touch unless you are extending or adapting malloc.  */
  203 
  204 /*
  205   void* is the pointer type that malloc should say it returns
  206 */
  207 
  208 #ifndef void
  209 #define void      void
  210 #endif /*void*/
  211 
  212 #include <stddef.h>   /* for size_t */
  213 #include <stdlib.h>   /* for getenv(), abort() */
  214 #include <unistd.h>   /* for __libc_enable_secure */
  215 
  216 #include <atomic.h>
  217 #include <_itoa.h>
  218 #include <bits/wordsize.h>
  219 #include <sys/sysinfo.h>
  220 
  221 #include <ldsodefs.h>
  222 
  223 #include <unistd.h>
  224 #include <stdio.h>    /* needed for malloc_stats */
  225 #include <errno.h>
  226 #include <assert.h>
  227 
  228 #include <shlib-compat.h>
  229 
  230 /* For uintptr_t.  */
  231 #include <stdint.h>
  232 
  233 /* For va_arg, va_start, va_end.  */
  234 #include <stdarg.h>
  235 
  236 /* For MIN, MAX, powerof2.  */
  237 #include <sys/param.h>
  238 
  239 /* For ALIGN_UP et. al.  */
  240 #include <libc-pointer-arith.h>
  241 
  242 /* For DIAG_PUSH/POP_NEEDS_COMMENT et al.  */
  243 #include <libc-diag.h>
  244 
  245 #include <malloc/malloc-internal.h>
  246 
  247 /* For SINGLE_THREAD_P.  */
  248 #include <sysdep-cancel.h>
  249 
  250 /*
  251   Debugging:
  252 
  253   Because freed chunks may be overwritten with bookkeeping fields, this
  254   malloc will often die when freed memory is overwritten by user
  255   programs.  This can be very effective (albeit in an annoying way)
  256   in helping track down dangling pointers.
  257 
  258   If you compile with -DMALLOC_DEBUG, a number of assertion checks are
  259   enabled that will catch more memory errors. You probably won't be
  260   able to make much sense of the actual assertion errors, but they
  261   should help you locate incorrectly overwritten memory.  The checking
  262   is fairly extensive, and will slow down execution
  263   noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
  264   will attempt to check every non-mmapped allocated and free chunk in
   265   the course of computing the summaries. (By nature, mmapped regions
  266   cannot be checked very much automatically.)
  267 
  268   Setting MALLOC_DEBUG may also be helpful if you are trying to modify
  269   this code. The assertions in the check routines spell out in more
  270   detail the assumptions and invariants underlying the algorithms.
  271 
  272   Setting MALLOC_DEBUG does NOT provide an automated mechanism for
  273   checking that all accesses to malloced memory stay within their
  274   bounds. However, there are several add-ons and adaptations of this
  275   or other mallocs available that do this.
  276 */
  277 
  278 #ifndef MALLOC_DEBUG
  279 #define MALLOC_DEBUG 0
  280 #endif
  281 
  282 #ifndef NDEBUG
  283 # define __assert_fail(assertion, file, line, function)         \
  284      __malloc_assert(assertion, file, line, function)
  285 
  286 extern const char *__progname;
  287 
  288 static void
  289 __malloc_assert (const char *assertion, const char *file, unsigned int line,
  290          const char *function)
  291 {
  292   (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
  293              __progname, __progname[0] ? ": " : "",
  294              file, line,
  295              function ? function : "", function ? ": " : "",
  296              assertion);
  297   fflush (stderr);
  298   abort ();
  299 }
  300 #endif
  301 
  302 #if USE_TCACHE
  303 /* We want 64 entries.  This is an arbitrary limit, which tunables can reduce.  */
  304 # define TCACHE_MAX_BINS        64
  305 # define MAX_TCACHE_SIZE    tidx2usize (TCACHE_MAX_BINS-1)
  306 
  307 /* Only used to pre-fill the tunables.  */
  308 # define tidx2usize(idx)    (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
  309 
  310 /* When "x" is from chunksize().  */
  311 # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
  312 /* When "x" is a user-provided size.  */
  313 # define usize2tidx(x) csize2tidx (request2size (x))
  314 
  315 /* With rounding and alignment, the bins are...
  316    idx 0   bytes 0..24 (64-bit) or 0..12 (32-bit)
  317    idx 1   bytes 25..40 or 13..20
  318    idx 2   bytes 41..56 or 21..28
  319    etc.  */
  320 
  321 /* This is another arbitrary limit, which tunables can change.  Each
  322    tcache bin will hold at most this number of chunks.  */
  323 # define TCACHE_FILL_COUNT 7
  324 #endif
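/*
   An illustrative worked example of the index arithmetic above, assuming a
   typical 64-bit configuration where SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and
   MINSIZE == 32:

     tidx2usize (0)     == 0 * 16 + 32 - 8    == 24    (bin 0: requests 0..24)
     tidx2usize (1)     == 1 * 16 + 32 - 8    == 40    (bin 1: requests 25..40)

     request2size (100) == (100 + 8 + 15) & ~15 == 112
     csize2tidx (112)   == (112 - 32 + 15) / 16 == 5

   so a chunk serving a 100-byte request is cached in tcache bin 5, and each
   such bin holds at most TCACHE_FILL_COUNT chunks.
*/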
  325 
  326 
  327 /*
  328   REALLOC_ZERO_BYTES_FREES should be set if a call to
  329   realloc with zero bytes should be the same as a call to free.
  330   This is required by the C standard. Otherwise, since this malloc
   331   returns a unique pointer for malloc(0), realloc(p, 0) does too.
  332 */
  333 
  334 #ifndef REALLOC_ZERO_BYTES_FREES
  335 #define REALLOC_ZERO_BYTES_FREES 1
  336 #endif
  337 
  338 /*
  339   TRIM_FASTBINS controls whether free() of a very small chunk can
  340   immediately lead to trimming. Setting to true (1) can reduce memory
  341   footprint, but will almost always slow down programs that use a lot
  342   of small chunks.
  343 
  344   Define this only if you are willing to give up some speed to more
  345   aggressively reduce system-level memory footprint when releasing
  346   memory in programs that use many small chunks.  You can get
  347   essentially the same effect by setting MXFAST to 0, but this can
  348   lead to even greater slowdowns in programs using many small chunks.
  349   TRIM_FASTBINS is an in-between compile-time option, that disables
  350   only those chunks bordering topmost memory from being placed in
  351   fastbins.
  352 */
  353 
  354 #ifndef TRIM_FASTBINS
  355 #define TRIM_FASTBINS  0
  356 #endif
  357 
  358 
  359 /* Definition for getting more memory from the OS.  */
  360 #define MORECORE         (*__morecore)
  361 #define MORECORE_FAILURE 0
  362 void * __default_morecore (ptrdiff_t);
  363 void *(*__morecore)(ptrdiff_t) = __default_morecore;
  364 
  365 
  366 #include <string.h>
  367 
  368 /*
  369   MORECORE-related declarations. By default, rely on sbrk
  370 */
  371 
  372 
  373 /*
  374   MORECORE is the name of the routine to call to obtain more memory
  375   from the system.  See below for general guidance on writing
  376   alternative MORECORE functions, as well as a version for WIN32 and a
  377   sample version for pre-OSX macos.
  378 */
  379 
  380 #ifndef MORECORE
  381 #define MORECORE sbrk
  382 #endif
  383 
  384 /*
  385   MORECORE_FAILURE is the value returned upon failure of MORECORE
  386   as well as mmap. Since it cannot be an otherwise valid memory address,
  387   and must reflect values of standard sys calls, you probably ought not
  388   try to redefine it.
  389 */
  390 
  391 #ifndef MORECORE_FAILURE
  392 #define MORECORE_FAILURE (-1)
  393 #endif
  394 
  395 /*
  396   If MORECORE_CONTIGUOUS is true, take advantage of fact that
  397   consecutive calls to MORECORE with positive arguments always return
  398   contiguous increasing addresses.  This is true of unix sbrk.  Even
  399   if not defined, when regions happen to be contiguous, malloc will
  400   permit allocations spanning regions obtained from different
  401   calls. But defining this when applicable enables some stronger
  402   consistency checks and space efficiencies.
  403 */
  404 
  405 #ifndef MORECORE_CONTIGUOUS
  406 #define MORECORE_CONTIGUOUS 1
  407 #endif
  408 
  409 /*
  410   Define MORECORE_CANNOT_TRIM if your version of MORECORE
  411   cannot release space back to the system when given negative
  412   arguments. This is generally necessary only if you are using
  413   a hand-crafted MORECORE function that cannot handle negative arguments.
  414 */
  415 
  416 /* #define MORECORE_CANNOT_TRIM */
  417 
  418 /*  MORECORE_CLEARS           (default 1)
  419      The degree to which the routine mapped to MORECORE zeroes out
  420      memory: never (0), only for newly allocated space (1) or always
  421      (2).  The distinction between (1) and (2) is necessary because on
  422      some systems, if the application first decrements and then
  423      increments the break value, the contents of the reallocated space
  424      are unspecified.
  425  */
  426 
  427 #ifndef MORECORE_CLEARS
  428 # define MORECORE_CLEARS 1
  429 #endif
  430 
  431 
  432 /*
  433    MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
  434    sbrk fails, and mmap is used as a backup.  The value must be a
  435    multiple of page size.  This backup strategy generally applies only
  436    when systems have "holes" in address space, so sbrk cannot perform
  437    contiguous expansion, but there is still space available on system.
  438    On systems for which this is known to be useful (i.e. most linux
  439    kernels), this occurs only when programs allocate huge amounts of
  440    memory.  Between this, and the fact that mmap regions tend to be
  441    limited, the size should be large, to avoid too many mmap calls and
  442    thus avoid running out of kernel resources.  */
  443 
  444 #ifndef MMAP_AS_MORECORE_SIZE
  445 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
  446 #endif
  447 
  448 /*
  449   Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  450   large blocks.
  451 */
  452 
  453 #ifndef HAVE_MREMAP
  454 #define HAVE_MREMAP 0
  455 #endif
  456 
  457 /* We may need to support __malloc_initialize_hook for backwards
  458    compatibility.  */
  459 
  460 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
  461 # define HAVE_MALLOC_INIT_HOOK 1
  462 #else
  463 # define HAVE_MALLOC_INIT_HOOK 0
  464 #endif
  465 
  466 
  467 /*
  468   This version of malloc supports the standard SVID/XPG mallinfo
  469   routine that returns a struct containing usage properties and
  470   statistics. It should work on any SVID/XPG compliant system that has
  471   a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
  472   install such a thing yourself, cut out the preliminary declarations
  473   as described above and below and save them in a malloc.h file. But
  474   there's no compelling reason to bother to do this.)
  475 
  476   The main declaration needed is the mallinfo struct that is returned
   477   (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  478   bunch of fields that are not even meaningful in this version of
   479   malloc.  These fields are instead filled by mallinfo() with
  480   other numbers that might be of interest.
  481 */
  482 
  483 
  484 /* ---------- description of public routines ------------ */
  485 
  486 /*
  487   malloc(size_t n)
  488   Returns a pointer to a newly allocated chunk of at least n bytes, or null
  489   if no space is available. Additionally, on failure, errno is
  490   set to ENOMEM on ANSI C systems.
  491 
   492   If n is zero, malloc returns a minimum-sized chunk. (The minimum
  493   size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  494   systems.)  On most systems, size_t is an unsigned type, so calls
  495   with negative arguments are interpreted as requests for huge amounts
  496   of space, which will often fail. The maximum supported value of n
  497   differs across systems, but is in all cases less than the maximum
  498   representable value of a size_t.
  499 */
  500 void*  __libc_malloc(size_t);
  501 libc_hidden_proto (__libc_malloc)
  502 
  503 /*
  504   free(void* p)
  505   Releases the chunk of memory pointed to by p, that had been previously
  506   allocated using malloc or a related routine such as realloc.
  507   It has no effect if p is null. It can have arbitrary (i.e., bad!)
  508   effects if p has already been freed.
  509 
   510   Unless disabled (using mallopt), freeing very large spaces will,
  511   when possible, automatically trigger operations that give
  512   back unused memory to the system, thus reducing program footprint.
  513 */
  514 void     __libc_free(void*);
  515 libc_hidden_proto (__libc_free)
  516 
  517 /*
  518   calloc(size_t n_elements, size_t element_size);
  519   Returns a pointer to n_elements * element_size bytes, with all locations
  520   set to zero.
  521 */
  522 void*  __libc_calloc(size_t, size_t);
  523 
  524 /*
  525   realloc(void* p, size_t n)
  526   Returns a pointer to a chunk of size n that contains the same data
  527   as does chunk p up to the minimum of (n, p's size) bytes, or null
  528   if no space is available.
  529 
  530   The returned pointer may or may not be the same as p. The algorithm
  531   prefers extending p when possible, otherwise it employs the
  532   equivalent of a malloc-copy-free sequence.
  533 
  534   If p is null, realloc is equivalent to malloc.
  535 
  536   If space is not available, realloc returns null, errno is set (if on
  537   ANSI) and p is NOT freed.
  538 
   539   If n is for fewer bytes than already held by p, the newly unused
  540   space is lopped off and freed if possible.  Unless the #define
  541   REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
  542   zero (re)allocates a minimum-sized chunk.
  543 
  544   Large chunks that were internally obtained via mmap will always be
  545   grown using malloc-copy-free sequences unless the system supports
  546   MREMAP (currently only linux).
  547 
  548   The old unix realloc convention of allowing the last-free'd chunk
  549   to be used as an argument to realloc is not supported.
  550 */
  551 void*  __libc_realloc(void*, size_t);
  552 libc_hidden_proto (__libc_realloc)
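/*
   An illustrative caller-side sketch of the malloc/realloc semantics described
   above (handle_oom is a hypothetical error handler, not a routine of this
   file):

     void *p = malloc (100);
     if (p == NULL)
       handle_oom ();        errno has been set to ENOMEM

     void *q = realloc (p, 4096);
     if (q == NULL)
       free (p);             realloc failed; p is still valid and still owned
     else
       free (q);             p must no longer be used; q may or may not equal p
*/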
  553 
  554 /*
  555   memalign(size_t alignment, size_t n);
  556   Returns a pointer to a newly allocated chunk of n bytes, aligned
  557   in accord with the alignment argument.
  558 
  559   The alignment argument should be a power of two. If the argument is
  560   not a power of two, the nearest greater power is used.
  561   8-byte alignment is guaranteed by normal malloc calls, so don't
  562   bother calling memalign with an argument of 8 or less.
  563 
  564   Overreliance on memalign is a sure way to fragment space.
  565 */
  566 void*  __libc_memalign(size_t, size_t);
  567 libc_hidden_proto (__libc_memalign)
  568 
  569 /*
  570   valloc(size_t n);
  571   Equivalent to memalign(pagesize, n), where pagesize is the page
  572   size of the system. If the pagesize is unknown, 4096 is used.
  573 */
  574 void*  __libc_valloc(size_t);
  575 
  576 
  577 
  578 /*
  579   mallopt(int parameter_number, int parameter_value)
   580   Sets tunable parameters.  The format is to provide a
  581   (parameter-number, parameter-value) pair.  mallopt then sets the
  582   corresponding parameter to the argument value if it can (i.e., so
  583   long as the value is meaningful), and returns 1 if successful else
  584   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  585   normally defined in malloc.h.  Only one of these (M_MXFAST) is used
  586   in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  587   so setting them has no effect. But this malloc also supports four
  588   other options in mallopt. See below for details.  Briefly, supported
  589   parameters are as follows (listed defaults are for "typical"
  590   configurations).
  591 
  592   Symbol            param #   default    allowed param values
  593   M_MXFAST          1         64         0-80  (0 disables fastbins)
  594   M_TRIM_THRESHOLD -1         128*1024   any   (-1U disables trimming)
  595   M_TOP_PAD        -2         0          any
  596   M_MMAP_THRESHOLD -3         128*1024   any   (or 0 if no MMAP support)
  597   M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
  598 */
  599 int      __libc_mallopt(int, int);
  600 libc_hidden_proto (__libc_mallopt)
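/*
   Illustrative use of mallopt with the parameters listed above (the numeric
   values are arbitrary examples, not recommendations); each call returns 1 on
   success and 0 otherwise:

     mallopt (M_MXFAST, 0);                   disable fastbins entirely
     mallopt (M_TRIM_THRESHOLD, 64 * 1024);   trim when 64K is unused at the top
     mallopt (M_TOP_PAD, 16 * 1024);          pad each sbrk request by 16K
     mallopt (M_MMAP_THRESHOLD, 256 * 1024);  mmap requests of 256K and larger
     mallopt (M_MMAP_MAX, 0);                 never service requests via mmap
*/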
  601 
  602 
  603 /*
  604   mallinfo()
  605   Returns (by copy) a struct containing various summary statistics:
  606 
  607   arena:     current total non-mmapped bytes allocated from system
  608   ordblks:   the number of free chunks
  609   smblks:    the number of fastbin blocks (i.e., small chunks that
   610            have been freed but not yet reused or consolidated)
  611   hblks:     current number of mmapped regions
  612   hblkhd:    total bytes held in mmapped regions
  613   usmblks:   always 0
  614   fsmblks:   total bytes held in fastbin blocks
  615   uordblks:  current total allocated space (normal or mmapped)
  616   fordblks:  total free space
  617   keepcost:  the maximum number of bytes that could ideally be released
  618            back to system via malloc_trim. ("ideally" means that
  619            it ignores page restrictions etc.)
  620 
  621   Because these fields are ints, but internal bookkeeping may
  622   be kept as longs, the reported values may wrap around zero and
  623   thus be inaccurate.
  624 */
  625 struct mallinfo __libc_mallinfo(void);
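/*
   An illustrative sketch of reading the fields described above through the
   public mallinfo interface:

     struct mallinfo mi = mallinfo ();
     printf ("non-mmapped bytes from system: %d\n", mi.arena);
     printf ("free chunks:                   %d\n", mi.ordblks);
     printf ("bytes in mmapped regions:      %d\n", mi.hblkhd);
     printf ("total allocated bytes:         %d\n", mi.uordblks);
     printf ("total free bytes:              %d\n", mi.fordblks);
     printf ("releasable via malloc_trim:    %d\n", mi.keepcost);
*/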
  626 
  627 
  628 /*
  629   pvalloc(size_t n);
  630   Equivalent to valloc(minimum-page-that-holds(n)), that is,
  631   round up n to nearest pagesize.
  632  */
  633 void*  __libc_pvalloc(size_t);
  634 
  635 /*
  636   malloc_trim(size_t pad);
  637 
  638   If possible, gives memory back to the system (via negative
  639   arguments to sbrk) if there is unused memory at the `high' end of
  640   the malloc pool. You can call this after freeing large blocks of
  641   memory to potentially reduce the system-level memory requirements
  642   of a program. However, it cannot guarantee to reduce memory. Under
  643   some allocation patterns, some large free blocks of memory will be
  644   locked between two used chunks, so they cannot be given back to
  645   the system.
  646 
  647   The `pad' argument to malloc_trim represents the amount of free
  648   trailing space to leave untrimmed. If this argument is zero,
  649   only the minimum amount of memory to maintain internal data
  650   structures will be left (one page or less). Non-zero arguments
  651   can be supplied to maintain enough trailing space to service
  652   future expected allocations without having to re-obtain memory
  653   from the system.
  654 
  655   Malloc_trim returns 1 if it actually released any memory, else 0.
  656   On systems that do not support "negative sbrks", it will always
  657   return 0.
  658 */
  659 int      __malloc_trim(size_t);
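/*
   Illustrative use of malloc_trim as described above (big is a hypothetical
   large allocation):

     free (big);
     if (malloc_trim (32 * 1024))     keep 32K of trailing slack untrimmed
       ... some memory was actually returned to the system ...
*/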
  660 
  661 /*
  662   malloc_usable_size(void* p);
  663 
  664   Returns the number of bytes you can actually use in
  665   an allocated chunk, which may be more than you requested (although
  666   often not) due to alignment and minimum size constraints.
  667   You can use this many bytes without worrying about
  668   overwriting other allocated objects. This is not a particularly great
  669   programming practice. malloc_usable_size can be more useful in
  670   debugging and assertions, for example:
  671 
  672   p = malloc(n);
  673   assert(malloc_usable_size(p) >= 256);
  674 
  675 */
  676 size_t   __malloc_usable_size(void*);
  677 
  678 /*
  679   malloc_stats();
  680   Prints on stderr the amount of space obtained from the system (both
  681   via sbrk and mmap), the maximum amount (which may be more than
  682   current if malloc_trim and/or munmap got called), and the current
  683   number of bytes allocated via malloc (or realloc, etc) but not yet
  684   freed. Note that this is the number of bytes allocated, not the
  685   number requested. It will be larger than the number requested
  686   because of alignment and bookkeeping overhead. Because it includes
  687   alignment wastage as being in use, this figure may be greater than
  688   zero even when no user-level chunks are allocated.
  689 
  690   The reported current and maximum system memory can be inaccurate if
  691   a program makes other calls to system memory allocation functions
  692   (normally sbrk) outside of malloc.
  693 
  694   malloc_stats prints only the most commonly interesting statistics.
  695   More information can be obtained by calling mallinfo.
  696 
  697 */
  698 void     __malloc_stats(void);
  699 
  700 /*
  701   posix_memalign(void **memptr, size_t alignment, size_t size);
  702 
  703   POSIX wrapper like memalign(), checking for validity of size.
  704 */
  705 int      __posix_memalign(void **, size_t, size_t);
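/*
   An illustrative caller-side sketch of the alignment interface above.  Note
   that posix_memalign reports failure through its return value rather than
   through errno, and the alignment must be a power of two multiple of
   sizeof (void *):

     void *buf;
     int err = posix_memalign (&buf, 4096, 8192);
     if (err != 0)
       ... err is EINVAL or ENOMEM ...
     else
       free (buf);                    buf was 4096-byte aligned
*/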
  706 
  707 /* mallopt tuning options */
  708 
  709 /*
  710   M_MXFAST is the maximum request size used for "fastbins", special bins
  711   that hold returned chunks without consolidating their spaces. This
  712   enables future requests for chunks of the same size to be handled
  713   very quickly, but can increase fragmentation, and thus increase the
  714   overall memory footprint of a program.
  715 
  716   This malloc manages fastbins very conservatively yet still
  717   efficiently, so fragmentation is rarely a problem for values less
  718   than or equal to the default.  The maximum supported value of MXFAST
  719   is 80. You wouldn't want it any higher than this anyway.  Fastbins
  720   are designed especially for use with many small structs, objects or
  721   strings -- the default handles structs/objects/arrays with sizes up
   722   to eight 4byte fields, or small strings representing words, tokens,
  723   etc. Using fastbins for larger objects normally worsens
  724   fragmentation without improving speed.
  725 
  726   M_MXFAST is set in REQUEST size units. It is internally used in
  727   chunksize units, which adds padding and alignment.  You can reduce
  728   M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  729   algorithm to be a closer approximation of fifo-best-fit in all cases,
  730   not just for larger requests, but will generally cause it to be
  731   slower.
  732 */
  733 
  734 
  735 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
  736 #ifndef M_MXFAST
  737 #define M_MXFAST            1
  738 #endif
  739 
  740 #ifndef DEFAULT_MXFAST
  741 #define DEFAULT_MXFAST     (64 * SIZE_SZ / 4)
  742 #endif
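/*
   For reference, the arithmetic behind the defaults quoted in the option table
   above: with SIZE_SZ == 4 the expression gives 64 * 4 / 4 == 64, and with
   SIZE_SZ == 8 it gives 64 * 8 / 4 == 128, i.e. "64 (for 32bit), 128 (for
   64bit)".
*/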
  743 
  744 
  745 /*
  746   M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  747   to keep before releasing via malloc_trim in free().
  748 
  749   Automatic trimming is mainly useful in long-lived programs.
  750   Because trimming via sbrk can be slow on some systems, and can
  751   sometimes be wasteful (in cases where programs immediately
  752   afterward allocate more large chunks) the value should be high
  753   enough so that your overall system performance would improve by
  754   releasing this much memory.
  755 
  756   The trim threshold and the mmap control parameters (see below)
  757   can be traded off with one another. Trimming and mmapping are
  758   two different ways of releasing unused memory back to the
  759   system. Between these two, it is often possible to keep
  760   system-level demands of a long-lived program down to a bare
  761   minimum. For example, in one test suite of sessions measuring
  762   the XF86 X server on Linux, using a trim threshold of 128K and a
  763   mmap threshold of 192K led to near-minimal long term resource
  764   consumption.
  765 
  766   If you are using this malloc in a long-lived program, it should
  767   pay to experiment with these values.  As a rough guide, you
  768   might set to a value close to the average size of a process
  769   (program) running on your system.  Releasing this much memory
  770   would allow such a process to run in memory.  Generally, it's
   771   worth it to tune for trimming rather than memory mapping when a
  772   program undergoes phases where several large chunks are
  773   allocated and released in ways that can reuse each other's
  774   storage, perhaps mixed with phases where there are no such
  775   chunks at all.  And in well-behaved long-lived programs,
  776   controlling release of large blocks via trimming versus mapping
  777   is usually faster.
  778 
  779   However, in most programs, these parameters serve mainly as
  780   protection against the system-level effects of carrying around
  781   massive amounts of unneeded memory. Since frequent calls to
  782   sbrk, mmap, and munmap otherwise degrade performance, the default
  783   parameters are set to relatively high values that serve only as
  784   safeguards.
  785 
   786   The trim value must be greater than the page size to have any useful
   787   effect.  To disable trimming completely, you can set it to
   788   (unsigned long)(-1).
  789 
  790   Trim settings interact with fastbin (MXFAST) settings: Unless
  791   TRIM_FASTBINS is defined, automatic trimming never takes place upon
  792   freeing a chunk with size less than or equal to MXFAST. Trimming is
  793   instead delayed until subsequent freeing of larger chunks. However,
  794   you can still force an attempted trim by calling malloc_trim.
  795 
  796   Also, trimming is not generally possible in cases where
  797   the main arena is obtained via mmap.
  798 
  799   Note that the trick some people use of mallocing a huge space and
  800   then freeing it at program startup, in an attempt to reserve system
  801   memory, doesn't have the intended effect under automatic trimming,
  802   since that memory will immediately be returned to the system.
  803 */
  804 
  805 #define M_TRIM_THRESHOLD       -1
  806 
  807 #ifndef DEFAULT_TRIM_THRESHOLD
  808 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
  809 #endif
  810 
  811 /*
  812   M_TOP_PAD is the amount of extra `padding' space to allocate or
  813   retain whenever sbrk is called. It is used in two ways internally:
  814 
  815   * When sbrk is called to extend the top of the arena to satisfy
  816   a new malloc request, this much padding is added to the sbrk
  817   request.
  818 
  819   * When malloc_trim is called automatically from free(),
  820   it is used as the `pad' argument.
  821 
  822   In both cases, the actual amount of padding is rounded
  823   so that the end of the arena is always a system page boundary.
  824 
  825   The main reason for using padding is to avoid calling sbrk so
  826   often. Having even a small pad greatly reduces the likelihood
  827   that nearly every malloc request during program start-up (or
  828   after trimming) will invoke sbrk, which needlessly wastes
  829   time.
  830 
  831   Automatic rounding-up to page-size units is normally sufficient
  832   to avoid measurable overhead, so the default is 0.  However, in
  833   systems where sbrk is relatively slow, it can pay to increase
  834   this value, at the expense of carrying around more memory than
  835   the program needs.
  836 */
  837 
  838 #define M_TOP_PAD              -2
  839 
  840 #ifndef DEFAULT_TOP_PAD
  841 #define DEFAULT_TOP_PAD        (0)
  842 #endif
  843 
  844 /*
  845   MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
  846   adjusted MMAP_THRESHOLD.
  847 */
  848 
  849 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
  850 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
  851 #endif
  852 
  853 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
  854   /* For 32-bit platforms we cannot increase the maximum mmap
  855      threshold much because it is also the minimum value for the
  856      maximum heap size and its alignment.  Going above 512k (i.e., 1M
  857      for new heaps) wastes too much address space.  */
  858 # if __WORDSIZE == 32
  859 #  define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
  860 # else
  861 #  define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
  862 # endif
  863 #endif
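/*
   For reference: on an LP64 system (sizeof (long) == 8) the expression above
   evaluates to 4 * 1024 * 1024 * 8 == 32 MB, so the dynamically adjusted mmap
   threshold can move between DEFAULT_MMAP_THRESHOLD_MIN (128 KB) and 32 MB;
   32-bit configurations are capped at 512 KB for the reason given above.
*/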
  864 
  865 /*
  866   M_MMAP_THRESHOLD is the request size threshold for using mmap()
  867   to service a request. Requests of at least this size that cannot
  868   be allocated using already-existing space will be serviced via mmap.
  869   (If enough normal freed space already exists it is used instead.)
  870 
  871   Using mmap segregates relatively large chunks of memory so that
  872   they can be individually obtained and released from the host
  873   system. A request serviced through mmap is never reused by any
  874   other request (at least not directly; the system may just so
  875   happen to remap successive requests to the same locations).
  876 
  877   Segregating space in this way has the benefits that:
  878 
  879    1. Mmapped space can ALWAYS be individually released back
  880       to the system, which helps keep the system level memory
  881       demands of a long-lived program low.
  882    2. Mapped memory can never become `locked' between
  883       other chunks, as can happen with normally allocated chunks, which
  884       means that even trimming via malloc_trim would not release them.
  885    3. On some systems with "holes" in address spaces, mmap can obtain
  886       memory that sbrk cannot.
  887 
  888   However, it has the disadvantages that:
  889 
  890    1. The space cannot be reclaimed, consolidated, and then
  891       used to service later requests, as happens with normal chunks.
  892    2. It can lead to more wastage because of mmap page alignment
  893       requirements
  894    3. It causes malloc performance to be more dependent on host
  895       system memory management support routines which may vary in
  896       implementation quality and may impose arbitrary
  897       limitations. Generally, servicing a request via normal
  898       malloc steps is faster than going through a system's mmap.
  899 
  900   The advantages of mmap nearly always outweigh disadvantages for
  901   "large" chunks, but the value of "large" varies across systems.  The
  902   default is an empirically derived value that works well in most
  903   systems.
  904 
  905 
  906   Update in 2006:
  907   The above was written in 2001. Since then the world has changed a lot.
  908   Memory got bigger. Applications got bigger. The virtual address space
  909   layout in 32 bit linux changed.
  910 
  911   In the new situation, brk() and mmap space is shared and there are no
  912   artificial limits on brk size imposed by the kernel. What is more,
  913   applications have started using transient allocations larger than the
   914   128Kb that was imagined in 2001.
  915 
  916   The price for mmap is also high now; each time glibc mmaps from the
  917   kernel, the kernel is forced to zero out the memory it gives to the
  918   application. Zeroing memory is expensive and eats a lot of cache and
  919   memory bandwidth. This has nothing to do with the efficiency of the
   920   virtual memory system; by doing mmap the kernel just has no choice but
  921   to zero.
  922 
  923   In 2001, the kernel had a maximum size for brk() which was about 800
   924   megabytes on 32 bit x86; at that point brk() would hit the first
   925   mmapped shared libraries and couldn't expand anymore. With current 2.6
  926   kernels, the VA space layout is different and brk() and mmap
  927   both can span the entire heap at will.
  928 
  929   Rather than using a static threshold for the brk/mmap tradeoff,
  930   we are now using a simple dynamic one. The goal is still to avoid
  931   fragmentation. The old goals we kept are
  932   1) try to get the long lived large allocations to use mmap()
  933   2) really large allocations should always use mmap()
  934   and we're adding now:
  935   3) transient allocations should use brk() to avoid forcing the kernel
   936      to zero memory over and over again
  937 
  938   The implementation works with a sliding threshold, which is by default
   939   limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
  940   out at 128Kb as per the 2001 default.
  941 
  942   This allows us to satisfy requirement 1) under the assumption that long
  943   lived allocations are made early in the process' lifespan, before it has
  944   started doing dynamic allocations of the same size (which will
  945   increase the threshold).
  946 
   947   The upper bound on the threshold satisfies requirement 2).
  948 
  949   The threshold goes up in value when the application frees memory that was
  950   allocated with the mmap allocator. The idea is that once the application
  951   starts freeing memory of a certain size, it's highly probable that this is
  952   a size the application uses for transient allocations. This estimator
  953   is there to satisfy the new third requirement.
  954 
  955 */
  956 
  957 #define M_MMAP_THRESHOLD      -3
  958 
  959 #ifndef DEFAULT_MMAP_THRESHOLD
  960 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
  961 #endif
  962 
  963 /*
  964   M_MMAP_MAX is the maximum number of requests to simultaneously
  965   service using mmap. This parameter exists because
  966   some systems have a limited number of internal tables for
  967   use by mmap, and using more than a few of them may degrade
  968   performance.
  969 
  970   The default is set to a value that serves only as a safeguard.
  971   Setting to 0 disables use of mmap for servicing large requests.
  972 */
  973 
  974 #define M_MMAP_MAX             -4
  975 
  976 #ifndef DEFAULT_MMAP_MAX
  977 #define DEFAULT_MMAP_MAX       (65536)
  978 #endif
  979 
  980 #include <malloc.h>
  981 
  982 #ifndef RETURN_ADDRESS
  983 #define RETURN_ADDRESS(X_) (NULL)
  984 #endif
  985 
  986 /* Forward declarations.  */
  987 struct malloc_chunk;
  988 typedef struct malloc_chunk* mchunkptr;
  989 
  990 /* Internal routines.  */
  991 
  992 static void*  _int_malloc(mstate, size_t);
  993 static void     _int_free(mstate, mchunkptr, int);
  994 static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
  995                INTERNAL_SIZE_T);
  996 static void*  _int_memalign(mstate, size_t, size_t);
  997 static void*  _mid_memalign(size_t, size_t, void *);
  998 
  999 static void malloc_printerr(const char *str) __attribute__ ((noreturn));
 1000 
 1001 static void* mem2mem_check(void *p, size_t sz);
 1002 static void top_check(void);
 1003 static void munmap_chunk(mchunkptr p);
 1004 #if HAVE_MREMAP
 1005 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
 1006 #endif
 1007 
 1008 static void*   malloc_check(size_t sz, const void *caller);
 1009 static void      free_check(void* mem, const void *caller);
 1010 static void*   realloc_check(void* oldmem, size_t bytes,
 1011                    const void *caller);
 1012 static void*   memalign_check(size_t alignment, size_t bytes,
 1013                 const void *caller);
 1014 
 1015 /* ------------------ MMAP support ------------------  */
 1016 
 1017 
 1018 #include <fcntl.h>
 1019 #include <sys/mman.h>
 1020 
 1021 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
 1022 # define MAP_ANONYMOUS MAP_ANON
 1023 #endif
 1024 
 1025 #ifndef MAP_NORESERVE
 1026 # define MAP_NORESERVE 0
 1027 #endif
 1028 
 1029 #define MMAP(addr, size, prot, flags) \
 1030  __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
 1031 
 1032 
 1033 /*
 1034   -----------------------  Chunk representations -----------------------
 1035 */
 1036 
 1037 
 1038 /*
 1039   This struct declaration is misleading (but accurate and necessary).
 1040   It declares a "view" into memory allowing access to necessary
 1041   fields at known offsets from a given base. See explanation below.
 1042 */
 1043 
 1044 struct malloc_chunk {
 1045 
 1046   INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
 1047   INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */
 1048 
 1049   struct malloc_chunk* fd;         /* double links -- used only if free. */
 1050   struct malloc_chunk* bk;
 1051 
 1052   /* Only used for large blocks: pointer to next larger size.  */
 1053   struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
 1054   struct malloc_chunk* bk_nextsize;
 1055 };
 1056 
 1057 
 1058 /*
 1059    malloc_chunk details:
 1060 
 1061     (The following includes lightly edited explanations by Colin Plumb.)
 1062 
 1063     Chunks of memory are maintained using a `boundary tag' method as
 1064     described in e.g., Knuth or Standish.  (See the paper by Paul
 1065     Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
 1066     survey of such techniques.)  Sizes of free chunks are stored both
 1067     in the front of each chunk and at the end.  This makes
 1068     consolidating fragmented chunks into bigger chunks very fast.  The
 1069     size fields also hold bits representing whether chunks are free or
 1070     in use.
 1071 
 1072     An allocated chunk looks like this:
 1073 
 1074 
 1075     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1076         |             Size of previous chunk, if unallocated (P clear)  |
 1077         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1078         |             Size of chunk, in bytes                     |A|M|P|
 1079       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1080         |             User data starts here...                          .
 1081         .                                                               .
 1082         .             (malloc_usable_size() bytes)                      .
 1083         .                                                               |
 1084 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1085         |             (size of chunk, but used for application data)    |
 1086         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1087         |             Size of next chunk, in bytes                |A|0|1|
 1088         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1089 
 1090     Where "chunk" is the front of the chunk for the purpose of most of
 1091     the malloc code, but "mem" is the pointer that is returned to the
 1092     user.  "Nextchunk" is the beginning of the next contiguous chunk.
 1093 
 1094     Chunks always begin on even word boundaries, so the mem portion
 1095     (which is returned to the user) is also on an even word boundary, and
 1096     thus at least double-word aligned.
 1097 
 1098     Free chunks are stored in circular doubly-linked lists, and look like this:
 1099 
 1100     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1101         |             Size of previous chunk, if unallocated (P clear)  |
 1102         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1103     `head:' |             Size of chunk, in bytes                     |A|0|P|
 1104       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1105         |             Forward pointer to next chunk in list             |
 1106         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1107         |             Back pointer to previous chunk in list            |
 1108         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1109         |             Unused space (may be 0 bytes long)                .
 1110         .                                                               .
 1111         .                                                               |
 1112 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1113     `foot:' |             Size of chunk, in bytes                           |
 1114         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1115         |             Size of next chunk, in bytes                |A|0|0|
 1116         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1117 
 1118     The P (PREV_INUSE) bit, stored in the unused low-order bit of the
 1119     chunk size (which is always a multiple of two words), is an in-use
 1120     bit for the *previous* chunk.  If that bit is *clear*, then the
 1121     word before the current chunk size contains the previous chunk
 1122     size, and can be used to find the front of the previous chunk.
 1123     The very first chunk allocated always has this bit set,
 1124     preventing access to non-existent (or non-owned) memory. If
 1125     prev_inuse is set for any given chunk, then you CANNOT determine
 1126     the size of the previous chunk, and might even get a memory
 1127     addressing fault when trying to do so.
 1128 
 1129     The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
 1130     main arena, described by the main_arena variable.  When additional
 1131     threads are spawned, each thread receives its own arena (up to a
 1132     configurable limit, after which arenas are reused for multiple
 1133     threads), and the chunks in these arenas have the A bit set.  To
 1134     find the arena for a chunk on such a non-main arena, heap_for_ptr
 1135     performs a bit mask operation and indirection through the ar_ptr
 1136     member of the per-heap header heap_info (see arena.c).
 1137 
 1138     Note that the `foot' of the current chunk is actually represented
 1139     as the prev_size of the NEXT chunk. This makes it easier to
 1140     deal with alignments etc but can be very confusing when trying
 1141     to extend or adapt this code.
 1142 
 1143     The three exceptions to all this are:
 1144 
 1145      1. The special chunk `top' doesn't bother using the
 1146     trailing size field since there is no next contiguous chunk
 1147     that would have to index off it. After initialization, `top'
 1148     is forced to always exist.  If it would become less than
 1149     MINSIZE bytes long, it is replenished.
 1150 
 1151      2. Chunks allocated via mmap, which have the second-lowest-order
 1152     bit M (IS_MMAPPED) set in their size fields.  Because they are
 1153     allocated one-by-one, each must contain its own trailing size
 1154     field.  If the M bit is set, the other bits are ignored
 1155     (because mmapped chunks are neither in an arena, nor adjacent
 1156     to a freed chunk).  The M bit is also used for chunks which
 1157     originally came from a dumped heap via malloc_set_state in
 1158     hooks.c.
 1159 
 1160      3. Chunks in fastbins are treated as allocated chunks from the
 1161     point of view of the chunk allocator.  They are consolidated
 1162     with their neighbors only in bulk, in malloc_consolidate.
 1163 */
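/*
   A worked example of reading the header bits described above, assuming a
   64-bit build (SIZE_SZ == 8, so "mem" is chunk + 16).  Suppose a chunk's
   size field holds 0x115:

     0x115 == 0x110 | A | P
     chunk size              0x110 (272 bytes, a multiple of 2 * SIZE_SZ)
     A (NON_MAIN_ARENA) = 1  the chunk belongs to a non-main (thread) arena
     M (IS_MMAPPED)     = 0  the chunk was not obtained via mmap
     P (PREV_INUSE)     = 1  the previous contiguous chunk is in use, so its
                             size cannot be read from the prev_size field
*/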
 1164 
 1165 /*
 1166   ---------- Size and alignment checks and conversions ----------
 1167 */
 1168 
 1169 /* conversion from malloc headers to user pointers, and back */
 1170 
 1171 #define chunk2mem(p)   ((void*)((char*)(p) + 2*SIZE_SZ))
 1172 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
 1173 
 1174 /* The smallest possible chunk */
 1175 #define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))
 1176 
 1177 /* The smallest size we can malloc is an aligned minimal chunk */
 1178 
 1179 #define MINSIZE  \
 1180   (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
 1181 
 1182 /* Check if m has acceptable alignment */
 1183 
 1184 #define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
 1185 
 1186 #define misaligned_chunk(p) \
 1187   ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
 1188    & MALLOC_ALIGN_MASK)
 1189 
 1190 
 1191 /*
 1192    Check if a request is so large that it would wrap around zero when
 1193    padded and aligned. To simplify some other code, the bound is made
 1194    low enough so that adding MINSIZE will also not wrap around zero.
 1195  */
 1196 
 1197 #define REQUEST_OUT_OF_RANGE(req)                                 \
 1198   ((unsigned long) (req) >=                           \
 1199    (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
 1200 
 1201 /* pad request bytes into a usable size -- internal version */
 1202 
 1203 #define request2size(req)                                         \
 1204   (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
 1205    MINSIZE :                                                      \
 1206    ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
 1207 
 1208 /* Same, except also perform an argument and result check.  First, we check
 1209    that the padding done by request2size didn't result in an integer
 1210    overflow.  Then we check (using REQUEST_OUT_OF_RANGE) that the resulting
 1211    size isn't so large that a later alignment would lead to another integer
 1212    overflow.  */
 1213 #define checked_request2size(req, sz) \
 1214 ({                  \
 1215   (sz) = request2size (req);        \
 1216   if (((sz) < (req))            \
 1217       || REQUEST_OUT_OF_RANGE (sz)) \
 1218     {                   \
 1219       __set_errno (ENOMEM);     \
 1220       return 0;             \
 1221     }                   \
 1222 })
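/*
   Worked examples of the conversions above for a 64-bit build (SIZE_SZ == 8,
   MALLOC_ALIGN_MASK == 15, MINSIZE == 32):

     request2size (0)   == 32     0 + 8 + 15 is below MINSIZE, so MINSIZE is used
     request2size (24)  == 32     (24 + 8 + 15) & ~15
     request2size (25)  == 48     the first request needing a larger chunk
     request2size (100) == 112    (100 + 8 + 15) & ~15

   chunk2mem and mem2chunk then translate between a chunk header and the user
   pointer, which differ by 2 * SIZE_SZ == 16 bytes.
*/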
 1223 
 1224 /*
 1225    --------------- Physical chunk operations ---------------
 1226  */
 1227 
 1228 
 1229 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
 1230 #define PREV_INUSE 0x1
 1231 
 1232 /* extract inuse bit of previous chunk */
 1233 #define prev_inuse(p)       ((p)->mchunk_size & PREV_INUSE)
 1234 
 1235 
 1236 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
 1237 #define IS_MMAPPED 0x2
 1238 
 1239 /* check for mmap()'ed chunk */
 1240 #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
 1241 
 1242 
 1243 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
 1244    from a non-main arena.  This is only set immediately before handing
 1245    the chunk to the user, if necessary.  */
 1246 #define NON_MAIN_ARENA 0x4
 1247 
 1248 /* Check for chunk from main arena.  */
 1249 #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
 1250 
 1251 /* Mark a chunk as not being on the main arena.  */
 1252 #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
 1253 
 1254 
 1255 /*
 1256    Bits to mask off when extracting size
 1257 
 1258    Note: IS_MMAPPED is intentionally not masked off from size field in
 1259    macros for which mmapped chunks should never be seen. This should
 1260    cause helpful core dumps to occur if it is tried by accident by
 1261    people extending or adapting this malloc.
 1262  */
 1263 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
 1264 
 1265 /* Get size, ignoring use bits */
 1266 #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
 1267 
 1268 /* Like chunksize, but do not mask SIZE_BITS.  */
 1269 #define chunksize_nomask(p)         ((p)->mchunk_size)
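      /* Editor's sketch (not part of glibc): if a chunk's size field
         holds 0x91, then

           chunksize_nomask (p) == 0x91      raw field, flag bits included
           chunksize (p)        == 0x90      flag bits (SIZE_BITS) masked off
           prev_inuse (p)       != 0         PREV_INUSE (0x1) is set
           chunk_is_mmapped (p) == 0         IS_MMAPPED (0x2) is clear
           chunk_main_arena (p) != 0         NON_MAIN_ARENA (0x4) is clear  */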
 1270 
 1271 /* Ptr to next physical malloc_chunk. */
 1272 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
 1273 
 1274 /* Size of the chunk below P.  Only valid if !prev_inuse (P).  */
 1275 #define prev_size(p) ((p)->mchunk_prev_size)
 1276 
 1277 /* Set the size of the chunk below P.  Only valid if !prev_inuse (P).  */
 1278 #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
 1279 
 1280 /* Ptr to previous physical malloc_chunk.  Only valid if !prev_inuse (P).  */
 1281 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
 1282 
 1283 /* Treat space at ptr + offset as a chunk */
 1284 #define chunk_at_offset(p, s)  ((mchunkptr) (((char *) (p)) + (s)))
 1285 
 1286 /* extract p's inuse bit */
 1287 #define inuse(p)                                  \
 1288   ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
 1289 
 1290 /* set/clear chunk as being inuse without otherwise disturbing */
 1291 #define set_inuse(p)                                  \
 1292   ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
 1293 
 1294 #define clear_inuse(p)                                \
 1295   ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
 1296 
 1297 
 1298 /* check/set/clear inuse bits in known places */
 1299 #define inuse_bit_at_offset(p, s)                         \
 1300   (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
 1301 
 1302 #define set_inuse_bit_at_offset(p, s)                         \
 1303   (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
 1304 
 1305 #define clear_inuse_bit_at_offset(p, s)                       \
 1306   (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
 1307 
 1308 
 1309 /* Set size at head, without disturbing its use bit */
 1310 #define set_head_size(p, s)  ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
 1311 
 1312 /* Set size/use field */
 1313 #define set_head(p, s)       ((p)->mchunk_size = (s))
 1314 
 1315 /* Set size at footer (only when chunk is not in use) */
 1316 #define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
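      /* Editor's sketch (not part of glibc): the boundary-tag scheme for a
         free chunk P of (aligned, flag-free) size S is maintained with

           set_head (P, S);     store S in P's own size field
           set_foot (P, S);     store S in the prev_size field of the
                                chunk at P + S (P's physical successor)

         so that afterwards prev_size (next_chunk (P)) == chunksize (P) and
         prev_chunk (next_chunk (P)) == P, which are exactly the invariants
         the consolidation code and the unlink check further below rely on.  */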
 1317 
 1318 
 1319 #pragma GCC poison mchunk_size
 1320 #pragma GCC poison mchunk_prev_size
 1321 
 1322 /*
 1323    -------------------- Internal data structures --------------------
 1324 
 1325    All internal state is held in an instance of malloc_state defined
 1326    below. There are no other static variables, except in two optional
 1327    cases:
 1328  * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
 1329  * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
 1330      for mmap.
 1331 
 1332    Beware of lots of tricks that minimize the total bookkeeping space
 1333    requirements. The result is a little over 1K bytes (for 4-byte
 1334    pointers and size_t).
 1335  */
 1336 
 1337 /*
 1338    Bins
 1339 
 1340     An array of bin headers for free chunks. Each bin is doubly
 1341     linked.  The bins are approximately proportionally (log) spaced.
 1342     There are a lot of these bins (128). This may look excessive, but
 1343     works very well in practice.  Most bins hold sizes that are
 1344     unusual as malloc request sizes, but are more usual for fragments
 1345     and consolidated sets of chunks, which is what these bins hold, so
 1346     they can be found quickly.  All procedures maintain the invariant
 1347     that no consolidated chunk physically borders another one, so each
 1348     chunk in a list is known to be preceded and followed by either
 1349     inuse chunks or the ends of memory.
 1350 
 1351     Chunks in bins are kept in size order, with ties going to the
 1352     approximately least recently used chunk. Ordering isn't needed
 1353     for the small bins, which all contain the same-sized chunks, but
 1354     facilitates best-fit allocation for larger chunks. These lists
 1355     are just sequential. Keeping them in order almost never requires
 1356     enough traversal to warrant using fancier ordered data
 1357     structures.
 1358 
 1359     Chunks of the same size are linked with the most
 1360     recently freed at the front, and allocations are taken from the
 1361     back.  This results in LRU (FIFO) allocation order, which tends
 1362     to give each chunk an equal opportunity to be consolidated with
 1363     adjacent freed chunks, resulting in larger free chunks and less
 1364     fragmentation.
 1365 
 1366     To simplify use in double-linked lists, each bin header acts
 1367     as a malloc_chunk. This avoids special-casing for headers.
 1368     But to conserve space and improve locality, we allocate
 1369     only the fd/bk pointers of bins, and then use repositioning tricks
 1370     to treat these as the fields of a malloc_chunk*.
 1371  */
 1372 
 1373 typedef struct malloc_chunk *mbinptr;
 1374 
 1375 /* addressing -- note that bin_at(0) does not exist */
 1376 #define bin_at(m, i) \
 1377   (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))               \
 1378              - offsetof (struct malloc_chunk, fd))
 1379 
 1380 /* analog of ++bin */
 1381 #define next_bin(b)  ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
 1382 
 1383 /* Reminders about list directionality within bins */
 1384 #define first(b)     ((b)->fd)
 1385 #define last(b)      ((b)->bk)
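      /* Editor's sketch (not part of glibc): bins[] stores only fd/bk
         pointer pairs.  Bin i occupies bins[2*i - 2] (fd) and
         bins[2*i - 1] (bk), and bin_at subtracts
         offsetof (struct malloc_chunk, fd) so that the pair can be
         addressed as if it were a whole chunk:

           mbinptr b = bin_at (av, i);
           first (b)   -- b->fd, aliases av->bins[2*i - 2]
           last (b)    -- b->bk, aliases av->bins[2*i - 1]

         The prev_size/size "fields" of b overlap the preceding bin (or
         other arena fields) and are never touched through b.  */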
 1386 
 1387 /* Take a chunk off a bin list */
 1388 #define unlink(AV, P, BK, FD) {                                            \
 1389     if (__builtin_expect (chunksize(P) != prev_size (next_chunk(P)), 0))      \
 1390       malloc_printerr ("corrupted size vs. prev_size");               \
 1391     FD = P->fd;                                   \
 1392     BK = P->bk;                                   \
 1393     if (__builtin_expect (FD->bk != P || BK->fd != P, 0))             \
 1394       malloc_printerr ("corrupted double-linked list");               \
 1395     else {                                    \
 1396         FD->bk = BK;                                  \
 1397         BK->fd = FD;                                  \
 1398         if (!in_smallbin_range (chunksize_nomask (P))                 \
 1399             && __builtin_expect (P->fd_nextsize != NULL, 0)) {            \
 1400             if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0)       \
 1401                 || __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0))   \
 1402               malloc_printerr ("corrupted double-linked list (not small)");  \
 1403             if (FD->fd_nextsize == NULL) {                    \
 1404                 if (P->fd_nextsize == P)                      \
 1405                   FD->fd_nextsize = FD->bk_nextsize = FD;             \
 1406                 else {                                \
 1407                     FD->fd_nextsize = P->fd_nextsize;                 \
 1408                     FD->bk_nextsize = P->bk_nextsize;                 \
 1409                     P->fd_nextsize->bk_nextsize = FD;                 \
 1410                     P->bk_nextsize->fd_nextsize = FD;                 \
 1411                   }                               \
 1412               } else {                                \
 1413                 P->fd_nextsize->bk_nextsize = P->bk_nextsize;             \
 1414                 P->bk_nextsize->fd_nextsize = P->fd_nextsize;             \
 1415               }                                   \
 1416           }                                   \
 1417       }                                       \
 1418 }
 1419 
 1420 /*
 1421    Indexing
 1422 
 1423     Bins for sizes < 512 bytes contain chunks of all the same size, spaced
 1424     8 bytes apart. Larger bins are approximately logarithmically spaced:
 1425 
 1426     64 bins of size       8
 1427     32 bins of size      64
 1428     16 bins of size     512
 1429      8 bins of size    4096
 1430      4 bins of size   32768
 1431      2 bins of size  262144
 1432      1 bin  of size what's left
 1433 
 1434     There is actually a little bit of slop in the numbers in bin_index
 1435     for the sake of speed. This makes no difference elsewhere.
 1436 
 1437     The bins top out around 1MB because we expect to service large
 1438     requests via mmap.
 1439 
 1440     Bin 0 does not exist.  Bin 1 is the unordered list; if that would be
 1441     a valid chunk size, the small bins are bumped up one.
 1442  */
 1443 
 1444 #define NBINS             128
 1445 #define NSMALLBINS         64
 1446 #define SMALLBIN_WIDTH    MALLOC_ALIGNMENT
 1447 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
 1448 #define MIN_LARGE_SIZE    ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
 1449 
 1450 #define in_smallbin_range(sz)  \
 1451   ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
 1452 
 1453 #define smallbin_index(sz) \
 1454   ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
 1455    + SMALLBIN_CORRECTION)
 1456 
 1457 #define largebin_index_32(sz)                                                \
 1458   (((((unsigned long) (sz)) >> 6) <= 38) ?  56 + (((unsigned long) (sz)) >> 6) :\
 1459    ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
 1460    ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
 1461    ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
 1462    ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
 1463    126)
 1464 
 1465 #define largebin_index_32_big(sz)                                            \
 1466   (((((unsigned long) (sz)) >> 6) <= 45) ?  49 + (((unsigned long) (sz)) >> 6) :\
 1467    ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
 1468    ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
 1469    ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
 1470    ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
 1471    126)
 1472 
 1473 // XXX It remains to be seen whether it is good to keep the widths of
 1474 // XXX the buckets the same or whether it should be scaled by a factor
 1475 // XXX of two as well.
 1476 #define largebin_index_64(sz)                                                \
 1477   (((((unsigned long) (sz)) >> 6) <= 48) ?  48 + (((unsigned long) (sz)) >> 6) :\
 1478    ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
 1479    ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
 1480    ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
 1481    ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
 1482    126)
 1483 
 1484 #define largebin_index(sz) \
 1485   (SIZE_SZ == 8 ? largebin_index_64 (sz)                                     \
 1486    : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz)                     \
 1487    : largebin_index_32 (sz))
 1488 
 1489 #define bin_index(sz) \
 1490   ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
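      /* Editor's sketch (not part of glibc): with the common 64-bit
         parameters (SIZE_SZ == 8, SMALLBIN_WIDTH == 16,
         MIN_LARGE_SIZE == 1024):

           bin_index (144)    == smallbin_index (144)       ==   9  (144 >> 4)
           bin_index (1024)   == largebin_index_64 (1024)   ==  64  (48 + (1024 >> 6))
           bin_index (1536)   == largebin_index_64 (1536)   ==  72  (48 + (1536 >> 6))
           bin_index (131072) == largebin_index_64 (131072) == 123  (119 + (131072 >> 15))

         Small bins 2..63 are therefore exactly 16 bytes apart, and the
         large bins start at index 64 with geometrically growing widths.  */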
 1491 
 1492 
 1493 /*
 1494    Unsorted chunks
 1495 
 1496     All remainders from chunk splits, as well as all returned chunks,
 1497     are first placed in the "unsorted" bin. They are then placed
 1498     in regular bins after malloc gives them ONE chance to be used before
 1499     binning. So, basically, the unsorted_chunks list acts as a queue,
 1500     with chunks being placed on it in free (and malloc_consolidate),
 1501     and taken off (to be either used or placed in bins) in malloc.
 1502 
 1503     The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
 1504     does not have to be taken into account in size comparisons.
 1505  */
 1506 
 1507 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
 1508 #define unsorted_chunks(M)          (bin_at (M, 1))
 1509 
 1510 /*
 1511    Top
 1512 
 1513     The top-most available chunk (i.e., the one bordering the end of
 1514     available memory) is treated specially. It is never included in
 1515     any bin, is used only if no other chunk is available, and is
 1516     released back to the system if it is very large (see
 1517     M_TRIM_THRESHOLD).  Because top initially
 1518     points to its own bin with initial zero size, thus forcing
 1519     extension on the first malloc request, we avoid having any special
 1520     code in malloc to check whether it even exists yet. But we still
 1521     need to do so when getting memory from system, so we make
 1522     initial_top treat the bin as a legal but unusable chunk during the
 1523     interval between initialization and the first call to
 1524     sysmalloc. (This is somewhat delicate, since it relies on
 1525     the 2 preceding words to be zero during this interval as well.)
 1526  */
 1527 
 1528 /* Conveniently, the unsorted bin can be used as dummy top on first call */
 1529 #define initial_top(M)              (unsorted_chunks (M))
 1530 
 1531 /*
 1532    Binmap
 1533 
 1534     To help compensate for the large number of bins, a one-level index
 1535     structure is used for bin-by-bin searching.  `binmap' is a
 1536     bitvector recording whether bins are definitely empty so they can
 1537     be skipped over during traversals.  The bits are NOT always
 1538     cleared as soon as bins are empty, but instead only
 1539     when they are noticed to be empty during traversal in malloc.
 1540  */
 1541 
 1542 /* Conservatively use 32 bits per map word, even if on a 64-bit system */
 1543 #define BINMAPSHIFT      5
 1544 #define BITSPERMAP       (1U << BINMAPSHIFT)
 1545 #define BINMAPSIZE       (NBINS / BITSPERMAP)
 1546 
 1547 #define idx2block(i)     ((i) >> BINMAPSHIFT)
 1548 #define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
 1549 
 1550 #define mark_bin(m, i)    ((m)->binmap[idx2block (i)] |= idx2bit (i))
 1551 #define unmark_bin(m, i)  ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
 1552 #define get_binmap(m, i)  ((m)->binmap[idx2block (i)] & idx2bit (i))
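      /* Editor's sketch (not part of glibc): for bin index 70,

           idx2block (70) == 70 >> 5         == 2
           idx2bit (70)   == 1U << (70 & 31) == 1U << 6 == 0x40

         so mark_bin (m, 70) sets bit 6 of m->binmap[2], and the bin-scanning
         loop in _int_malloc can skip a whole block of 32 bins whenever the
         corresponding map word is zero.  */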
 1553 
 1554 /*
 1555    Fastbins
 1556 
 1557     An array of lists holding recently freed small chunks.  Fastbins
 1558     are not doubly linked.  It is faster to single-link them, and
 1559     since chunks are never removed from the middles of these lists,
 1560     double linking is not necessary. Also, unlike regular bins, they
 1561     are not even processed in FIFO order (they use faster LIFO) since
 1562     ordering doesn't much matter in the transient contexts in which
 1563     fastbins are normally used.
 1564 
 1565     Chunks in fastbins keep their inuse bit set, so they cannot
 1566     be consolidated with other free chunks. malloc_consolidate
 1567     releases all chunks in fastbins and consolidates them with
 1568     other free chunks.
 1569  */
 1570 
 1571 typedef struct malloc_chunk *mfastbinptr;
 1572 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
 1573 
 1574 /* offset 2 to use otherwise unindexable first 2 bins */
 1575 #define fastbin_index(sz) \
 1576   ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
 1577 
 1578 
 1579 /* The maximum fastbin request size we support */
 1580 #define MAX_FAST_SIZE     (80 * SIZE_SZ / 4)
 1581 
 1582 #define NFASTBINS  (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
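      /* Editor's sketch (not part of glibc): on a 64-bit build (SIZE_SZ == 8)
         fastbin_index steps through chunk sizes 16 bytes at a time:

           fastbin_index (32)  == (32 >> 4) - 2 == 0
           fastbin_index (48)  == 1
           ...
           fastbin_index (176) == 9

         With MAX_FAST_SIZE == 160 and request2size (160) == 176, this
         gives NFASTBINS == 10.  */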
 1583 
 1584 /*
 1585    FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
 1586    that triggers automatic consolidation of possibly-surrounding
 1587    fastbin chunks. This is a heuristic, so the exact value should not
 1588    matter too much. It is defined at half the default trim threshold as a
 1589    compromise heuristic to only attempt consolidation if it is likely
 1590    to lead to trimming. However, it is not dynamically tunable, since
 1591    consolidation reduces fragmentation surrounding large chunks even
 1592    if trimming is not used.
 1593  */
 1594 
 1595 #define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
 1596 
 1597 /*
 1598    NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
 1599    regions.  Otherwise, contiguity is exploited in merging together,
 1600    when possible, results from consecutive MORECORE calls.
 1601 
 1602    The initial value comes from MORECORE_CONTIGUOUS, but is
 1603    changed dynamically if mmap is ever used as an sbrk substitute.
 1604  */
 1605 
 1606 #define NONCONTIGUOUS_BIT     (2U)
 1607 
 1608 #define contiguous(M)          (((M)->flags & NONCONTIGUOUS_BIT) == 0)
 1609 #define noncontiguous(M)       (((M)->flags & NONCONTIGUOUS_BIT) != 0)
 1610 #define set_noncontiguous(M)   ((M)->flags |= NONCONTIGUOUS_BIT)
 1611 #define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
 1612 
 1613 /* Maximum size of memory handled in fastbins.  */
 1614 static INTERNAL_SIZE_T global_max_fast;
 1615 
 1616 /*
 1617    Set value of max_fast.
 1618    Use impossibly small value if 0.
 1619    Precondition: there are no existing fastbin chunks in the main arena.
 1620    Since do_check_malloc_state () checks this, we call malloc_consolidate ()
 1621    before changing max_fast.  Note other arenas will leak their fast bin
 1622    entries if max_fast is reduced.
 1623  */
 1624 
 1625 #define set_max_fast(s) \
 1626   global_max_fast = (((s) == 0)                           \
 1627                      ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
 1628 
 1629 static inline INTERNAL_SIZE_T
 1630 get_max_fast (void)
 1631 {
 1632   /* Tell the GCC optimizers that global_max_fast is never larger
 1633      than MAX_FAST_SIZE.  This avoids out-of-bounds array accesses in
 1634      _int_malloc after constant propagation of the size parameter.
 1635      (The code never executes because malloc preserves the
 1636      global_max_fast invariant, but the optimizers may not recognize
 1637      this.)  */
 1638   if (global_max_fast > MAX_FAST_SIZE)
 1639     __builtin_unreachable ();
 1640   return global_max_fast;
 1641 }
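      /* Editor's sketch (not part of glibc): on a 64-bit build,
         set_max_fast (64) stores (64 + 8) & ~15 == 64, while
         set_max_fast (0) stores SMALLBIN_WIDTH == 16, which is smaller
         than MINSIZE and therefore effectively disables fastbins.  At run
         time this is normally reached through mallopt (M_MXFAST, n).  */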
 1642 
 1643 /*
 1644    ----------- Internal state representation and initialization -----------
 1645  */
 1646 
 1647 /*
 1648    have_fastchunks indicates that there are probably some fastbin chunks.
 1649    It is set true on entering a chunk into any fastbin, and cleared early in
 1650    malloc_consolidate.  The value is approximate since it may be set when there
 1651    are no fastbin chunks, or it may be clear even if there are fastbin chunks
 1652    available.  Given its sole purpose is to reduce the number of redundant calls to
 1653    malloc_consolidate, it does not affect correctness.  As a result we can safely
 1654    use relaxed atomic accesses.
 1655  */
 1656 
 1657 
 1658 struct malloc_state
 1659 {
 1660   /* Serialize access.  */
 1661   __libc_lock_define (, mutex);
 1662 
 1663   /* Flags (formerly in max_fast).  */
 1664   int flags;
 1665 
 1666   /* Set if the fastbin chunks contain recently inserted free blocks.  */
 1667   /* Note this is a bool but not all targets support atomics on booleans.  */
 1668   int have_fastchunks;
 1669 
 1670   /* Fastbins */
 1671   mfastbinptr fastbinsY[NFASTBINS];
 1672 
 1673   /* Base of the topmost chunk -- not otherwise kept in a bin */
 1674   mchunkptr top;
 1675 
 1676   /* The remainder from the most recent split of a small request */
 1677   mchunkptr last_remainder;
 1678 
 1679   /* Normal bins packed as described above */
 1680   mchunkptr bins[NBINS * 2 - 2];
 1681 
 1682   /* Bitmap of bins */
 1683   unsigned int binmap[BINMAPSIZE];
 1684 
 1685   /* Linked list */
 1686   struct malloc_state *next;
 1687 
 1688   /* Linked list for free arenas.  Access to this field is serialized
 1689      by free_list_lock in arena.c.  */
 1690   struct malloc_state *next_free;
 1691 
 1692   /* Number of threads attached to this arena.  0 if the arena is on
 1693      the free list.  Access to this field is serialized by
 1694      free_list_lock in arena.c.  */
 1695   INTERNAL_SIZE_T attached_threads;
 1696 
 1697   /* Memory allocated from the system in this arena.  */
 1698   INTERNAL_SIZE_T system_mem;
 1699   INTERNAL_SIZE_T max_system_mem;
 1700 };
 1701 
 1702 struct malloc_par
 1703 {
 1704   /* Tunable parameters */
 1705   unsigned long trim_threshold;
 1706   INTERNAL_SIZE_T top_pad;
 1707   INTERNAL_SIZE_T mmap_threshold;
 1708   INTERNAL_SIZE_T arena_test;
 1709   INTERNAL_SIZE_T arena_max;
 1710 
 1711   /* Memory map support */
 1712   int n_mmaps;
 1713   int n_mmaps_max;
 1714   int max_n_mmaps;
 1715   /* the mmap_threshold is dynamic, until the user sets
 1716      it manually, at which point we need to disable any
 1717      dynamic behavior. */
 1718   int no_dyn_threshold;
 1719 
 1720   /* Statistics */
 1721   INTERNAL_SIZE_T mmapped_mem;
 1722   INTERNAL_SIZE_T max_mmapped_mem;
 1723 
 1724   /* First address handed out by MORECORE/sbrk.  */
 1725   char *sbrk_base;
 1726 
 1727 #if USE_TCACHE
 1728   /* Maximum number of buckets to use.  */
 1729   size_t tcache_bins;
 1730   size_t tcache_max_bytes;
 1731   /* Maximum number of chunks in each bucket.  */
 1732   size_t tcache_count;
 1733   /* Maximum number of chunks to remove from the unsorted list, which
 1734      aren't used to prefill the cache.  */
 1735   size_t tcache_unsorted_limit;
 1736 #endif
 1737 };
 1738 
 1739 /* There are several instances of this struct ("arenas") in this
 1740    malloc.  If you are adapting this malloc in a way that does NOT use
 1741    a static or mmapped malloc_state, you MUST explicitly zero-fill it
 1742    before using. This malloc relies on the property that malloc_state
 1743    is initialized to all zeroes (as is true of C statics).  */
 1744 
 1745 static struct malloc_state main_arena =
 1746 {
 1747   .mutex = _LIBC_LOCK_INITIALIZER,
 1748   .next = &main_arena,
 1749   .attached_threads = 1
 1750 };
 1751 
 1752 /* These variables are used for undumping support.  Chunks are marked
 1753    as using mmap, but we leave them alone if they fall into this
 1754    range.  NB: The chunk size for these chunks only includes the
 1755    initial size field (of SIZE_SZ bytes), there is no trailing size
 1756    field (unlike with regular mmapped chunks).  */
 1757 static mchunkptr dumped_main_arena_start; /* Inclusive.  */
 1758 static mchunkptr dumped_main_arena_end;   /* Exclusive.  */
 1759 
 1760 /* True if the pointer falls into the dumped arena.  Use this after
 1761    chunk_is_mmapped indicates a chunk is mmapped.  */
 1762 #define DUMPED_MAIN_ARENA_CHUNK(p) \
 1763   ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
 1764 
 1765 /* There is only one instance of the malloc parameters.  */
 1766 
 1767 static struct malloc_par mp_ =
 1768 {
 1769   .top_pad = DEFAULT_TOP_PAD,
 1770   .n_mmaps_max = DEFAULT_MMAP_MAX,
 1771   .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
 1772   .trim_threshold = DEFAULT_TRIM_THRESHOLD,
 1773 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
 1774   .arena_test = NARENAS_FROM_NCORES (1)
 1775 #if USE_TCACHE
 1776   ,
 1777   .tcache_count = TCACHE_FILL_COUNT,
 1778   .tcache_bins = TCACHE_MAX_BINS,
 1779   .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
 1780   .tcache_unsorted_limit = 0 /* No limit.  */
 1781 #endif
 1782 };
 1783 
 1784 /*
 1785    Initialize a malloc_state struct.
 1786 
 1787    This is called from ptmalloc_init () or from _int_new_arena ()
 1788    when creating a new arena.
 1789  */
 1790 
 1791 static void
 1792 malloc_init_state (mstate av)
 1793 {
 1794   int i;
 1795   mbinptr bin;
 1796 
 1797   /* Establish circular links for normal bins */
 1798   for (i = 1; i < NBINS; ++i)
 1799     {
 1800       bin = bin_at (av, i);
 1801       bin->fd = bin->bk = bin;
 1802     }
 1803 
 1804 #if MORECORE_CONTIGUOUS
 1805   if (av != &main_arena)
 1806 #endif
 1807   set_noncontiguous (av);
 1808   if (av == &main_arena)
 1809     set_max_fast (DEFAULT_MXFAST);
 1810   atomic_store_relaxed (&av->have_fastchunks, false);
 1811 
 1812   av->top = initial_top (av);
 1813 }
 1814 
 1815 /*
 1816    Other internal utilities operating on mstates
 1817  */
 1818 
 1819 static void *sysmalloc (INTERNAL_SIZE_T, mstate);
 1820 static int      systrim (size_t, mstate);
 1821 static void     malloc_consolidate (mstate);
 1822 
 1823 
 1824 /* -------------- Early definitions for debugging hooks ---------------- */
 1825 
 1826 /* Define and initialize the hook variables.  These weak definitions must
 1827    appear before any use of the variables in a function (arena.c uses one).  */
 1828 #ifndef weak_variable
 1829 /* In GNU libc we want the hook variables to be weak definitions to
 1830    avoid a problem with Emacs.  */
 1831 # define weak_variable weak_function
 1832 #endif
 1833 
 1834 /* Forward declarations.  */
 1835 static void *malloc_hook_ini (size_t sz,
 1836                               const void *caller) __THROW;
 1837 static void *realloc_hook_ini (void *ptr, size_t sz,
 1838                                const void *caller) __THROW;
 1839 static void *memalign_hook_ini (size_t alignment, size_t sz,
 1840                                 const void *caller) __THROW;
 1841 
 1842 #if HAVE_MALLOC_INIT_HOOK
 1843 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
 1844 compat_symbol (libc, __malloc_initialize_hook,
 1845            __malloc_initialize_hook, GLIBC_2_0);
 1846 #endif
 1847 
 1848 void weak_variable (*__free_hook) (void *__ptr,
 1849                                    const void *) = NULL;
 1850 void *weak_variable (*__malloc_hook)
 1851   (size_t __size, const void *) = malloc_hook_ini;
 1852 void *weak_variable (*__realloc_hook)
 1853   (void *__ptr, size_t __size, const void *)
 1854   = realloc_hook_ini;
 1855 void *weak_variable (*__memalign_hook)
 1856   (size_t __alignment, size_t __size, const void *)
 1857   = memalign_hook_ini;
 1858 void weak_variable (*__after_morecore_hook) (void) = NULL;
 1859 
 1860 /* This function is called from the arena shutdown hook, to free the
 1861    thread cache (if it exists).  */
 1862 static void tcache_thread_shutdown (void);
 1863 
 1864 /* ------------------ Testing support ----------------------------------*/
 1865 
 1866 static int perturb_byte;
 1867 
 1868 static void
 1869 alloc_perturb (char *p, size_t n)
 1870 {
 1871   if (__glibc_unlikely (perturb_byte))
 1872     memset (p, perturb_byte ^ 0xff, n);
 1873 }
 1874 
 1875 static void
 1876 free_perturb (char *p, size_t n)
 1877 {
 1878   if (__glibc_unlikely (perturb_byte))
 1879     memset (p, perturb_byte, n);
 1880 }
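      /* Editor's note (not part of glibc): perturb_byte is normally set from
         the MALLOC_PERTURB_ environment variable (or mallopt (M_PERTURB, n)).
         With a nonzero value, newly allocated memory is filled with
         perturb_byte ^ 0xff and freed memory with perturb_byte, which helps
         expose reads of uninitialized or freed memory during testing.  */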
 1881 
 1882 
 1883 
 1884 #include <stap-probe.h>
 1885 
 1886 /* ------------------- Support for multiple arenas -------------------- */
 1887 #include "arena.c"
 1888 
 1889 /*
 1890    Debugging support
 1891 
 1892    These routines make a number of assertions about the states
 1893    of data structures that should be true at all times. If any
 1894    are not true, it's very likely that a user program has somehow
 1895    trashed memory. (It's also possible that there is a coding error
 1896    in malloc, in which case please report it!)
 1897  */
 1898 
 1899 #if !MALLOC_DEBUG
 1900 
 1901 # define check_chunk(A, P)
 1902 # define check_free_chunk(A, P)
 1903 # define check_inuse_chunk(A, P)
 1904 # define check_remalloced_chunk(A, P, N)
 1905 # define check_malloced_chunk(A, P, N)
 1906 # define check_malloc_state(A)
 1907 
 1908 #else
 1909 
 1910 # define check_chunk(A, P)              do_check_chunk (A, P)
 1911 # define check_free_chunk(A, P)         do_check_free_chunk (A, P)
 1912 # define check_inuse_chunk(A, P)        do_check_inuse_chunk (A, P)
 1913 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
 1914 # define check_malloced_chunk(A, P, N)   do_check_malloced_chunk (A, P, N)
 1915 # define check_malloc_state(A)         do_check_malloc_state (A)
 1916 
 1917 /*
 1918    Properties of all chunks
 1919  */
 1920 
 1921 static void
 1922 do_check_chunk (mstate av, mchunkptr p)
 1923 {
 1924   unsigned long sz = chunksize (p);
 1925   /* min and max possible addresses assuming contiguous allocation */
 1926   char *max_address = (char *) (av->top) + chunksize (av->top);
 1927   char *min_address = max_address - av->system_mem;
 1928 
 1929   if (!chunk_is_mmapped (p))
 1930     {
 1931       /* Has legal address ... */
 1932       if (p != av->top)
 1933         {
 1934           if (contiguous (av))
 1935             {
 1936               assert (((char *) p) >= min_address);
 1937               assert (((char *) p + sz) <= ((char *) (av->top)));
 1938             }
 1939         }
 1940       else
 1941         {
 1942           /* top size is always at least MINSIZE */
 1943           assert ((unsigned long) (sz) >= MINSIZE);
 1944           /* top predecessor always marked inuse */
 1945           assert (prev_inuse (p));
 1946         }
 1947     }
 1948   else if (!DUMPED_MAIN_ARENA_CHUNK (p))
 1949     {
 1950       /* address is outside main heap  */
 1951       if (contiguous (av) && av->top != initial_top (av))
 1952         {
 1953           assert (((char *) p) < min_address || ((char *) p) >= max_address);
 1954         }
 1955       /* chunk is page-aligned */
 1956       assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
 1957       /* mem is aligned */
 1958       assert (aligned_OK (chunk2mem (p)));
 1959     }
 1960 }
 1961 
 1962 /*
 1963    Properties of free chunks
 1964  */
 1965 
 1966 static void
 1967 do_check_free_chunk (mstate av, mchunkptr p)
 1968 {
 1969   INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
 1970   mchunkptr next = chunk_at_offset (p, sz);
 1971 
 1972   do_check_chunk (av, p);
 1973 
 1974   /* Chunk must claim to be free ... */
 1975   assert (!inuse (p));
 1976   assert (!chunk_is_mmapped (p));
 1977 
 1978   /* Unless a special marker, must have OK fields */
 1979   if ((unsigned long) (sz) >= MINSIZE)
 1980     {
 1981       assert ((sz & MALLOC_ALIGN_MASK) == 0);
 1982       assert (aligned_OK (chunk2mem (p)));
 1983       /* ... matching footer field */
 1984       assert (prev_size (next_chunk (p)) == sz);
 1985       /* ... and is fully consolidated */
 1986       assert (prev_inuse (p));
 1987       assert (next == av->top || inuse (next));
 1988 
 1989       /* ... and has minimally sane links */
 1990       assert (p->fd->bk == p);
 1991       assert (p->bk->fd == p);
 1992     }
 1993   else /* markers are always of size SIZE_SZ */
 1994     assert (sz == SIZE_SZ);
 1995 }
 1996 
 1997 /*
 1998    Properties of inuse chunks
 1999  */
 2000 
 2001 static void
 2002 do_check_inuse_chunk (mstate av, mchunkptr p)
 2003 {
 2004   mchunkptr next;
 2005 
 2006   do_check_chunk (av, p);
 2007 
 2008   if (chunk_is_mmapped (p))
 2009     return; /* mmapped chunks have no next/prev */
 2010 
 2011   /* Check whether it claims to be in use ... */
 2012   assert (inuse (p));
 2013 
 2014   next = next_chunk (p);
 2015 
 2016   /* ... and is surrounded by OK chunks.
 2017      Since more things can be checked with free chunks than inuse ones,
 2018      if an inuse chunk borders them and debug is on, it's worth doing them.
 2019    */
 2020   if (!prev_inuse (p))
 2021     {
 2022       /* Note that we cannot even look at prev unless it is not inuse */
 2023       mchunkptr prv = prev_chunk (p);
 2024       assert (next_chunk (prv) == p);
 2025       do_check_free_chunk (av, prv);
 2026     }
 2027 
 2028   if (next == av->top)
 2029     {
 2030       assert (prev_inuse (next));
 2031       assert (chunksize (next) >= MINSIZE);
 2032     }
 2033   else if (!inuse (next))
 2034     do_check_free_chunk (av, next);
 2035 }
 2036 
 2037 /*
 2038    Properties of chunks recycled from fastbins
 2039  */
 2040 
 2041 static void
 2042 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
 2043 {
 2044   INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
 2045 
 2046   if (!chunk_is_mmapped (p))
 2047     {
 2048       assert (av == arena_for_chunk (p));
 2049       if (chunk_main_arena (p))
 2050         assert (av == &main_arena);
 2051       else
 2052         assert (av != &main_arena);
 2053     }
 2054 
 2055   do_check_inuse_chunk (av, p);
 2056 
 2057   /* Legal size ... */
 2058   assert ((sz & MALLOC_ALIGN_MASK) == 0);
 2059   assert ((unsigned long) (sz) >= MINSIZE);
 2060   /* ... and alignment */
 2061   assert (aligned_OK (chunk2mem (p)));
 2062   /* chunk is less than MINSIZE more than request */
 2063   assert ((long) (sz) - (long) (s) >= 0);
 2064   assert ((long) (sz) - (long) (s + MINSIZE) < 0);
 2065 }
 2066 
 2067 /*
 2068    Properties of nonrecycled chunks at the point they are malloced
 2069  */
 2070 
 2071 static void
 2072 do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
 2073 {
 2074   /* same as recycled case ... */
 2075   do_check_remalloced_chunk (av, p, s);
 2076 
 2077   /*
 2078      ... plus,  must obey implementation invariant that prev_inuse is
 2079      always true of any allocated chunk; i.e., that each allocated
 2080      chunk borders either a previously allocated and still in-use
 2081      chunk, or the base of its memory arena. This is ensured
 2082      by making all allocations from the `lowest' part of any found
 2083      chunk.  This does not necessarily hold however for chunks
 2084      recycled via fastbins.
 2085    */
 2086 
 2087   assert (prev_inuse (p));
 2088 }
 2089 
 2090 
 2091 /*
 2092    Properties of malloc_state.
 2093 
 2094    This may be useful for debugging malloc, as well as detecting user
 2095    programmer errors that somehow write into malloc_state.
 2096 
 2097    If you are extending or experimenting with this malloc, you can
 2098    probably figure out how to hack this routine to print out or
 2099    display chunk addresses, sizes, bins, and other instrumentation.
 2100  */
 2101 
 2102 static void
 2103 do_check_malloc_state (mstate av)
 2104 {
 2105   int i;
 2106   mchunkptr p;
 2107   mchunkptr q;
 2108   mbinptr b;
 2109   unsigned int idx;
 2110   INTERNAL_SIZE_T size;
 2111   unsigned long total = 0;
 2112   int max_fast_bin;
 2113 
 2114   /* internal size_t must be no wider than pointer type */
 2115   assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
 2116 
 2117   /* alignment is a power of 2 */
 2118   assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
 2119 
 2120   /* Check the arena is initialized. */
 2121   assert (av->top != 0);
 2122 
 2123   /* No memory has been allocated yet, so doing more tests is not possible.  */
 2124   if (av->top == initial_top (av))
 2125     return;
 2126 
 2127   /* pagesize is a power of 2 */
 2128   assert (powerof2(GLRO (dl_pagesize)));
 2129 
 2130   /* A contiguous main_arena is consistent with sbrk_base.  */
 2131   if (av == &main_arena && contiguous (av))
 2132     assert ((char *) mp_.sbrk_base + av->system_mem ==
 2133             (char *) av->top + chunksize (av->top));
 2134 
 2135   /* properties of fastbins */
 2136 
 2137   /* max_fast is in allowed range */
 2138   assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
 2139 
 2140   max_fast_bin = fastbin_index (get_max_fast ());
 2141 
 2142   for (i = 0; i < NFASTBINS; ++i)
 2143     {
 2144       p = fastbin (av, i);
 2145 
 2146       /* The following test can only be performed for the main arena.
 2147          While mallopt calls malloc_consolidate to get rid of all fast
 2148          bins (especially those larger than the new maximum), this only
 2149          happens for the main arena.  Trying to do this for any
 2150          other arena would mean those arenas have to be locked and
 2151          malloc_consolidate be called for them.  This is excessive.  And
 2152          even if this is acceptable to somebody it still cannot solve
 2153          the problem completely since if the arena is locked a
 2154          concurrent malloc call might create a new arena which then
 2155          could use the newly invalid fast bins.  */
 2156 
 2157       /* all bins past max_fast are empty */
 2158       if (av == &main_arena && i > max_fast_bin)
 2159         assert (p == 0);
 2160 
 2161       while (p != 0)
 2162         {
 2163           /* each chunk claims to be inuse */
 2164           do_check_inuse_chunk (av, p);
 2165           total += chunksize (p);
 2166           /* chunk belongs in this bin */
 2167           assert (fastbin_index (chunksize (p)) == i);
 2168           p = p->fd;
 2169         }
 2170     }
 2171 
 2172   /* check normal bins */
 2173   for (i = 1; i < NBINS; ++i)
 2174     {
 2175       b = bin_at (av, i);
 2176 
 2177       /* binmap is accurate (except for bin 1 == unsorted_chunks) */
 2178       if (i >= 2)
 2179         {
 2180           unsigned int binbit = get_binmap (av, i);
 2181           int empty = last (b) == b;
 2182           if (!binbit)
 2183             assert (empty);
 2184           else if (!empty)
 2185             assert (binbit);
 2186         }
 2187 
 2188       for (p = last (b); p != b; p = p->bk)
 2189         {
 2190           /* each chunk claims to be free */
 2191           do_check_free_chunk (av, p);
 2192           size = chunksize (p);
 2193           total += size;
 2194           if (i >= 2)
 2195             {
 2196               /* chunk belongs in bin */
 2197               idx = bin_index (size);
 2198               assert (idx == i);
 2199               /* lists are sorted */
 2200               assert (p->bk == b ||
 2201                       (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
 2202 
 2203               if (!in_smallbin_range (size))
 2204                 {
 2205                   if (p->fd_nextsize != NULL)
 2206                     {
 2207                       if (p->fd_nextsize == p)
 2208                         assert (p->bk_nextsize == p);
 2209                       else
 2210                         {
 2211                           if (p->fd_nextsize == first (b))
 2212                             assert (chunksize (p) < chunksize (p->fd_nextsize));
 2213                           else
 2214                             assert (chunksize (p) > chunksize (p->fd_nextsize));
 2215 
 2216                           if (p == first (b))
 2217                             assert (chunksize (p) > chunksize (p->bk_nextsize));
 2218                           else
 2219                             assert (chunksize (p) < chunksize (p->bk_nextsize));
 2220                         }
 2221                     }
 2222                   else
 2223                     assert (p->bk_nextsize == NULL);
 2224                 }
 2225             }
 2226           else if (!in_smallbin_range (size))
 2227             assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
 2228           /* chunk is followed by a legal chain of inuse chunks */
 2229           for (q = next_chunk (p);
 2230                (q != av->top && inuse (q) &&
 2231                 (unsigned long) (chunksize (q)) >= MINSIZE);
 2232                q = next_chunk (q))
 2233             do_check_inuse_chunk (av, q);
 2234         }
 2235     }
 2236 
 2237   /* top chunk is OK */
 2238   check_chunk (av, av->top);
 2239 }
 2240 #endif
 2241 
 2242 
 2243 /* ----------------- Support for debugging hooks -------------------- */
 2244 #include "hooks.c"
 2245 
 2246 
 2247 /* ----------- Routines dealing with system allocation -------------- */
 2248 
 2249 /*
 2250    sysmalloc handles malloc cases requiring more memory from the system.
 2251    On entry, it is assumed that av->top does not have enough
 2252    space to service request for nb bytes, thus requiring that av->top
 2253    be extended or replaced.
 2254  */
 2255 
 2256 static void *
 2257 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
 2258 {
 2259   mchunkptr old_top;              /* incoming value of av->top */
 2260   INTERNAL_SIZE_T old_size;       /* its size */
 2261   char *old_end;                  /* its end address */
 2262 
 2263   long size;                      /* arg to first MORECORE or mmap call */
 2264   char *brk;                      /* return value from MORECORE */
 2265 
 2266   long correction;                /* arg to 2nd MORECORE call */
 2267   char *snd_brk;                  /* 2nd return val */
 2268 
 2269   INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
 2270   INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
 2271   char *aligned_brk;              /* aligned offset into brk */
 2272 
 2273   mchunkptr p;                    /* the allocated/returned chunk */
 2274   mchunkptr remainder;            /* remainder from allocation */
 2275   unsigned long remainder_size;   /* its size */
 2276 
 2277 
 2278   size_t pagesize = GLRO (dl_pagesize);
 2279   bool tried_mmap = false;
 2280 
 2281 
 2282   /*
 2283      If have mmap, and the request size meets the mmap threshold, and
 2284      the system supports mmap, and there are few enough currently
 2285      allocated mmapped regions, try to directly map this request
 2286      rather than expanding top.
 2287    */
 2288 
 2289   if (av == NULL
 2290       || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
 2291       && (mp_.n_mmaps < mp_.n_mmaps_max)))
 2292     {
 2293       char *mm;           /* return value from mmap call*/
 2294 
 2295     try_mmap:
 2296       /*
 2297          Round up size to nearest page.  For mmapped chunks, the overhead
 2298          is one SIZE_SZ unit larger than for normal chunks, because there
 2299          is no following chunk whose prev_size field could be used.
 2300 
 2301          See the front_misalign handling below, for glibc there is no
 2302         need for further alignments unless we have high alignment.
 2303        */
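            /* Editor's example (not part of glibc): with the usual 64-bit
               parameters, a padded chunk size nb of 200000 gives
               size == ALIGN_UP (200000 + 8, 4096) == 200704, i.e. 49 whole
               pages for the mapping.  */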
 2304       if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
 2305         size = ALIGN_UP (nb + SIZE_SZ, pagesize);
 2306       else
 2307         size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
 2308       tried_mmap = true;
 2309 
 2310       /* Don't try if size wraps around 0 */
 2311       if ((unsigned long) (size) > (unsigned long) (nb))
 2312         {
 2313           mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
 2314 
 2315           if (mm != MAP_FAILED)
 2316             {
 2317               /*
 2318                  The offset to the start of the mmapped region is stored
 2319                  in the prev_size field of the chunk. This allows us to adjust
 2320                  returned start address to meet alignment requirements here
 2321                  and in memalign(), and still be able to compute proper
 2322                  address argument for later munmap in free() and realloc().
 2323                */
 2324 
 2325               if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
 2326                 {
 2327                   /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
 2328                      MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
 2329                      aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
 2330                   assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
 2331                   front_misalign = 0;
 2332                 }
 2333               else
 2334                 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
 2335               if (front_misalign > 0)
 2336                 {
 2337                   correction = MALLOC_ALIGNMENT - front_misalign;
 2338                   p = (mchunkptr) (mm + correction);
 2339                   set_prev_size (p, correction);
 2340                   set_head (p, (size - correction) | IS_MMAPPED);
 2341                 }
 2342               else
 2343                 {
 2344                   p = (mchunkptr) mm;
 2345                   set_prev_size (p, 0);
 2346                   set_head (p, size | IS_MMAPPED);
 2347                 }
 2348 
 2349               /* update statistics */
 2350 
 2351               int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
 2352               atomic_max (&mp_.max_n_mmaps, new);
 2353 
 2354               unsigned long sum;
 2355               sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
 2356               atomic_max (&mp_.max_mmapped_mem, sum);
 2357 
 2358               check_chunk (av, p);
 2359 
 2360               return chunk2mem (p);
 2361             }
 2362         }
 2363     }
 2364 
 2365   /* There are no usable arenas and mmap also failed.  */
 2366   if (av == NULL)
 2367     return 0;
 2368 
 2369   /* Record incoming configuration of top */
 2370 
 2371   old_top = av->top;
 2372   old_size = chunksize (old_top);
 2373   old_end = (char *) (chunk_at_offset (old_top, old_size));
 2374 
 2375   brk = snd_brk = (char *) (MORECORE_FAILURE);
 2376 
 2377   /*
 2378      If not the first time through, we require old_size to be
 2379      at least MINSIZE and to have prev_inuse set.
 2380    */
 2381 
 2382   assert ((old_top == initial_top (av) && old_size == 0) ||
 2383           ((unsigned long) (old_size) >= MINSIZE &&
 2384            prev_inuse (old_top) &&
 2385            ((unsigned long) old_end & (pagesize - 1)) == 0));
 2386 
 2387   /* Precondition: not enough current space to satisfy nb request */
 2388   assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
 2389 
 2390 
 2391   if (av != &main_arena)
 2392     {
 2393       heap_info *old_heap, *heap;
 2394       size_t old_heap_size;
 2395 
 2396       /* First try to extend the current heap. */
 2397       old_heap = heap_for_ptr (old_top);
 2398       old_heap_size = old_heap->size;
 2399       if ((long) (MINSIZE + nb - old_size) > 0
 2400           && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
 2401         {
 2402           av->system_mem += old_heap->size - old_heap_size;
 2403           set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
 2404                     | PREV_INUSE);
 2405         }
 2406       else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
 2407         {
 2408           /* Use a newly allocated heap.  */
 2409           heap->ar_ptr = av;
 2410           heap->prev = old_heap;
 2411           av->system_mem += heap->size;
 2412           /* Set up the new top.  */
 2413           top (av) = chunk_at_offset (heap, sizeof (*heap));
 2414           set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
 2415 
 2416           /* Setup fencepost and free the old top chunk with a multiple of
 2417              MALLOC_ALIGNMENT in size. */
 2418           /* The fencepost takes at least MINSIZE bytes, because it might
 2419              become the top chunk again later.  Note that a footer is set
 2420              up, too, although the chunk is marked in use. */
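                /* Editor's sketch (not part of glibc): after these
                   adjustments the tail of the old heap looks like

                     old_top .. old_top+old_size           freed via _int_free
                                                           (only if >= MINSIZE)
                     old_top+old_size         2*SIZE_SZ    fencepost 1, PREV_INUSE
                     old_top+old_size+2*SIZE_SZ  size 0    fencepost 2, PREV_INUSE

                   so later consolidation can never run past the end of the
                   old heap.  */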
 2421           old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
 2422           set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
 2423           if (old_size >= MINSIZE)
 2424             {
 2425               set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
 2426               set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
 2427               set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
 2428               _int_free (av, old_top, 1);
 2429             }
 2430           else
 2431             {
 2432               set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
 2433               set_foot (old_top, (old_size + 2 * SIZE_SZ));
 2434             }
 2435         }
 2436       else if (!tried_mmap)
 2437         /* We can at least try to use mmap memory.  */
 2438         goto try_mmap;
 2439     }
 2440   else     /* av == main_arena */
 2441 
 2442 
 2443     { /* Request enough space for nb + pad + overhead */
 2444       size = nb + mp_.top_pad + MINSIZE;
 2445 
 2446       /*
 2447          If contiguous, we can subtract out existing space that we hope to
 2448          combine with new space. We add it back later only if
 2449          we don't actually get contiguous space.
 2450        */
 2451 
 2452       if (contiguous (av))
 2453         size -= old_size;
 2454 
 2455       /*
 2456          Round to a multiple of page size.
 2457          If MORECORE is not contiguous, this ensures that we only call it
 2458          with whole-page arguments.  And if MORECORE is contiguous and
 2459          this is not first time through, this preserves page-alignment of
 2460          previous calls. Otherwise, we correct to page-align below.
 2461        */
 2462 
 2463       size = ALIGN_UP (size, pagesize);
 2464 
 2465       /*
 2466          Don't try to call MORECORE if argument is so big as to appear
 2467          negative. Note that since mmap takes size_t arg, it may succeed
 2468          below even if we cannot call MORECORE.
 2469        */
 2470 
 2471       if (size > 0)
 2472         {
 2473           brk = (char *) (MORECORE (size));
 2474           LIBC_PROBE (memory_sbrk_more, 2, brk, size);
 2475         }
 2476 
 2477       if (brk != (char *) (MORECORE_FAILURE))
 2478         {
 2479           /* Call the `morecore' hook if necessary.  */
 2480           void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
 2481           if (__builtin_expect (hook != NULL, 0))
 2482             (*hook)();
 2483         }
 2484       else
 2485         {
 2486           /*
 2487              If have mmap, try using it as a backup when MORECORE fails or
 2488              cannot be used. This is worth doing on systems that have "holes" in
 2489              address space, so sbrk cannot extend to give contiguous space, but
 2490              space is available elsewhere.  Note that we ignore mmap max count
 2491              and threshold limits, since the space will not be used as a
 2492              segregated mmap region.
 2493            */
 2494 
 2495           /* Cannot merge with old top, so add its size back in */
 2496           if (contiguous (av))
 2497             size = ALIGN_UP (size + old_size, pagesize);
 2498 
 2499           /* If we are relying on mmap as backup, then use larger units */
 2500           if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
 2501             size = MMAP_AS_MORECORE_SIZE;
 2502 
 2503           /* Don't try if size wraps around 0 */
 2504           if ((unsigned long) (size) > (unsigned long) (nb))
 2505             {
 2506               char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
 2507 
 2508               if (mbrk != MAP_FAILED)
 2509                 {
 2510                   /* We do not need, and cannot use, another sbrk call to find end */
 2511                   brk = mbrk;
 2512                   snd_brk = brk + size;
 2513 
 2514                   /*
 2515                      Record that we no longer have a contiguous sbrk region.
 2516                      After the first time mmap is used as backup, we do not
 2517                      ever rely on contiguous space since this could incorrectly
 2518                      bridge regions.
 2519                    */
 2520                   set_noncontiguous (av);
 2521                 }
 2522             }
 2523         }
 2524 
 2525       if (brk != (char *) (MORECORE_FAILURE))
 2526         {
 2527           if (mp_.sbrk_base == 0)
 2528             mp_.sbrk_base = brk;
 2529           av->system_mem += size;
 2530 
 2531           /*
 2532              If MORECORE extends previous space, we can likewise extend top size.
 2533            */
 2534 
 2535           if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
 2536             set_head (old_top, (size + old_size) | PREV_INUSE);
 2537 
 2538           else if (contiguous (av) && old_size && brk < old_end)
 2539             /* Oops!  Someone else killed our space.  Can't touch anything.  */
 2540             malloc_printerr ("break adjusted to free malloc space");
 2541 
 2542           /*
 2543              Otherwise, make adjustments:
 2544 
 2545            * If the first time through or noncontiguous, we need to call sbrk
 2546               just to find out where the end of memory lies.
 2547 
 2548            * We need to ensure that all returned chunks from malloc will meet
 2549               MALLOC_ALIGNMENT
 2550 
 2551            * If there was an intervening foreign sbrk, we need to adjust sbrk
 2552               request size to account for the fact that we will not be able to
 2553               combine new space with existing space in old_top.
 2554 
 2555            * Almost all systems internally allocate whole pages at a time, in
 2556               which case we might as well use the whole last page of request.
 2557               So we allocate enough more memory to hit a page boundary now,
 2558               which in turn causes future contiguous calls to page-align.
 2559            */
 2560 
 2561           else
 2562             {
 2563               front_misalign = 0;
 2564               end_misalign = 0;
 2565               correction = 0;
 2566               aligned_brk = brk;
 2567 
 2568               /* handle contiguous cases */
 2569               if (contiguous (av))
 2570                 {
 2571                   /* Count foreign sbrk as system_mem.  */
 2572                   if (old_size)
 2573                     av->system_mem += brk - old_end;
 2574 
 2575                   /* Guarantee alignment of first new chunk made from this space */
 2576 
 2577                   front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
 2578                   if (front_misalign > 0)
 2579                     {
 2580                       /*
 2581                          Skip over some bytes to arrive at an aligned position.
 2582                          We don't need to specially mark these wasted front bytes.
 2583                          They will never be accessed anyway because
 2584                          prev_inuse of av->top (and any chunk created from its start)
 2585                          is always true after initialization.
 2586                        */
 2587 
 2588                       correction = MALLOC_ALIGNMENT - front_misalign;
 2589                       aligned_brk += correction;
 2590                     }
 2591 
 2592                   /*
 2593                      If this isn't adjacent to existing space, then we will not
 2594                      be able to merge with old_top space, so must add to 2nd request.
 2595                    */
 2596 
 2597                   correction += old_size;
 2598 
 2599                   /* Extend the end address to hit a page boundary */
 2600                   end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
 2601                   correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
 2602 
 2603                   assert (correction >= 0);
 2604                   snd_brk = (char *) (MORECORE (correction));
 2605 
 2606                   /*
 2607                      If can't allocate correction, try to at least find out current
 2608                      brk.  It might be enough to proceed without failing.
 2609 
 2610                      Note that if second sbrk did NOT fail, we assume that space
 2611                      is contiguous with first sbrk. This is a safe assumption unless
 2612                      program is multithreaded but doesn't use locks and a foreign sbrk
 2613                      occurred between our first and second calls.
 2614                    */
 2615 
 2616                   if (snd_brk == (char *) (MORECORE_FAILURE))
 2617                     {
 2618                       correction = 0;
 2619                       snd_brk = (char *) (MORECORE (0));
 2620                     }
 2621                   else
 2622                     {
 2623                       /* Call the `morecore' hook if necessary.  */
 2624                       void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
 2625                       if (__builtin_expect (hook != NULL, 0))
 2626                         (*hook)();
 2627                     }
 2628                 }
 2629 
 2630               /* handle non-contiguous cases */
 2631               else
 2632                 {
 2633                   if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
 2634                     /* MORECORE/mmap must correctly align */
 2635                     assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
 2636                   else
 2637                     {
 2638                       front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
 2639                       if (front_misalign > 0)
 2640                         {
 2641                           /*
 2642                              Skip over some bytes to arrive at an aligned position.
 2643                              We don't need to specially mark these wasted front bytes.
 2644                              They will never be accessed anyway because
 2645                              prev_inuse of av->top (and any chunk created from its start)
 2646                              is always true after initialization.
 2647                            */
 2648 
 2649                           aligned_brk += MALLOC_ALIGNMENT - front_misalign;
 2650                         }
 2651                     }
 2652 
 2653                   /* Find out current end of memory */
 2654                   if (snd_brk == (char *) (MORECORE_FAILURE))
 2655                     {
 2656                       snd_brk = (char *) (MORECORE (0));
 2657                     }
 2658                 }
 2659 
 2660               /* Adjust top based on results of second sbrk */
 2661               if (snd_brk != (char *) (MORECORE_FAILURE))
 2662                 {
 2663                   av->top = (mchunkptr) aligned_brk;
 2664                   set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
 2665                   av->system_mem += correction;
 2666 
 2667                   /*
 2668                      If not the first time through, we either have a
 2669                      gap due to foreign sbrk or a non-contiguous region.  Insert a
 2670                      double fencepost at old_top to prevent consolidation with space
 2671                      we don't own. These fenceposts are artificial chunks that are
 2672                      marked as inuse and are in any case too small to use.  We need
 2673                      two to make sizes and alignments work out.
 2674                    */
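                         /*
                            An illustrative sketch of the resulting layout, assuming a
                            64-bit build (SIZE_SZ == 8, so each fencepost header
                            advertises 2 * SIZE_SZ == 16 bytes):

                                old_top              old_top + old_size
                                   |  shrunk old_top      | fence #1  | fence #2  | space we
                                   |  (old_size bytes)    | 2*SIZE_SZ | 2*SIZE_SZ | don't own
                                                            PREV_INUSE  PREV_INUSE

                            Both fenceposts look like in-use chunks, so nothing on our
                            side is ever coalesced across the gap that follows old_top.
                          */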
 2675 
 2676                   if (old_size != 0)
 2677                     {
 2678                       /*
 2679                          Shrink old_top to insert fenceposts, keeping size a
 2680                          multiple of MALLOC_ALIGNMENT. We know there is at least
 2681                          enough space in old_top to do this.
 2682                        */
 2683                       old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
 2684                       set_head (old_top, old_size | PREV_INUSE);
 2685 
 2686                       /*
 2687                          Note that the following assignments completely overwrite
 2688                          old_top when old_size was previously MINSIZE.  This is
 2689                          intentional. We need the fencepost, even if old_top otherwise gets
 2690                          lost.
 2691                        */
 2692               set_head (chunk_at_offset (old_top, old_size),
 2693                 (2 * SIZE_SZ) | PREV_INUSE);
 2694               set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
 2695                 (2 * SIZE_SZ) | PREV_INUSE);
 2696 
 2697                       /* If possible, release the rest. */
 2698                       if (old_size >= MINSIZE)
 2699                         {
 2700                           _int_free (av, old_top, 1);
 2701                         }
 2702                     }
 2703                 }
 2704             }
 2705         }
 2706     } /* if (av !=  &main_arena) */
 2707 
 2708   if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
 2709     av->max_system_mem = av->system_mem;
 2710   check_malloc_state (av);
 2711 
 2712   /* finally, do the allocation */
 2713   p = av->top;
 2714   size = chunksize (p);
 2715 
 2716   /* check that one of the above allocation paths succeeded */
 2717   if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
 2718     {
 2719       remainder_size = size - nb;
 2720       remainder = chunk_at_offset (p, nb);
 2721       av->top = remainder;
 2722       set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
 2723       set_head (remainder, remainder_size | PREV_INUSE);
 2724       check_malloced_chunk (av, p, nb);
 2725       return chunk2mem (p);
 2726     }
 2727 
 2728   /* catch all failure paths */
 2729   __set_errno (ENOMEM);
 2730   return 0;
 2731 }
 2732 
 2733 
 2734 /*
 2735    systrim is an inverse of sorts to sysmalloc.  It gives memory back
 2736    to the system (via negative arguments to sbrk) if there is unused
 2737    memory at the `high' end of the malloc pool. It is called
 2738    automatically by free() when top space exceeds the trim
 2739    threshold. It is also called by the public malloc_trim routine.  It
 2740    returns 1 if it actually released any memory, else 0.
 2741  */
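       /*
          For context, applications normally reach systrim through the public
          malloc_trim interface.  A minimal usage sketch, assuming only the
          documented prototype in <malloc.h> (the helper name below is just
          for illustration):

              #include <malloc.h>
              #include <stdio.h>

              void
              release_unused_heap (void)
              {
                // Ask the allocator to give trimmable memory back to the
                // kernel, keeping no extra padding at the top of the heap.
                if (malloc_trim (0))
                  puts ("returned some memory to the system");
              }

          A nonzero pad argument asks the allocator to keep that many bytes of
          slack at the top of the heap for future allocations.
        */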
 2742 
 2743 static int
 2744 systrim (size_t pad, mstate av)
 2745 {
 2746   long top_size;         /* Amount of top-most memory */
 2747   long extra;            /* Amount to release */
 2748   long released;         /* Amount actually released */
 2749   char *current_brk;     /* address returned by pre-check sbrk call */
 2750   char *new_brk;         /* address returned by post-check sbrk call */
 2751   size_t pagesize;
 2752   long top_area;
 2753 
 2754   pagesize = GLRO (dl_pagesize);
 2755   top_size = chunksize (av->top);
 2756 
 2757   top_area = top_size - MINSIZE - 1;
 2758   if (top_area <= pad)
 2759     return 0;
 2760 
 2761   /* Release in pagesize units and round down to the nearest page.  */
 2762   extra = ALIGN_DOWN(top_area - pad, pagesize);
 2763 
 2764   if (extra == 0)
 2765     return 0;
 2766 
 2767   /*
 2768      Only proceed if end of memory is where we last set it.
 2769      This avoids problems if there were foreign sbrk calls.
 2770    */
 2771   current_brk = (char *) (MORECORE (0));
 2772   if (current_brk == (char *) (av->top) + top_size)
 2773     {
 2774       /*
 2775          Attempt to release memory. We ignore MORECORE return value,
 2776          and instead call again to find out where new end of memory is.
  2777          This avoids problems if the first call releases less than we asked,
  2778          or if failure somehow altered the brk value. (We could still
 2779          encounter problems if it altered brk in some very bad way,
 2780          but the only thing we can do is adjust anyway, which will cause
 2781          some downstream failure.)
 2782        */
 2783 
 2784       MORECORE (-extra);
 2785       /* Call the `morecore' hook if necessary.  */
 2786       void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
 2787       if (__builtin_expect (hook != NULL, 0))
 2788         (*hook)();
 2789       new_brk = (char *) (MORECORE (0));
 2790 
 2791       LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
 2792 
 2793       if (new_brk != (char *) MORECORE_FAILURE)
 2794         {
 2795           released = (long) (current_brk - new_brk);
 2796 
 2797           if (released != 0)
 2798             {
 2799               /* Success. Adjust top. */
 2800               av->system_mem -= released;
 2801               set_head (av->top, (top_size - released) | PREV_INUSE);
 2802               check_malloc_state (av);
 2803               return 1;
 2804             }
 2805         }
 2806     }
 2807   return 0;
 2808 }
 2809 
 2810 static void
 2811 munmap_chunk (mchunkptr p)
 2812 {
 2813   INTERNAL_SIZE_T size = chunksize (p);
 2814 
 2815   assert (chunk_is_mmapped (p));
 2816 
 2817   /* Do nothing if the chunk is a faked mmapped chunk in the dumped
 2818      main arena.  We never free this memory.  */
 2819   if (DUMPED_MAIN_ARENA_CHUNK (p))
 2820     return;
 2821 
 2822   uintptr_t block = (uintptr_t) p - prev_size (p);
 2823   size_t total_size = prev_size (p) + size;
  2824   /* Unfortunately we have to do the compiler's job by hand here.  Normally
 2825      we would test BLOCK and TOTAL-SIZE separately for compliance with the
 2826      page size.  But gcc does not recognize the optimization possibility
  2827      (at the moment, at least) so we combine the two values into one before
 2828      the bit test.  */
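         /*
            A worked instance of the combined test, assuming a 4096-byte page
            (so GLRO (dl_pagesize) - 1 == 0xfff):

                block      = 0x7f8a12340000   low 12 bits clear (page aligned)
                total_size =        0x21000   low 12 bits clear (page aligned)
                (block | total_size) & 0xfff == 0   ->  pointer accepted

            If either value had any of its low 12 bits set, so would the OR,
            and the single bit test rejects both misalignment cases at once.
          */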
 2829   if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
 2830     malloc_printerr ("munmap_chunk(): invalid pointer");
 2831 
 2832   atomic_decrement (&mp_.n_mmaps);
 2833   atomic_add (&mp_.mmapped_mem, -total_size);
 2834 
  2835   /* If munmap fails, the process's virtual memory address space is in
  2836      bad shape.  Just leave the block hanging around; the process will
  2837      terminate shortly anyway since not much can be done.  */
 2838   __munmap ((char *) block, total_size);
 2839 }
 2840 
 2841 #if HAVE_MREMAP
 2842 
 2843 static mchunkptr
 2844 mremap_chunk (mchunkptr p, size_t new_size)
 2845 {
 2846   size_t pagesize = GLRO (dl_pagesize);
 2847   INTERNAL_SIZE_T offset = prev_size (p);
 2848   INTERNAL_SIZE_T size = chunksize (p);
 2849   char *cp;
 2850 
 2851   assert (chunk_is_mmapped (p));
 2852   assert (((size + offset) & (GLRO (dl_pagesize) - 1)) == 0);
 2853 
 2854   /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
 2855   new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
 2856 
 2857   /* No need to remap if the number of pages does not change.  */
 2858   if (size + offset == new_size)
 2859     return p;
 2860 
 2861   cp = (char *) __mremap ((char *) p - offset, size + offset, new_size,
 2862                           MREMAP_MAYMOVE);
 2863 
 2864   if (cp == MAP_FAILED)
 2865     return 0;
 2866 
 2867   p = (mchunkptr) (cp + offset);
 2868 
 2869   assert (aligned_OK (chunk2mem (p)));
 2870 
 2871   assert (prev_size (p) == offset);
 2872   set_head (p, (new_size - offset) | IS_MMAPPED);
 2873 
 2874   INTERNAL_SIZE_T new;
 2875   new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
 2876         + new_size - size - offset;
 2877   atomic_max (&mp_.max_mmapped_mem, new);
 2878   return p;
 2879 }
 2880 #endif /* HAVE_MREMAP */
 2881 
 2882 /*------------------------ Public wrappers. --------------------------------*/
 2883 
 2884 #if USE_TCACHE
 2885 
 2886 /* We overlay this structure on the user-data portion of a chunk when
 2887    the chunk is stored in the per-thread cache.  */
 2888 typedef struct tcache_entry
 2889 {
 2890   struct tcache_entry *next;
 2891 } tcache_entry;
 2892 
 2893 /* There is one of these for each thread, which contains the
 2894    per-thread cache (hence "tcache_perthread_struct").  Keeping
 2895    overall size low is mildly important.  Note that COUNTS and ENTRIES
 2896    are redundant (we could have just counted the linked list each
  2897    time); this is done for performance reasons.  */
 2898 typedef struct tcache_perthread_struct
 2899 {
 2900   char counts[TCACHE_MAX_BINS];
 2901   tcache_entry *entries[TCACHE_MAX_BINS];
 2902 } tcache_perthread_struct;
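       /*
          A back-of-the-envelope size, assuming the default TCACHE_MAX_BINS of
          64 and a 64-bit build with 8-byte pointers:

              counts:   64 * sizeof (char)            =  64 bytes
              entries:  64 * sizeof (tcache_entry *)  = 512 bytes
              sizeof (tcache_perthread_struct)        = 576 bytes

          Cached chunks themselves cost nothing extra: the tcache_entry is
          overlaid on the chunk's user data, so each per-bin list is a simple
          singly linked list threaded through the cached chunks:

              entries[i] -> [hdr|next] -> [hdr|next] -> NULL
                                 ^-- chunk2mem (chunk) points here
        */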
 2903 
 2904 static __thread bool tcache_shutting_down = false;
 2905 static __thread tcache_perthread_struct *tcache = NULL;
 2906 
 2907 /* Caller must ensure that we know tc_idx is valid and there's room
 2908    for more chunks.  */
 2909 static __always_inline void
 2910 tcache_put (mchunkptr chunk, size_t tc_idx)
 2911 {
 2912   tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
 2913   assert (tc_idx < TCACHE_MAX_BINS);
 2914   e->next = tcache->entries[tc_idx];
 2915   tcache->entries[tc_idx] = e;
 2916   ++(tcache->counts[tc_idx]);
 2917 }
 2918 
  2919 /* Caller must ensure that we know tc_idx is valid and there are
  2920    chunks available to remove.  */
 2921 static __always_inline void *
 2922 tcache_get (size_t tc_idx)
 2923 {
 2924   tcache_entry *e = tcache->entries[tc_idx];
 2925   assert (tc_idx < TCACHE_MAX_BINS);
 2926   assert (tcache->entries[tc_idx] > 0);
 2927   tcache->entries[tc_idx] = e->next;
 2928   --(tcache->counts[tc_idx]);
 2929   return (void *) e;
 2930 }
 2931 
 2932 static void
 2933 tcache_thread_shutdown (void)
 2934 {
 2935   int i;
 2936   tcache_perthread_struct *tcache_tmp = tcache;
 2937 
 2938   if (!tcache)
 2939     return;
 2940 
 2941   /* Disable the tcache and prevent it from being reinitialized.  */
 2942   tcache = NULL;
 2943   tcache_shutting_down = true;
 2944 
 2945   /* Free all of the entries and the tcache itself back to the arena
 2946      heap for coalescing.  */
 2947   for (i = 0; i < TCACHE_MAX_BINS; ++i)
 2948     {
 2949       while (tcache_tmp->entries[i])
 2950     {
 2951       tcache_entry *e = tcache_tmp->entries[i];
 2952       tcache_tmp->entries[i] = e->next;
 2953       __libc_free (e);
 2954     }
 2955     }
 2956 
 2957   __libc_free (tcache_tmp);
 2958 }
 2959 
 2960 static void
 2961 tcache_init(void)
 2962 {
 2963   mstate ar_ptr;
 2964   void *victim = 0;
 2965   const size_t bytes = sizeof (tcache_perthread_struct);
 2966 
 2967   if (tcache_shutting_down)
 2968     return;
 2969 
 2970   arena_get (ar_ptr, bytes);
 2971   victim = _int_malloc (ar_ptr, bytes);
 2972   if (!victim && ar_ptr != NULL)
 2973     {
 2974       ar_ptr = arena_get_retry (ar_ptr, bytes);
 2975       victim = _int_malloc (ar_ptr, bytes);
 2976     }
 2977 
 2978 
 2979   if (ar_ptr != NULL)
 2980     __libc_lock_unlock (ar_ptr->mutex);
 2981 
 2982   /* In a low memory situation, we may not be able to allocate memory
 2983      - in which case, we just keep trying later.  However, we
 2984      typically do this very early, so either there is sufficient
 2985      memory, or there isn't enough memory to do non-trivial
 2986      allocations anyway.  */
 2987   if (victim)
 2988     {
 2989       tcache = (tcache_perthread_struct *) victim;
 2990       memset (tcache, 0, sizeof (tcache_perthread_struct));
 2991     }
 2992 
 2993 }
 2994 
 2995 # define MAYBE_INIT_TCACHE() \
 2996   if (__glibc_unlikely (tcache == NULL)) \
 2997     tcache_init();
 2998 
 2999 #else  /* !USE_TCACHE */
 3000 # define MAYBE_INIT_TCACHE()
 3001 
 3002 static void
 3003 tcache_thread_shutdown (void)
 3004 {
 3005   /* Nothing to do if there is no thread cache.  */
 3006 }
 3007 
 3008 #endif /* !USE_TCACHE  */
 3009 
 3010 void *
 3011 __libc_malloc (size_t bytes)
 3012 {
 3013   mstate ar_ptr;
 3014   void *victim;
 3015 
 3016   void *(*hook) (size_t, const void *)
 3017     = atomic_forced_read (__malloc_hook);
 3018   if (__builtin_expect (hook != NULL, 0))
 3019     return (*hook)(bytes, RETURN_ADDRESS (0));
 3020 #if USE_TCACHE
  3021   /* _int_free also calls request2size, so be careful not to pad twice.  */
 3022   size_t tbytes;
 3023   checked_request2size (bytes, tbytes);
 3024   size_t tc_idx = csize2tidx (tbytes);
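         /*
            A worked example, assuming a 64-bit build (SIZE_SZ == 8,
            MALLOC_ALIGNMENT == 16, MINSIZE == 32):

                bytes == 24  ->  tbytes == 32  (24 + SIZE_SZ, aligned up)
                             ->  tc_idx == (32 - 32 + 15) / 16 == 0

            so requests of 1..24 bytes share tcache bin 0, 25..40 bytes share
            bin 1, and so on in MALLOC_ALIGNMENT-sized steps.
          */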
 3025 
 3026   MAYBE_INIT_TCACHE ();
 3027 
 3028   DIAG_PUSH_NEEDS_COMMENT;
 3029   if (tc_idx < mp_.tcache_bins
 3030       /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */
 3031       && tcache
 3032       && tcache->entries[tc_idx] != NULL)
 3033     {
 3034       return tcache_get (tc_idx);
 3035     }
 3036   DIAG_POP_NEEDS_COMMENT;
 3037 #endif
 3038 
 3039   if (SINGLE_THREAD_P)
 3040     {
 3041       victim = _int_malloc (&main_arena, bytes);
 3042       assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
 3043           &main_arena == arena_for_chunk (mem2chunk (victim)));
 3044       return victim;
 3045     }
 3046 
 3047   arena_get (ar_ptr, bytes);
 3048 
 3049   victim = _int_malloc (ar_ptr, bytes);
 3050   /* Retry with another arena only if we were able to find a usable arena
 3051      before.  */
 3052   if (!victim && ar_ptr != NULL)
 3053     {
 3054       LIBC_PROBE (memory_malloc_retry, 1, bytes);
 3055       ar_ptr = arena_get_retry (ar_ptr, bytes);
 3056       victim = _int_malloc (ar_ptr, bytes);
 3057     }
 3058 
 3059   if (ar_ptr != NULL)
 3060     __libc_lock_unlock (ar_ptr->mutex);
 3061 
 3062   assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
 3063           ar_ptr == arena_for_chunk (mem2chunk (victim)));
 3064   return victim;
 3065 }
 3066 libc_hidden_def (__libc_malloc)
 3067 
 3068 void
 3069 __libc_free (void *mem)
 3070 {
 3071   mstate ar_ptr;
 3072   mchunkptr p;                          /* chunk corresponding to mem */
 3073 
 3074   void (*hook) (void *, const void *)
 3075     = atomic_forced_read (__free_hook);
 3076   if (__builtin_expect (hook != NULL, 0))
 3077     {
 3078       (*hook)(mem, RETURN_ADDRESS (0));
 3079       return;
 3080     }
 3081 
 3082   if (mem == 0)                              /* free(0) has no effect */
 3083     return;
 3084 
 3085   p = mem2chunk (mem);
 3086 
 3087   if (chunk_is_mmapped (p))                       /* release mmapped memory. */
 3088     {
 3089       /* See if the dynamic brk/mmap threshold needs adjusting.
 3090      Dumped fake mmapped chunks do not affect the threshold.  */
 3091       if (!mp_.no_dyn_threshold
 3092           && chunksize_nomask (p) > mp_.mmap_threshold
 3093           && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
 3094       && !DUMPED_MAIN_ARENA_CHUNK (p))
 3095         {
 3096           mp_.mmap_threshold = chunksize (p);
 3097           mp_.trim_threshold = 2 * mp_.mmap_threshold;
 3098           LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
 3099                       mp_.mmap_threshold, mp_.trim_threshold);
 3100         }
 3101       munmap_chunk (p);
 3102       return;
 3103     }
 3104 
 3105   MAYBE_INIT_TCACHE ();
 3106 
 3107   ar_ptr = arena_for_chunk (p);
 3108   _int_free (ar_ptr, p, 0);
 3109 }
 3110 libc_hidden_def (__libc_free)
 3111 
 3112 void *
 3113 __libc_realloc (void *oldmem, size_t bytes)
 3114 {
 3115   mstate ar_ptr;
 3116   INTERNAL_SIZE_T nb;         /* padded request size */
 3117 
 3118   void *newp;             /* chunk to return */
 3119 
 3120   void *(*hook) (void *, size_t, const void *) =
 3121     atomic_forced_read (__realloc_hook);
 3122   if (__builtin_expect (hook != NULL, 0))
 3123     return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
 3124 
 3125 #if REALLOC_ZERO_BYTES_FREES
 3126   if (bytes == 0 && oldmem != NULL)
 3127     {
 3128       __libc_free (oldmem); return 0;
 3129     }
 3130 #endif
 3131 
 3132   /* realloc of null is supposed to be same as malloc */
 3133   if (oldmem == 0)
 3134     return __libc_malloc (bytes);
 3135 
 3136   /* chunk corresponding to oldmem */
 3137   const mchunkptr oldp = mem2chunk (oldmem);
 3138   /* its size */
 3139   const INTERNAL_SIZE_T oldsize = chunksize (oldp);
 3140 
 3141   if (chunk_is_mmapped (oldp))
 3142     ar_ptr = NULL;
 3143   else
 3144     {
 3145       MAYBE_INIT_TCACHE ();
 3146       ar_ptr = arena_for_chunk (oldp);
 3147     }
 3148 
 3149   /* Little security check which won't hurt performance: the allocator
  3150      never wraps around at the end of the address space.  Therefore
 3151      we can exclude some size values which might appear here by
 3152      accident or by "design" from some intruder.  We need to bypass
 3153      this check for dumped fake mmap chunks from the old main arena
 3154      because the new malloc may provide additional alignment.  */
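         /*
            Why the first test works (a sketch, assuming 64-bit uintptr_t):
            (uintptr_t) -oldsize equals 2^64 - oldsize, so the comparison
            rejects exactly those pointers for which oldp + oldsize would wrap
            past the top of the address space.  A corrupted header claiming
            oldsize == (INTERNAL_SIZE_T) -16, for instance, makes
            (uintptr_t) -oldsize == 16, and any plausible heap pointer trips
            the check.
          */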
 3155   if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
 3156        || __builtin_expect (misaligned_chunk (oldp), 0))
 3157       && !DUMPED_MAIN_ARENA_CHUNK (oldp))
 3158       malloc_printerr ("realloc(): invalid pointer");
 3159 
 3160   checked_request2size (bytes, nb);
 3161 
 3162   if (chunk_is_mmapped (oldp))
 3163     {
 3164       /* If this is a faked mmapped chunk from the dumped main arena,
 3165      always make a copy (and do not free the old chunk).  */
 3166       if (DUMPED_MAIN_ARENA_CHUNK (oldp))
 3167     {
 3168       /* Must alloc, copy, free. */
 3169       void *newmem = __libc_malloc (bytes);
 3170       if (newmem == 0)
 3171         return NULL;
 3172       /* Copy as many bytes as are available from the old chunk
 3173          and fit into the new size.  NB: The overhead for faked
 3174          mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
 3175          regular mmapped chunks.  */
 3176       if (bytes > oldsize - SIZE_SZ)
 3177         bytes = oldsize - SIZE_SZ;
 3178       memcpy (newmem, oldmem, bytes);
 3179       return newmem;
 3180     }
 3181 
 3182       void *newmem;
 3183 
 3184 #if HAVE_MREMAP
 3185       newp = mremap_chunk (oldp, nb);
 3186       if (newp)
 3187         return chunk2mem (newp);
 3188 #endif
 3189       /* Note the extra SIZE_SZ overhead. */
 3190       if (oldsize - SIZE_SZ >= nb)
 3191         return oldmem;                         /* do nothing */
 3192 
 3193       /* Must alloc, copy, free. */
 3194       newmem = __libc_malloc (bytes);
 3195       if (newmem == 0)
 3196         return 0;              /* propagate failure */
 3197 
 3198       memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
 3199       munmap_chunk (oldp);
 3200       return newmem;
 3201     }
 3202 
 3203   if (SINGLE_THREAD_P)
 3204     {
 3205       newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
 3206       assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
 3207           ar_ptr == arena_for_chunk (mem2chunk (newp)));
 3208 
 3209       return newp;
 3210     }
 3211 
 3212   __libc_lock_lock (ar_ptr->mutex);
 3213 
 3214   newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
 3215 
 3216   __libc_lock_unlock (ar_ptr->mutex);
 3217   assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
 3218           ar_ptr == arena_for_chunk (mem2chunk (newp)));
 3219 
 3220   if (newp == NULL)
 3221     {
 3222       /* Try harder to allocate memory in other arenas.  */
 3223       LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
 3224       newp = __libc_malloc (bytes);
 3225       if (newp != NULL)
 3226         {
 3227           memcpy (newp, oldmem, oldsize - SIZE_SZ);
 3228           _int_free (ar_ptr, oldp, 0);
 3229         }
 3230     }
 3231 
 3232   return newp;
 3233 }
 3234 libc_hidden_def (__libc_realloc)
 3235 
 3236 void *
 3237 __libc_memalign (size_t alignment, size_t bytes)
 3238 {
 3239   void *address = RETURN_ADDRESS (0);
 3240   return _mid_memalign (alignment, bytes, address);
 3241 }
 3242 
 3243 static void *
 3244 _mid_memalign (size_t alignment, size_t bytes, void *address)
 3245 {
 3246   mstate ar_ptr;
 3247   void *p;
 3248 
 3249   void *(*hook) (size_t, size_t, const void *) =
 3250     atomic_forced_read (__memalign_hook);
 3251   if (__builtin_expect (hook != NULL, 0))
 3252     return (*hook)(alignment, bytes, address);
 3253 
 3254   /* If we need less alignment than we give anyway, just relay to malloc.  */
 3255   if (alignment <= MALLOC_ALIGNMENT)
 3256     return __libc_malloc (bytes);
 3257 
 3258   /* Otherwise, ensure that it is at least a minimum chunk size */
 3259   if (alignment < MINSIZE)
 3260     alignment = MINSIZE;
 3261 
 3262   /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
 3263      power of 2 and will cause overflow in the check below.  */
 3264   if (alignment > SIZE_MAX / 2 + 1)
 3265     {
 3266       __set_errno (EINVAL);
 3267       return 0;
 3268     }
 3269 
 3270   /* Check for overflow.  */
 3271   if (bytes > SIZE_MAX - alignment - MINSIZE)
 3272     {
 3273       __set_errno (ENOMEM);
 3274       return 0;
 3275     }
 3276 
 3277 
 3278   /* Make sure alignment is power of 2.  */
 3279   if (!powerof2 (alignment))
 3280     {
 3281       size_t a = MALLOC_ALIGNMENT * 2;
 3282       while (a < alignment)
 3283         a <<= 1;
 3284       alignment = a;
 3285     }
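         /*
            Two worked cases, assuming MALLOC_ALIGNMENT == 16 (so a starts at
            32): a request for 48-byte alignment is quietly rounded up to 64,
            and one for 100-byte alignment to 128.  The result is always the
            next power of two at or above the request, never an error.
          */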
 3286 
 3287   if (SINGLE_THREAD_P)
 3288     {
 3289       p = _int_memalign (&main_arena, alignment, bytes);
 3290       assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
 3291           &main_arena == arena_for_chunk (mem2chunk (p)));
 3292 
 3293       return p;
 3294     }
 3295 
 3296   arena_get (ar_ptr, bytes + alignment + MINSIZE);
 3297 
 3298   p = _int_memalign (ar_ptr, alignment, bytes);
 3299   if (!p && ar_ptr != NULL)
 3300     {
 3301       LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
 3302       ar_ptr = arena_get_retry (ar_ptr, bytes);
 3303       p = _int_memalign (ar_ptr, alignment, bytes);
 3304     }
 3305 
 3306   if (ar_ptr != NULL)
 3307     __libc_lock_unlock (ar_ptr->mutex);
 3308 
 3309   assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
 3310           ar_ptr == arena_for_chunk (mem2chunk (p)));
 3311   return p;
 3312 }
 3313 /* For ISO C11.  */
 3314 weak_alias (__libc_memalign, aligned_alloc)
 3315 libc_hidden_def (__libc_memalign)
 3316 
 3317 void *
 3318 __libc_valloc (size_t bytes)
 3319 {
 3320   if (__malloc_initialized < 0)
 3321     ptmalloc_init ();
 3322 
 3323   void *address = RETURN_ADDRESS (0);
 3324   size_t pagesize = GLRO (dl_pagesize);
 3325   return _mid_memalign (pagesize, bytes, address);
 3326 }
 3327 
 3328 void *
 3329 __libc_pvalloc (size_t bytes)
 3330 {
 3331   if (__malloc_initialized < 0)
 3332     ptmalloc_init ();
 3333 
 3334   void *address = RETURN_ADDRESS (0);
 3335   size_t pagesize = GLRO (dl_pagesize);
 3336   size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
 3337 
 3338   /* Check for overflow.  */
 3339   if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
 3340     {
 3341       __set_errno (ENOMEM);
 3342       return 0;
 3343     }
 3344 
 3345   return _mid_memalign (pagesize, rounded_bytes, address);
 3346 }
 3347 
 3348 void *
 3349 __libc_calloc (size_t n, size_t elem_size)
 3350 {
 3351   mstate av;
 3352   mchunkptr oldtop, p;
 3353   INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
 3354   void *mem;
 3355   unsigned long clearsize;
 3356   unsigned long nclears;
 3357   INTERNAL_SIZE_T *d;
 3358 
 3359   /* size_t is unsigned so the behavior on overflow is defined.  */
 3360   bytes = n * elem_size;
 3361 #define HALF_INTERNAL_SIZE_T \
 3362   (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
 3363   if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
 3364     {
 3365       if (elem_size != 0 && bytes / elem_size != n)
 3366         {
 3367           __set_errno (ENOMEM);
 3368           return 0;
 3369         }
 3370     }
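         /*
            How the screen above works, assuming 64-bit INTERNAL_SIZE_T
            (HALF_INTERNAL_SIZE_T == 2^32): if both n and elem_size are below
            2^32 their product fits in 64 bits, so the division is skipped on
            the common path.  Otherwise the division re-checks the product;
            e.g. n == 0x8000000000000001 and elem_size == 2 truncate to
            bytes == 2, and bytes / elem_size == 1 != n, so calloc fails with
            ENOMEM instead of returning an undersized block.
          */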
 3371 
 3372   void *(*hook) (size_t, const void *) =
 3373     atomic_forced_read (__malloc_hook);
 3374   if (__builtin_expect (hook != NULL, 0))
 3375     {
 3376       sz = bytes;
 3377       mem = (*hook)(sz, RETURN_ADDRESS (0));
 3378       if (mem == 0)
 3379         return 0;
 3380 
 3381       return memset (mem, 0, sz);
 3382     }
 3383 
 3384   sz = bytes;
 3385 
 3386   MAYBE_INIT_TCACHE ();
 3387 
 3388   if (SINGLE_THREAD_P)
 3389     av = &main_arena;
 3390   else
 3391     arena_get (av, sz);
 3392 
 3393   if (av)
 3394     {
 3395       /* Check if we hand out the top chunk, in which case there may be no
 3396      need to clear. */
 3397 #if MORECORE_CLEARS
 3398       oldtop = top (av);
 3399       oldtopsize = chunksize (top (av));
 3400 # if MORECORE_CLEARS < 2
 3401       /* Only newly allocated memory is guaranteed to be cleared.  */
 3402       if (av == &main_arena &&
 3403       oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
 3404     oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
 3405 # endif
 3406       if (av != &main_arena)
 3407     {
 3408       heap_info *heap = heap_for_ptr (oldtop);
 3409       if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
 3410         oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
 3411     }
 3412 #endif
 3413     }
 3414   else
 3415     {
 3416       /* No usable arenas.  */
 3417       oldtop = 0;
 3418       oldtopsize = 0;
 3419     }
 3420   mem = _int_malloc (av, sz);
 3421 
 3422   assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
 3423           av == arena_for_chunk (mem2chunk (mem)));
 3424 
 3425   if (!SINGLE_THREAD_P)
 3426     {
 3427       if (mem == 0 && av != NULL)
 3428     {
 3429       LIBC_PROBE (memory_calloc_retry, 1, sz);
 3430       av = arena_get_retry (av, sz);
 3431       mem = _int_malloc (av, sz);
 3432     }
 3433 
 3434       if (av != NULL)
 3435     __libc_lock_unlock (av->mutex);
 3436     }
 3437 
 3438   /* Allocation failed even after a retry.  */
 3439   if (mem == 0)
 3440     return 0;
 3441 
 3442   p = mem2chunk (mem);
 3443 
  3444   /* Two optional cases in which clearing is not necessary */
 3445   if (chunk_is_mmapped (p))
 3446     {
 3447       if (__builtin_expect (perturb_byte, 0))
 3448         return memset (mem, 0, sz);
 3449 
 3450       return mem;
 3451     }
 3452 
 3453   csz = chunksize (p);
 3454 
 3455 #if MORECORE_CLEARS
 3456   if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
 3457     {
 3458       /* clear only the bytes from non-freshly-sbrked memory */
 3459       csz = oldtopsize;
 3460     }
 3461 #endif
 3462 
  3463   /* Unroll clear of <= 36 bytes (72 if 8-byte sizes).  We know that
 3464      contents have an odd number of INTERNAL_SIZE_T-sized words;
 3465      minimally 3.  */
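         /*
            Worked example, assuming a 64-bit build (SIZE_SZ == 8): a chunk of
            csz == 48 carries 48 - 8 == 40 bytes of user data, i.e.
            nclears == 5 words.  Chunk sizes are multiples of 2 * SIZE_SZ, so
            subtracting the one-word header always leaves an odd word count,
            which is why the unrolled code below only needs the > 4, > 6 and
            > 8 tests.
          */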
 3466   d = (INTERNAL_SIZE_T *) mem;
 3467   clearsize = csz - SIZE_SZ;
 3468   nclears = clearsize / sizeof (INTERNAL_SIZE_T);
 3469   assert (nclears >= 3);
 3470 
 3471   if (nclears > 9)
 3472     return memset (d, 0, clearsize);
 3473 
 3474   else
 3475     {
 3476       *(d + 0) = 0;
 3477       *(d + 1) = 0;
 3478       *(d + 2) = 0;
 3479       if (nclears > 4)
 3480         {
 3481           *(d + 3) = 0;
 3482           *(d + 4) = 0;
 3483           if (nclears > 6)
 3484             {
 3485               *(d + 5) = 0;
 3486               *(d + 6) = 0;
 3487               if (nclears > 8)
 3488                 {
 3489                   *(d + 7) = 0;
 3490                   *(d + 8) = 0;
 3491                 }
 3492             }
 3493         }
 3494     }
 3495 
 3496   return mem;
 3497 }
 3498 
 3499 /*
 3500    ------------------------------ malloc ------------------------------
 3501  */
 3502 
 3503 static void *
 3504 _int_malloc (mstate av, size_t bytes)
 3505 {
 3506   INTERNAL_SIZE_T nb;               /* normalized request size */
 3507   unsigned int idx;                 /* associated bin index */
 3508   mbinptr bin;                      /* associated bin */
 3509 
 3510   mchunkptr victim;                 /* inspected/selected chunk */
 3511   INTERNAL_SIZE_T size;             /* its size */
 3512   int victim_index;                 /* its bin index */
 3513 
 3514   mchunkptr remainder;              /* remainder from a split */
 3515   unsigned long remainder_size;     /* its size */
 3516 
 3517   unsigned int block;               /* bit map traverser */
 3518   unsigned int bit;                 /* bit map traverser */
 3519   unsigned int map;                 /* current word of binmap */
 3520 
 3521   mchunkptr fwd;                    /* misc temp for linking */
 3522   mchunkptr bck;                    /* misc temp for linking */
 3523 
 3524 #if USE_TCACHE
 3525   size_t tcache_unsorted_count;     /* count of unsorted chunks processed */
 3526 #endif
 3527 
 3528   /*
 3529      Convert request size to internal form by adding SIZE_SZ bytes
 3530      overhead plus possibly more to obtain necessary alignment and/or
 3531      to obtain a size of at least MINSIZE, the smallest allocatable
 3532      size. Also, checked_request2size traps (returning 0) request sizes
 3533      that are so large that they wrap around zero when padded and
 3534      aligned.
 3535    */
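         /*
            A few worked values, assuming a 64-bit build (SIZE_SZ == 8,
            MALLOC_ALIGNMENT == 16, MINSIZE == 32):

                bytes ==   0  ->  nb ==  32   (clamped to MINSIZE)
                bytes ==  25  ->  nb ==  48   (25 + 8, aligned up to 16)
                bytes == 100  ->  nb == 112
          */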
 3536 
 3537   checked_request2size (bytes, nb);
 3538 
 3539   /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
 3540      mmap.  */
 3541   if (__glibc_unlikely (av == NULL))
 3542     {
 3543       void *p = sysmalloc (nb, av);
 3544       if (p != NULL)
 3545     alloc_perturb (p, bytes);
 3546       return p;
 3547     }
 3548 
 3549   /*
 3550      If the size qualifies as a fastbin, first check corresponding bin.
 3551      This code is safe to execute even if av is not yet initialized, so we
 3552      can try it without checking, which saves some time on this fast path.
 3553    */
 3554 
 3555 #define REMOVE_FB(fb, victim, pp)           \
 3556   do                            \
 3557     {                           \
 3558       victim = pp;                  \
 3559       if (victim == NULL)               \
 3560     break;                      \
 3561     }                           \
 3562   while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
 3563      != victim);                    \
 3564 
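         /*
            REMOVE_FB is a lock-free pop from the singly linked fastbin list.
            The same pattern in stand-alone form, as a sketch using C11
            <stdatomic.h> rather than glibc's internal
            catomic_compare_and_exchange_val_acq wrapper (the types and names
            below are just for illustration):

                #include <stdatomic.h>
                #include <stddef.h>

                struct node { struct node *next; };

                static struct node *
                pop (struct node *_Atomic *head)
                {
                  struct node *victim = atomic_load (head);
                  // Retry while someone else pops or pushes under us; on
                  // failure the CAS reloads victim with the current head.
                  while (victim != NULL
                         && !atomic_compare_exchange_weak (head, &victim,
                                                           victim->next))
                    ;
                  return victim;   // NULL if the list was empty
                }

            As in REMOVE_FB, the exchange only succeeds if the head is still
            the node we loaded, so concurrent malloc/free traffic merely
            forces another iteration instead of corrupting the list.
          */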
 3565   if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
 3566     {
 3567       idx = fastbin_index (nb);
 3568       mfastbinptr *fb = &fastbin (av, idx);
 3569       mchunkptr pp;
 3570       victim = *fb;
 3571 
 3572       if (victim != NULL)
 3573     {
 3574       if (SINGLE_THREAD_P)
 3575         *fb = victim->fd;
 3576       else
 3577         REMOVE_FB (fb, pp, victim);
 3578       if (__glibc_likely (victim != NULL))
 3579         {
 3580           size_t victim_idx = fastbin_index (chunksize (victim));
 3581           if (__builtin_expect (victim_idx != idx, 0))
 3582         malloc_printerr ("malloc(): memory corruption (fast)");
 3583           check_remalloced_chunk (av, victim, nb);
 3584 #if USE_TCACHE
 3585           /* While we're here, if we see other chunks of the same size,
 3586          stash them in the tcache.  */
 3587           size_t tc_idx = csize2tidx (nb);
 3588           if (tcache && tc_idx < mp_.tcache_bins)
 3589         {
 3590           mchunkptr tc_victim;
 3591 
 3592           /* While bin not empty and tcache not full, copy chunks.  */
 3593           while (tcache->counts[tc_idx] < mp_.tcache_count
 3594              && (tc_victim = *fb) != NULL)
 3595             {
 3596               if (SINGLE_THREAD_P)
 3597             *fb = tc_victim->fd;
 3598               else
 3599             {
 3600               REMOVE_FB (fb, pp, tc_victim);
 3601               if (__glibc_unlikely (tc_victim == NULL))
 3602                 break;
 3603             }
 3604               tcache_put (tc_victim, tc_idx);
 3605             }
 3606         }
 3607 #endif
 3608           void *p = chunk2mem (victim);
 3609           alloc_perturb (p, bytes);
 3610           return p;
 3611         }
 3612     }
 3613     }
 3614 
 3615   /*
 3616      If a small request, check regular bin.  Since these "smallbins"
 3617      hold one size each, no searching within bins is necessary.
 3618      (For a large request, we need to wait until unsorted chunks are
 3619      processed to find best fit. But for small ones, fits are exact
 3620      anyway, so we can check now, which is faster.)
 3621    */
 3622 
 3623   if (in_smallbin_range (nb))
 3624     {
 3625       idx = smallbin_index (nb);
 3626       bin = bin_at (av, idx);
 3627 
 3628       if ((victim = last (bin)) != bin)
 3629         {
 3630           bck = victim->bk;
 3631       if (__glibc_unlikely (bck->fd != victim))
 3632         malloc_printerr ("malloc(): smallbin double linked list corrupted");
 3633           set_inuse_bit_at_offset (victim, nb);
 3634           bin->bk = bck;
 3635           bck->fd = bin;
 3636 
 3637           if (av != &main_arena)
 3638         set_non_main_arena (victim);
 3639           check_malloced_chunk (av, victim, nb);
 3640 #if USE_TCACHE
 3641       /* While we're here, if we see other chunks of the same size,
 3642          stash them in the tcache.  */
 3643       size_t tc_idx = csize2tidx (nb);
 3644       if (tcache && tc_idx < mp_.tcache_bins)
 3645         {
 3646           mchunkptr tc_victim;
 3647 
 3648           /* While bin not empty and tcache not full, copy chunks over.  */
 3649           while (tcache->counts[tc_idx] < mp_.tcache_count
 3650              && (tc_victim = last (bin)) != bin)
 3651         {
 3652           if (tc_victim != 0)
 3653             {
 3654               bck = tc_victim->bk;
 3655               set_inuse_bit_at_offset (tc_victim, nb);
 3656               if (av != &main_arena)
 3657             set_non_main_arena (tc_victim);
 3658               bin->bk = bck;
 3659               bck->fd = bin;
 3660 
 3661               tcache_put (tc_victim, tc_idx);
 3662                 }
 3663         }
 3664         }
 3665 #endif
 3666           void *p = chunk2mem (victim);
 3667           alloc_perturb (p, bytes);
 3668           return p;
 3669         }
 3670     }
 3671 
 3672   /*
 3673      If this is a large request, consolidate fastbins before continuing.
 3674      While it might look excessive to kill all fastbins before
 3675      even seeing if there is space available, this avoids
 3676      fragmentation problems normally associated with fastbins.
 3677      Also, in practice, programs tend to have runs of either small or
 3678      large requests, but less often mixtures, so consolidation is not
  3679      invoked all that often in most programs. And the programs in which
  3680      it is called frequently would otherwise tend to fragment.
 3681    */
 3682 
 3683   else
 3684     {
 3685       idx = largebin_index (nb);
 3686       if (atomic_load_relaxed (&av->have_fastchunks))
 3687         malloc_consolidate (av);
 3688     }
 3689 
 3690   /*
 3691      Process recently freed or remaindered chunks, taking one only if
  3692      it is an exact fit, or, if this is a small request, the chunk is the remainder from
 3693      the most recent non-exact fit.  Place other traversed chunks in
 3694      bins.  Note that this step is the only place in any routine where
 3695      chunks are placed in bins.
 3696 
 3697      The outer loop here is needed because we might not realize until
 3698      near the end of malloc that we should have consolidated, so must
 3699      do so and retry. This happens at most once, and only when we would
 3700      otherwise need to expand memory to service a "small" request.
 3701    */
 3702 
 3703 #if USE_TCACHE
 3704   INTERNAL_SIZE_T tcache_nb = 0;
 3705   size_t tc_idx = csize2tidx (nb);
 3706   if (tcache && tc_idx < mp_.tcache_bins)
 3707     tcache_nb = nb;
 3708   int return_cached = 0;
 3709 
 3710   tcache_unsorted_count = 0;
 3711 #endif
 3712 
 3713   for (;; )
 3714     {
 3715       int iters = 0;
 3716       while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
 3717         {
 3718           bck = victim->bk;
 3719           if (__builtin_expect (chunksize_nomask (victim) <= 2 * SIZE_SZ, 0)
 3720               || __builtin_expect (chunksize_nomask (victim)
 3721                    > av->system_mem, 0))
 3722             malloc_printerr ("malloc(): memory corruption");
 3723           size = chunksize (victim);
 3724 
 3725           /*
 3726              If a small request, try to use last remainder if it is the
 3727              only chunk in unsorted bin.  This helps promote locality for
 3728              runs of consecutive small requests. This is the only
 3729              exception to best-fit, and applies only when there is
 3730              no exact fit for a small chunk.
 3731            */
 3732 
 3733           if (in_smallbin_range (nb) &&
 3734               bck == unsorted_chunks (av) &&
 3735               victim == av->last_remainder &&
 3736               (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
 3737             {
 3738               /* split and reattach remainder */
 3739               remainder_size = size - nb;
 3740               remainder = chunk_at_offset (victim, nb);
 3741               unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
 3742               av->last_remainder = remainder;
 3743               remainder->bk = remainder->fd = unsorted_chunks (av);
 3744               if (!in_smallbin_range (remainder_size))
 3745                 {
 3746                   remainder->fd_nextsize = NULL;
 3747                   remainder->bk_nextsize = NULL;
 3748                 }
 3749 
 3750               set_head (victim, nb | PREV_INUSE |
 3751                         (av != &main_arena ? NON_MAIN_ARENA : 0));
 3752               set_head (remainder, remainder_size | PREV_INUSE);
 3753               set_foot (remainder, remainder_size);
 3754 
 3755               check_malloced_chunk (av, victim, nb);
 3756               void *p = chunk2mem (victim);
 3757               alloc_perturb (p, bytes);
 3758               return p;
 3759             }
 3760 
 3761           /* remove from unsorted list */
 3762           if (__glibc_unlikely (bck->fd != victim))
 3763             malloc_printerr ("malloc(): corrupted unsorted chunks 3");
 3764           unsorted_chunks (av)->bk = bck;
 3765           bck->fd = unsorted_chunks (av);
 3766 
 3767           /* Take now instead of binning if exact fit */
 3768 
 3769           if (size == nb)
 3770             {
 3771               set_inuse_bit_at_offset (victim, size);
 3772               if (av != &main_arena)
 3773         set_non_main_arena (victim);
 3774 #if USE_TCACHE
 3775           /* Fill cache first, return to user only if cache fills.
 3776          We may return one of these chunks later.  */
 3777           if (tcache_nb
 3778           && tcache->counts[tc_idx] < mp_.tcache_count)
 3779         {
 3780           tcache_put (victim, tc_idx);
 3781           return_cached = 1;
 3782           continue;
 3783         }
 3784           else
 3785         {
 3786 #endif
 3787               check_malloced_chunk (av, victim, nb);
 3788               void *p = chunk2mem (victim);
 3789               alloc_perturb (p, bytes);
 3790               return p;
 3791 #if USE_TCACHE
 3792         }
 3793 #endif
 3794             }
 3795 
 3796           /* place chunk in bin */
 3797 
 3798           if (in_smallbin_range (size))
 3799             {
 3800               victim_index = smallbin_index (size);
 3801               bck = bin_at (av, victim_index);
 3802               fwd = bck->fd;
 3803             }
 3804           else
 3805             {
 3806               victim_index = largebin_index (size);
 3807               bck = bin_at (av, victim_index);
 3808               fwd = bck->fd;
 3809 
 3810               /* maintain large bins in sorted order */
 3811               if (fwd != bck)
 3812                 {
 3813                   /* Or with inuse bit to speed comparisons */
 3814                   size |= PREV_INUSE;
 3815                   /* if smaller than smallest, bypass loop below */
 3816                   assert (chunk_main_arena (bck->bk));
 3817                   if ((unsigned long) (size)
 3818               < (unsigned long) chunksize_nomask (bck->bk))
 3819                     {
 3820                       fwd = bck;
 3821                       bck = bck->bk;
 3822 
 3823                       victim->fd_nextsize = fwd->fd;
 3824                       victim->bk_nextsize = fwd->fd->bk_nextsize;
 3825                       fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
 3826                     }
 3827                   else
 3828                     {
 3829                       assert (chunk_main_arena (fwd));
 3830                       while ((unsigned long) size < chunksize_nomask (fwd))
 3831                         {
 3832                           fwd = fwd->fd_nextsize;
 3833               assert (chunk_main_arena (fwd));
 3834                         }
 3835 
 3836                       if ((unsigned long) size
 3837               == (unsigned long) chunksize_nomask (fwd))
 3838                         /* Always insert in the second position.  */
 3839                         fwd = fwd->fd;
 3840                       else
 3841                         {
 3842                           victim->fd_nextsize = fwd;
 3843                           victim->bk_nextsize = fwd->bk_nextsize;
 3844                           fwd->bk_nextsize = victim;
 3845                           victim->bk_nextsize->fd_nextsize = victim;
 3846                         }
 3847                       bck = fwd->bk;
 3848                     }
 3849                 }
 3850               else
 3851                 victim->fd_nextsize = victim->bk_nextsize = victim;
 3852             }
 3853 
 3854           mark_bin (av, victim_index);
 3855           victim->bk = bck;
 3856           victim->fd = fwd;
 3857           fwd->bk = victim;
 3858           bck->fd = victim;
 3859 
 3860 #if USE_TCACHE
 3861       /* If we've processed as many chunks as we're allowed while
 3862      filling the cache, return one of the cached ones.  */
 3863       ++tcache_unsorted_count;
 3864       if (return_cached
 3865       && mp_.tcache_unsorted_limit > 0
 3866       && tcache_unsorted_count > mp_.tcache_unsorted_limit)
 3867     {
 3868       return tcache_get (tc_idx);
 3869     }
 3870 #endif
 3871 
 3872 #define MAX_ITERS       10000
 3873           if (++iters >= MAX_ITERS)
 3874             break;
 3875         }
 3876 
 3877 #if USE_TCACHE
 3878       /* If all the small chunks we found ended up cached, return one now.  */
 3879       if (return_cached)
 3880     {
 3881       return tcache_get (tc_idx);
 3882     }
 3883 #endif
 3884 
 3885       /*
 3886          If a large request, scan through the chunks of current bin in
 3887          sorted order to find smallest that fits.  Use the skip list for this.
 3888        */
 3889 
 3890       if (!in_smallbin_range (nb))
 3891         {
 3892           bin = bin_at (av, idx);
 3893 
 3894           /* skip scan if empty or largest chunk is too small */
 3895           if ((victim = first (bin)) != bin
 3896           && (unsigned long) chunksize_nomask (victim)
 3897             >= (unsigned long) (nb))
 3898             {
 3899               victim = victim->bk_nextsize;
 3900               while (((unsigned long) (size = chunksize (victim)) <
 3901                       (unsigned long) (nb)))
 3902                 victim = victim->bk_nextsize;
 3903 
 3904               /* Avoid removing the first entry for a size so that the skip
 3905                  list does not have to be rerouted.  */
 3906               if (victim != last (bin)
 3907           && chunksize_nomask (victim)
 3908             == chunksize_nomask (victim->fd))
 3909                 victim = victim->fd;
 3910 
 3911               remainder_size = size - nb;
 3912               unlink (av, victim, bck, fwd);
 3913 
 3914               /* Exhaust */
 3915               if (remainder_size < MINSIZE)
 3916                 {
 3917                   set_inuse_bit_at_offset (victim, size);
 3918                   if (av != &main_arena)
 3919             set_non_main_arena (victim);
 3920                 }
 3921               /* Split */
 3922               else
 3923                 {
 3924                   remainder = chunk_at_offset (victim, nb);
 3925                   /* We cannot assume the unsorted list is empty and therefore
 3926                      have to perform a complete insert here.  */
 3927                   bck = unsorted_chunks (av);
 3928                   fwd = bck->fd;
 3929           if (__glibc_unlikely (fwd->bk != bck))
 3930             malloc_printerr ("malloc(): corrupted unsorted chunks");
 3931                   remainder->bk = bck;
 3932                   remainder->fd = fwd;
 3933                   bck->fd = remainder;
 3934                   fwd->bk = remainder;
 3935                   if (!in_smallbin_range (remainder_size))
 3936                     {
 3937                       remainder->fd_nextsize = NULL;
 3938                       remainder->bk_nextsize = NULL;
 3939                     }
 3940                   set_head (victim, nb | PREV_INUSE |
 3941                             (av != &main_arena ? NON_MAIN_ARENA : 0));
 3942                   set_head (remainder, remainder_size | PREV_INUSE);
 3943                   set_foot (remainder, remainder_size);
 3944                 }
 3945               check_malloced_chunk (av, victim, nb);
 3946               void *p = chunk2mem (victim);
 3947               alloc_perturb (p, bytes);
 3948               return p;
 3949             }
 3950         }
 3951 
 3952       /*
 3953          Search for a chunk by scanning bins, starting with next largest
 3954          bin. This search is strictly by best-fit; i.e., the smallest
 3955          (with ties going to approximately the least recently used) chunk
 3956          that fits is selected.
 3957 
 3958          The bitmap avoids needing to check that most blocks are nonempty.
 3959          The particular case of skipping all bins during warm-up phases
 3960          when no chunks have been returned yet is faster than it might look.
 3961        */
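             /*
                Concretely (a sketch, assuming the usual BINMAPSHIFT of 5 and
                32-bit binmap words):

                    idx2block (37)  ==  37 >> 5          ==  1
                    idx2bit (37)    ==  1U << (37 & 31)  ==  0x20

                so "bit & map" below asks whether bin 37 may be non-empty; the
                bit is set when a chunk is binned and cleared lazily when a
                bin turns out to be empty, and a zero map word lets the scan
                skip 32 bins at once.
              */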
 3962 
 3963       ++idx;
 3964       bin = bin_at (av, idx);
 3965       block = idx2block (idx);
 3966       map = av->binmap[block];
 3967       bit = idx2bit (idx);
 3968 
 3969       for (;; )
 3970         {
 3971           /* Skip rest of block if there are no more set bits in this block.  */
 3972           if (bit > map || bit == 0)
 3973             {
 3974               do
 3975                 {
 3976                   if (++block >= BINMAPSIZE) /* out of bins */
 3977                     goto use_top;
 3978                 }
 3979               while ((map = av->binmap[block]) == 0);
 3980 
 3981               bin = bin_at (av, (block << BINMAPSHIFT));
 3982               bit = 1;
 3983             }
 3984 
 3985           /* Advance to bin with set bit. There must be one. */
 3986           while ((bit & map) == 0)
 3987             {
 3988               bin = next_bin (bin);
 3989               bit <<= 1;
 3990               assert (bit != 0);
 3991             }
 3992 
 3993           /* Inspect the bin. It is likely to be non-empty */
 3994           victim = last (bin);
 3995 
 3996           /*  If a false alarm (empty bin), clear the bit. */
 3997           if (victim == bin)
 3998             {
 3999               av->binmap[block] = map &= ~bit; /* Write through */
 4000               bin = next_bin (bin);
 4001               bit <<= 1;
 4002             }
 4003 
 4004           else
 4005             {
 4006               size = chunksize (victim);
 4007 
 4008               /*  We know the first chunk in this bin is big enough to use. */
 4009               assert ((unsigned long) (size) >= (unsigned long) (nb));
 4010 
 4011               remainder_size = size - nb;
 4012 
 4013               /* unlink */
 4014               unlink (av, victim, bck, fwd);
 4015 
 4016               /* Exhaust */
 4017               if (remainder_size < MINSIZE)
 4018                 {
 4019                   set_inuse_bit_at_offset (victim, size);
 4020                   if (av != &main_arena)
 4021             set_non_main_arena (victim);
 4022                 }
 4023 
 4024               /* Split */
 4025               else
 4026                 {
 4027                   remainder = chunk_at_offset (victim, nb);
 4028 
 4029                   /* We cannot assume the unsorted list is empty and therefore
 4030                      have to perform a complete insert here.  */
 4031                   bck = unsorted_chunks (av);
 4032                   fwd = bck->fd;
 4033           if (__glibc_unlikely (fwd->bk != bck))
 4034             malloc_printerr ("malloc(): corrupted unsorted chunks 2");
 4035                   remainder->bk = bck;
 4036                   remainder->fd = fwd;
 4037                   bck->fd = remainder;
 4038                   fwd->bk = remainder;
 4039 
 4040                   /* advertise as last remainder */
 4041                   if (in_smallbin_range (nb))
 4042                     av->last_remainder = remainder;
 4043                   if (!in_smallbin_range (remainder_size))
 4044                     {
 4045                       remainder->fd_nextsize = NULL;
 4046                       remainder->bk_nextsize = NULL;
 4047                     }
 4048                   set_head (victim, nb | PREV_INUSE |
 4049                             (av != &main_arena ? NON_MAIN_ARENA : 0));
 4050                   set_head (remainder, remainder_size | PREV_INUSE);
 4051                   set_foot (remainder, remainder_size);
 4052                 }
 4053               check_malloced_chunk (av, victim, nb);
 4054               void *p = chunk2mem (victim);
 4055               alloc_perturb (p, bytes);
 4056               return p;
 4057             }
 4058         }
 4059 
 4060     use_top:
 4061       /*
 4062          If large enough, split off the chunk bordering the end of memory
 4063          (held in av->top). Note that this is in accord with the best-fit
 4064          search rule.  In effect, av->top is treated as larger (and thus
 4065          less well fitting) than any other available chunk since it can
 4066          be extended to be as large as necessary (up to system
 4067          limitations).
 4068 
 4069          We require that av->top always exists (i.e., has size >=
 4070          MINSIZE) after initialization, so if it would otherwise be
 4071          exhausted by current request, it is replenished. (The main
 4072          reason for ensuring it exists is that we may need MINSIZE space
 4073          to put in fenceposts in sysmalloc.)
 4074        */
 4075 
 4076       victim = av->top;
 4077       size = chunksize (victim);
 4078 
 4079       if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
 4080         {
 4081           remainder_size = size - nb;
 4082           remainder = chunk_at_offset (victim, nb);
 4083           av->top = remainder;
 4084           set_head (victim, nb | PREV_INUSE |
 4085                     (av != &main_arena ? NON_MAIN_ARENA : 0));
 4086           set_head (remainder, remainder_size | PREV_INUSE);
 4087 
 4088           check_malloced_chunk (av, victim, nb);
 4089           void *p = chunk2mem (victim);
 4090           alloc_perturb (p, bytes);
 4091           return p;
 4092         }
 4093 
 4094       /* When we are using atomic ops to free fast chunks we can get
 4095          here for all block sizes.  */
 4096       else if (atomic_load_relaxed (&av->have_fastchunks))
 4097         {
 4098           malloc_consolidate (av);
 4099           /* restore original bin index */
 4100           if (in_smallbin_range (nb))
 4101             idx = smallbin_index (nb);
 4102           else
 4103             idx = largebin_index (nb);
 4104         }
 4105 
 4106       /*
 4107          Otherwise, relay to handle system-dependent cases
 4108        */
 4109       else
 4110         {
 4111           void *p = sysmalloc (nb, av);
 4112           if (p != NULL)
 4113             alloc_perturb (p, bytes);
 4114           return p;
 4115         }
 4116     }
 4117 }
 4118 
 4119 /*
 4120    ------------------------------ free ------------------------------
 4121  */
 4122 
 4123 static void
 4124 _int_free (mstate av, mchunkptr p, int have_lock)
 4125 {
 4126   INTERNAL_SIZE_T size;        /* its size */
 4127   mfastbinptr *fb;             /* associated fastbin */
 4128   mchunkptr nextchunk;         /* next contiguous chunk */
 4129   INTERNAL_SIZE_T nextsize;    /* its size */
 4130   int nextinuse;               /* true if nextchunk is used */
 4131   INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
 4132   mchunkptr bck;               /* misc temp for linking */
 4133   mchunkptr fwd;               /* misc temp for linking */
 4134 
 4135   size = chunksize (p);
 4136 
 4137   /* Little security check which won't hurt performance: the
  4138      allocator never wraps around at the end of the address space.
 4139      Therefore we can exclude some size values which might appear
 4140      here by accident or by "design" from some intruder.  */
 4141   if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
 4142       || __builtin_expect (misaligned_chunk (p), 0))
 4143     malloc_printerr ("free(): invalid pointer");
  4144   /* We know that each chunk is at least MINSIZE bytes in size and a
  4145      multiple of MALLOC_ALIGNMENT.  */
 4146   if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
 4147     malloc_printerr ("free(): invalid size");
 4148 
 4149   check_inuse_chunk(av, p);
 4150 
 4151 #if USE_TCACHE
 4152   {
 4153     size_t tc_idx = csize2tidx (size);
 4154 
 4155     if (tcache
 4156     && tc_idx < mp_.tcache_bins
 4157     && tcache->counts[tc_idx] < mp_.tcache_count)
 4158       {
 4159     tcache_put (p, tc_idx);
 4160     return;
 4161       }
 4162   }
 4163 #endif
 4164 
 4165   /*
 4166     If eligible, place chunk on a fastbin so it can be found
 4167     and used quickly in malloc.
 4168   */
 4169 
 4170   if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
 4171 
 4172 #if TRIM_FASTBINS
 4173       /*
 4174     If TRIM_FASTBINS set, don't place chunks
 4175     bordering top into fastbins
 4176       */
 4177       && (chunk_at_offset(p, size) != av->top)
 4178 #endif
 4179       ) {
 4180 
 4181     if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
 4182               <= 2 * SIZE_SZ, 0)
 4183     || __builtin_expect (chunksize (chunk_at_offset (p, size))
 4184                  >= av->system_mem, 0))
 4185       {
 4186     bool fail = true;
 4187     /* We might not have a lock at this point and concurrent modifications
 4188        of system_mem might result in a false positive.  Redo the test after
 4189        getting the lock.  */
 4190     if (!have_lock)
 4191       {
 4192         __libc_lock_lock (av->mutex);
 4193         fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
 4194             || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
 4195         __libc_lock_unlock (av->mutex);
 4196       }
 4197 
 4198     if (fail)
 4199       malloc_printerr ("free(): invalid next size (fast)");
 4200       }
 4201 
 4202     free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 4203 
 4204     atomic_store_relaxed (&av->have_fastchunks, true);
 4205     unsigned int idx = fastbin_index(size);
 4206     fb = &fastbin (av, idx);
 4207 
 4208     /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
 4209     mchunkptr old = *fb, old2;
 4210 
 4211     if (SINGLE_THREAD_P)
 4212       {
 4213     /* Check that the top of the bin is not the record we are going to
 4214        add (i.e., double free).  */
 4215     if (__builtin_expect (old == p, 0))
 4216       malloc_printerr ("double free or corruption (fasttop)");
 4217     p->fd = old;
 4218     *fb = p;
 4219       }
 4220     else
 4221       do
 4222     {
 4223       /* Check that the top of the bin is not the record we are going to
 4224          add (i.e., double free).  */
 4225       if (__builtin_expect (old == p, 0))
 4226         malloc_printerr ("double free or corruption (fasttop)");
 4227       p->fd = old2 = old;
 4228     }
 4229       while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
 4230          != old2);
 4231 
 4232     /* Check that size of fastbin chunk at the top is the same as
 4233        size of the chunk that we are adding.  We can dereference OLD
 4234        only if we have the lock, otherwise it might have already been
 4235        allocated again.  */
 4236     if (have_lock && old != NULL
 4237     && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
 4238       malloc_printerr ("invalid fastbin entry (free)");
 4239   }
 4240 
 4241   /*
 4242     Consolidate other non-mmapped chunks as they arrive.
 4243   */
 4244 
 4245   else if (!chunk_is_mmapped(p)) {
 4246 
 4247     /* If we're single-threaded, don't lock the arena.  */
 4248     if (SINGLE_THREAD_P)
 4249       have_lock = true;
 4250 
 4251     if (!have_lock)
 4252       __libc_lock_lock (av->mutex);
 4253 
 4254     nextchunk = chunk_at_offset(p, size);
 4255 
 4256     /* Lightweight tests: check whether the block is already the
 4257        top block.  */
 4258     if (__glibc_unlikely (p == av->top))
 4259       malloc_printerr ("double free or corruption (top)");
 4260     /* Or whether the next chunk is beyond the boundaries of the arena.  */
 4261     if (__builtin_expect (contiguous (av)
 4262               && (char *) nextchunk
 4263               >= ((char *) av->top + chunksize(av->top)), 0))
 4264       malloc_printerr ("double free or corruption (out)");
 4265     /* Or whether the block is actually not marked used.  */
 4266     if (__glibc_unlikely (!prev_inuse(nextchunk)))
 4267       malloc_printerr ("double free or corruption (!prev)");
 4268 
 4269     nextsize = chunksize(nextchunk);
 4270     if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
 4271     || __builtin_expect (nextsize >= av->system_mem, 0))
 4272       malloc_printerr ("free(): invalid next size (normal)");
 4273 
 4274     free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 4275 
 4276     /* consolidate backward */
 4277     if (!prev_inuse(p)) {
 4278       prevsize = prev_size (p);
 4279       size += prevsize;
 4280       p = chunk_at_offset(p, -((long) prevsize));
 4281       unlink(av, p, bck, fwd);
 4282     }
 4283 
 4284     if (nextchunk != av->top) {
 4285       /* get and clear inuse bit */
 4286       nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
 4287 
 4288       /* consolidate forward */
 4289       if (!nextinuse) {
 4290     unlink(av, nextchunk, bck, fwd);
 4291     size += nextsize;
 4292       } else
 4293     clear_inuse_bit_at_offset(nextchunk, 0);
 4294 
 4295       /*
 4296     Place the chunk in the unsorted chunk list. Chunks are
 4297     not placed into regular bins until after they have
 4298     been given one chance to be used in malloc.
 4299       */
 4300 
 4301       bck = unsorted_chunks(av);
 4302       fwd = bck->fd;
 4303       if (__glibc_unlikely (fwd->bk != bck))
 4304     malloc_printerr ("free(): corrupted unsorted chunks");
 4305       p->fd = fwd;
 4306       p->bk = bck;
 4307       if (!in_smallbin_range(size))
 4308     {
 4309       p->fd_nextsize = NULL;
 4310       p->bk_nextsize = NULL;
 4311     }
 4312       bck->fd = p;
 4313       fwd->bk = p;
 4314 
 4315       set_head(p, size | PREV_INUSE);
 4316       set_foot(p, size);
 4317 
 4318       check_free_chunk(av, p);
 4319     }
 4320 
 4321     /*
 4322       If the chunk borders the current high end of memory,
 4323       consolidate into top
 4324     */
 4325 
 4326     else {
 4327       size += nextsize;
 4328       set_head(p, size | PREV_INUSE);
 4329       av->top = p;
 4330       check_chunk(av, p);
 4331     }
 4332 
 4333     /*
 4334       If freeing a large space, consolidate possibly-surrounding
 4335       chunks. Then, if the total unused topmost memory exceeds the
 4336       trim threshold, ask malloc_trim to reduce top.
 4337 
 4338       Unless max_fast is 0, we don't know if there are fastbins
 4339       bordering top, so we cannot tell for sure whether threshold
 4340       has been reached unless fastbins are consolidated.  But we
 4341       don't want to consolidate on each free.  As a compromise,
 4342       consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
 4343       is reached.
 4344     */
 4345 
 4346     if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
 4347       if (atomic_load_relaxed (&av->have_fastchunks))
 4348     malloc_consolidate(av);
 4349 
 4350       if (av == &main_arena) {
 4351 #ifndef MORECORE_CANNOT_TRIM
 4352     if ((unsigned long)(chunksize(av->top)) >=
 4353         (unsigned long)(mp_.trim_threshold))
 4354       systrim(mp_.top_pad, av);
 4355 #endif
 4356       } else {
 4357     /* Always try heap_trim(), even if the top chunk is not
 4358        large, because the corresponding heap might go away.  */
 4359     heap_info *heap = heap_for_ptr(top(av));
 4360 
 4361     assert(heap->ar_ptr == av);
 4362     heap_trim(heap, mp_.top_pad);
 4363       }
 4364     }
 4365 
 4366     if (!have_lock)
 4367       __libc_lock_unlock (av->mutex);
 4368   }
 4369   /*
 4370     If the chunk was allocated via mmap, release via munmap().
 4371   */
 4372 
 4373   else {
 4374     munmap_chunk (p);
 4375   }
 4376 }
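/* Illustrative summary (not part of the implementation) of the paths a
   chunk can take through _int_free above:

     free (p)
       -> tcache_put           if the tcache bin for its size has room
       -> fastbin push         if size <= get_max_fast ()
       -> unsorted bin         after backward/forward consolidation
       -> merged into av->top  if the chunk borders the top chunk
       -> munmap_chunk         if the chunk was allocated via mmap

   Trimming (systrim/heap_trim) is attempted only after large frees, as
   gated by FASTBIN_CONSOLIDATION_THRESHOLD.  */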
 4377 
 4378 /*
 4379   ------------------------- malloc_consolidate -------------------------
 4380 
 4381   malloc_consolidate is a specialized version of free() that tears
 4382   down chunks held in fastbins.  Free itself cannot be used for this
 4383   purpose since, among other things, it might place chunks back onto
 4384   fastbins.  So, instead, we need to use a minor variant of the same
 4385   code.
 4386 */
 4387 
 4388 static void malloc_consolidate(mstate av)
 4389 {
 4390   mfastbinptr*    fb;                 /* current fastbin being consolidated */
 4391   mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
 4392   mchunkptr       p;                  /* current chunk being consolidated */
 4393   mchunkptr       nextp;              /* next chunk to consolidate */
 4394   mchunkptr       unsorted_bin;       /* bin header */
 4395   mchunkptr       first_unsorted;     /* chunk to link to */
 4396 
 4397   /* These have same use as in free() */
 4398   mchunkptr       nextchunk;
 4399   INTERNAL_SIZE_T size;
 4400   INTERNAL_SIZE_T nextsize;
 4401   INTERNAL_SIZE_T prevsize;
 4402   int             nextinuse;
 4403   mchunkptr       bck;
 4404   mchunkptr       fwd;
 4405 
 4406   atomic_store_relaxed (&av->have_fastchunks, false);
 4407 
 4408   unsorted_bin = unsorted_chunks(av);
 4409 
 4410   /*
 4411     Remove each chunk from its fast bin and consolidate it, then
 4412     place it in the unsorted bin. Among other reasons for doing this,
 4413     placing chunks in the unsorted bin avoids needing to calculate
 4414     actual bins until malloc is sure that chunks aren't immediately
 4415     going to be reused anyway.
 4416   */
 4417 
 4418   maxfb = &fastbin (av, NFASTBINS - 1);
 4419   fb = &fastbin (av, 0);
 4420   do {
 4421     p = atomic_exchange_acq (fb, NULL);
 4422     if (p != 0) {
 4423       do {
 4424     {
 4425       unsigned int idx = fastbin_index (chunksize (p));
 4426       if ((&fastbin (av, idx)) != fb)
 4427         malloc_printerr ("malloc_consolidate(): invalid chunk size");
 4428     }
 4429 
 4430     check_inuse_chunk(av, p);
 4431     nextp = p->fd;
 4432 
 4433     /* Slightly streamlined version of consolidation code in free() */
 4434     size = chunksize (p);
 4435     nextchunk = chunk_at_offset(p, size);
 4436     nextsize = chunksize(nextchunk);
 4437 
 4438     if (!prev_inuse(p)) {
 4439       prevsize = prev_size (p);
 4440       size += prevsize;
 4441       p = chunk_at_offset(p, -((long) prevsize));
 4442       unlink(av, p, bck, fwd);
 4443     }
 4444 
 4445     if (nextchunk != av->top) {
 4446       nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
 4447 
 4448       if (!nextinuse) {
 4449         size += nextsize;
 4450         unlink(av, nextchunk, bck, fwd);
 4451       } else
 4452         clear_inuse_bit_at_offset(nextchunk, 0);
 4453 
 4454       first_unsorted = unsorted_bin->fd;
 4455       unsorted_bin->fd = p;
 4456       first_unsorted->bk = p;
 4457 
 4458       if (!in_smallbin_range (size)) {
 4459         p->fd_nextsize = NULL;
 4460         p->bk_nextsize = NULL;
 4461       }
 4462 
 4463       set_head(p, size | PREV_INUSE);
 4464       p->bk = unsorted_bin;
 4465       p->fd = first_unsorted;
 4466       set_foot(p, size);
 4467     }
 4468 
 4469     else {
 4470       size += nextsize;
 4471       set_head(p, size | PREV_INUSE);
 4472       av->top = p;
 4473     }
 4474 
 4475       } while ( (p = nextp) != 0);
 4476 
 4477     }
 4478   } while (fb++ != maxfb);
 4479 }
 4480 
 4481 /*
 4482   ------------------------------ realloc ------------------------------
 4483 */
 4484 
 4485 void*
 4486 _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
 4487          INTERNAL_SIZE_T nb)
 4488 {
 4489   mchunkptr        newp;            /* chunk to return */
 4490   INTERNAL_SIZE_T  newsize;         /* its size */
 4491   void*          newmem;          /* corresponding user mem */
 4492 
 4493   mchunkptr        next;            /* next contiguous chunk after oldp */
 4494 
 4495   mchunkptr        remainder;       /* extra space at end of newp */
 4496   unsigned long    remainder_size;  /* its size */
 4497 
 4498   mchunkptr        bck;             /* misc temp for linking */
 4499   mchunkptr        fwd;             /* misc temp for linking */
 4500 
 4501   unsigned long    copysize;        /* bytes to copy */
 4502   unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
 4503   INTERNAL_SIZE_T* s;               /* copy source */
 4504   INTERNAL_SIZE_T* d;               /* copy destination */
 4505 
 4506   /* oldmem size */
 4507   if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
 4508       || __builtin_expect (oldsize >= av->system_mem, 0))
 4509     malloc_printerr ("realloc(): invalid old size");
 4510 
 4511   check_inuse_chunk (av, oldp);
 4512 
 4513   /* All callers already filter out mmap'ed chunks.  */
 4514   assert (!chunk_is_mmapped (oldp));
 4515 
 4516   next = chunk_at_offset (oldp, oldsize);
 4517   INTERNAL_SIZE_T nextsize = chunksize (next);
 4518   if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
 4519       || __builtin_expect (nextsize >= av->system_mem, 0))
 4520     malloc_printerr ("realloc(): invalid next size");
 4521 
 4522   if ((unsigned long) (oldsize) >= (unsigned long) (nb))
 4523     {
 4524       /* already big enough; split below */
 4525       newp = oldp;
 4526       newsize = oldsize;
 4527     }
 4528 
 4529   else
 4530     {
 4531       /* Try to expand forward into top */
 4532       if (next == av->top &&
 4533           (unsigned long) (newsize = oldsize + nextsize) >=
 4534           (unsigned long) (nb + MINSIZE))
 4535         {
 4536           set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
 4537           av->top = chunk_at_offset (oldp, nb);
 4538           set_head (av->top, (newsize - nb) | PREV_INUSE);
 4539           check_inuse_chunk (av, oldp);
 4540           return chunk2mem (oldp);
 4541         }
 4542 
 4543       /* Try to expand forward into next chunk;  split off remainder below */
 4544       else if (next != av->top &&
 4545                !inuse (next) &&
 4546                (unsigned long) (newsize = oldsize + nextsize) >=
 4547                (unsigned long) (nb))
 4548         {
 4549           newp = oldp;
 4550           unlink (av, next, bck, fwd);
 4551         }
 4552 
 4553       /* allocate, copy, free */
 4554       else
 4555         {
 4556           newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
 4557           if (newmem == 0)
 4558             return 0; /* propagate failure */
 4559 
 4560           newp = mem2chunk (newmem);
 4561           newsize = chunksize (newp);
 4562 
 4563           /*
 4564              Avoid copy if newp is next chunk after oldp.
 4565            */
 4566           if (newp == next)
 4567             {
 4568               newsize += oldsize;
 4569               newp = oldp;
 4570             }
 4571           else
 4572             {
 4573               /*
 4574                  Unroll copy of <= 36 bytes (72 if 8-byte sizes).
 4575                  We know that contents have an odd number of
 4576                  INTERNAL_SIZE_T-sized words; minimally 3.
 4577                */
 4578 
 4579               copysize = oldsize - SIZE_SZ;
 4580               s = (INTERNAL_SIZE_T *) (chunk2mem (oldp));
 4581               d = (INTERNAL_SIZE_T *) (newmem);
 4582               ncopies = copysize / sizeof (INTERNAL_SIZE_T);
 4583               assert (ncopies >= 3);
 4584 
 4585               if (ncopies > 9)
 4586                 memcpy (d, s, copysize);
 4587 
 4588               else
 4589                 {
 4590                   *(d + 0) = *(s + 0);
 4591                   *(d + 1) = *(s + 1);
 4592                   *(d + 2) = *(s + 2);
 4593                   if (ncopies > 4)
 4594                     {
 4595                       *(d + 3) = *(s + 3);
 4596                       *(d + 4) = *(s + 4);
 4597                       if (ncopies > 6)
 4598                         {
 4599                           *(d + 5) = *(s + 5);
 4600                           *(d + 6) = *(s + 6);
 4601                           if (ncopies > 8)
 4602                             {
 4603                               *(d + 7) = *(s + 7);
 4604                               *(d + 8) = *(s + 8);
 4605                             }
 4606                         }
 4607                     }
 4608                 }
 4609 
 4610               _int_free (av, oldp, 1);
 4611               check_inuse_chunk (av, newp);
 4612               return chunk2mem (newp);
 4613             }
 4614         }
 4615     }
 4616 
 4617   /* If possible, free extra space in old or extended chunk */
 4618 
 4619   assert ((unsigned long) (newsize) >= (unsigned long) (nb));
 4620 
 4621   remainder_size = newsize - nb;
 4622 
 4623   if (remainder_size < MINSIZE)   /* not enough extra to split off */
 4624     {
 4625       set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
 4626       set_inuse_bit_at_offset (newp, newsize);
 4627     }
 4628   else   /* split remainder */
 4629     {
 4630       remainder = chunk_at_offset (newp, nb);
 4631       set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
 4632       set_head (remainder, remainder_size | PREV_INUSE |
 4633                 (av != &main_arena ? NON_MAIN_ARENA : 0));
 4634       /* Mark remainder as inuse so free() won't complain */
 4635       set_inuse_bit_at_offset (remainder, remainder_size);
 4636       _int_free (av, remainder, 1);
 4637     }
 4638 
 4639   check_inuse_chunk (av, newp);
 4640   return chunk2mem (newp);
 4641 }
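/* Illustrative summary (not part of the implementation): _int_realloc
   above grows a chunk in place whenever it can and copies only as a
   last resort:

     1. oldsize already >= nb         -> reuse oldp, split off the tail
     2. next chunk is av->top         -> absorb space from top
     3. next chunk is free and fits   -> unlink it and merge
     4. otherwise                     -> _int_malloc, copy, _int_free

   The copy path hand-unrolls copies of up to 9 INTERNAL_SIZE_T words
   and uses memcpy beyond that.  */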
 4642 
 4643 /*
 4644    ------------------------------ memalign ------------------------------
 4645  */
 4646 
 4647 static void *
 4648 _int_memalign (mstate av, size_t alignment, size_t bytes)
 4649 {
 4650   INTERNAL_SIZE_T nb;             /* padded  request size */
 4651   char *m;                        /* memory returned by malloc call */
 4652   mchunkptr p;                    /* corresponding chunk */
 4653   char *brk;                      /* alignment point within p */
 4654   mchunkptr newp;                 /* chunk to return */
 4655   INTERNAL_SIZE_T newsize;        /* its size */
 4656   INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
 4657   mchunkptr remainder;            /* spare room at end to split off */
 4658   unsigned long remainder_size;   /* its size */
 4659   INTERNAL_SIZE_T size;
 4660 
 4661 
 4662 
 4663   checked_request2size (bytes, nb);
 4664 
 4665   /*
 4666      Strategy: find a spot within that chunk that meets the alignment
 4667      request, and then possibly free the leading and trailing space.
 4668    */
 4669 
 4670 
 4671   /* Check for overflow.  */
 4672   if (nb > SIZE_MAX - alignment - MINSIZE)
 4673     {
 4674       __set_errno (ENOMEM);
 4675       return 0;
 4676     }
 4677 
 4678   /* Call malloc with worst case padding to hit alignment. */
 4679 
 4680   m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
 4681 
 4682   if (m == 0)
 4683     return 0;           /* propagate failure */
 4684 
 4685   p = mem2chunk (m);
 4686 
 4687   if ((((unsigned long) (m)) % alignment) != 0)   /* misaligned */
 4688 
 4689     { /*
 4690                 Find an aligned spot inside chunk.  Since we need to give back
 4691                 leading space in a chunk of at least MINSIZE, if the first
 4692                 calculation places us at a spot with less than MINSIZE leader,
 4693                 we can move to the next aligned spot -- we've allocated enough
 4694                 total room so that this is always possible.
 4695                  */
 4696       brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
 4697                                 - ((signed long) alignment));
 4698       if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
 4699         brk += alignment;
 4700 
 4701       newp = (mchunkptr) brk;
 4702       leadsize = brk - (char *) (p);
 4703       newsize = chunksize (p) - leadsize;
 4704 
 4705       /* For mmapped chunks, just adjust offset */
 4706       if (chunk_is_mmapped (p))
 4707         {
 4708           set_prev_size (newp, prev_size (p) + leadsize);
 4709           set_head (newp, newsize | IS_MMAPPED);
 4710           return chunk2mem (newp);
 4711         }
 4712 
 4713       /* Otherwise, give back leader, use the rest */
 4714       set_head (newp, newsize | PREV_INUSE |
 4715                 (av != &main_arena ? NON_MAIN_ARENA : 0));
 4716       set_inuse_bit_at_offset (newp, newsize);
 4717       set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
 4718       _int_free (av, p, 1);
 4719       p = newp;
 4720 
 4721       assert (newsize >= nb &&
 4722               (((unsigned long) (chunk2mem (p))) % alignment) == 0);
 4723     }
 4724 
 4725   /* Also give back spare room at the end */
 4726   if (!chunk_is_mmapped (p))
 4727     {
 4728       size = chunksize (p);
 4729       if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
 4730         {
 4731           remainder_size = size - nb;
 4732           remainder = chunk_at_offset (p, nb);
 4733           set_head (remainder, remainder_size | PREV_INUSE |
 4734                     (av != &main_arena ? NON_MAIN_ARENA : 0));
 4735           set_head_size (p, nb);
 4736           _int_free (av, remainder, 1);
 4737         }
 4738     }
 4739 
 4740   check_inuse_chunk (av, p);
 4741   return chunk2mem (p);
 4742 }
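/* Usage sketch (caller side, not part of this file): the public
   aligned-allocation entry points are built on _int_memalign, e.g.

     void *p = memalign (64, 1000);            // 64-byte aligned block
     void *q = aligned_alloc (4096, 8192);     // ISO C11 interface
     free (p);  free (q);

   The request is padded (nb + alignment + MINSIZE) so that an aligned
   spot can always be found inside the chunk returned by _int_malloc,
   and the leading and trailing slack is given back with _int_free.  */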
 4743 
 4744 
 4745 /*
 4746    ------------------------------ malloc_trim ------------------------------
 4747  */
 4748 
 4749 static int
 4750 mtrim (mstate av, size_t pad)
 4751 {
 4752   /* Ensure all blocks are consolidated.  */
 4753   malloc_consolidate (av);
 4754 
 4755   const size_t ps = GLRO (dl_pagesize);
 4756   int psindex = bin_index (ps);
 4757   const size_t psm1 = ps - 1;
 4758 
 4759   int result = 0;
 4760   for (int i = 1; i < NBINS; ++i)
 4761     if (i == 1 || i >= psindex)
 4762       {
 4763         mbinptr bin = bin_at (av, i);
 4764 
 4765         for (mchunkptr p = last (bin); p != bin; p = p->bk)
 4766           {
 4767             INTERNAL_SIZE_T size = chunksize (p);
 4768 
 4769             if (size > psm1 + sizeof (struct malloc_chunk))
 4770               {
 4771                 /* See whether the chunk contains at least one unused page.  */
 4772                 char *paligned_mem = (char *) (((uintptr_t) p
 4773                                                 + sizeof (struct malloc_chunk)
 4774                                                 + psm1) & ~psm1);
 4775 
 4776                 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
 4777                 assert ((char *) p + size > paligned_mem);
 4778 
 4779                 /* This is the size we could potentially free.  */
 4780                 size -= paligned_mem - (char *) p;
 4781 
 4782                 if (size > psm1)
 4783                   {
 4784 #if MALLOC_DEBUG
 4785                     /* When debugging we simulate destroying the memory
 4786                        content.  */
 4787                     memset (paligned_mem, 0x89, size & ~psm1);
 4788 #endif
 4789                     __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
 4790 
 4791                     result = 1;
 4792                   }
 4793               }
 4794           }
 4795       }
 4796 
 4797 #ifndef MORECORE_CANNOT_TRIM
 4798   return result | (av == &main_arena ? systrim (pad, av) : 0);
 4799 
 4800 #else
 4801   return result;
 4802 #endif
 4803 }
 4804 
 4805 
 4806 int
 4807 __malloc_trim (size_t s)
 4808 {
 4809   int result = 0;
 4810 
 4811   if (__malloc_initialized < 0)
 4812     ptmalloc_init ();
 4813 
 4814   mstate ar_ptr = &main_arena;
 4815   do
 4816     {
 4817       __libc_lock_lock (ar_ptr->mutex);
 4818       result |= mtrim (ar_ptr, s);
 4819       __libc_lock_unlock (ar_ptr->mutex);
 4820 
 4821       ar_ptr = ar_ptr->next;
 4822     }
 4823   while (ar_ptr != &main_arena);
 4824 
 4825   return result;
 4826 }
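/* Usage sketch (application side): malloc_trim walks every arena as
   above and reports whether any memory could be returned to the system,
   either by shrinking the main heap (systrim) or by madvise'ing whole
   unused pages inside free chunks.

     #include <malloc.h>
     if (malloc_trim (0))       // pad == 0: release as much as possible
       puts ("some free memory was returned to the OS");
 */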
 4827 
 4828 
 4829 /*
 4830    ------------------------- malloc_usable_size -------------------------
 4831  */
 4832 
 4833 static size_t
 4834 musable (void *mem)
 4835 {
 4836   mchunkptr p;
 4837   if (mem != 0)
 4838     {
 4839       p = mem2chunk (mem);
 4840 
 4841       if (__builtin_expect (using_malloc_checking == 1, 0))
 4842         return malloc_check_get_size (p);
 4843 
 4844       if (chunk_is_mmapped (p))
 4845         {
 4846           if (DUMPED_MAIN_ARENA_CHUNK (p))
 4847             return chunksize (p) - SIZE_SZ;
 4848           else
 4849             return chunksize (p) - 2 * SIZE_SZ;
 4850         }
 4851       else if (inuse (p))
 4852         return chunksize (p) - SIZE_SZ;
 4853     }
 4854   return 0;
 4855 }
 4856 
 4857 
 4858 size_t
 4859 __malloc_usable_size (void *m)
 4860 {
 4861   size_t result;
 4862 
 4863   result = musable (m);
 4864   return result;
 4865 }
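/* Usage sketch: malloc_usable_size reports the bytes actually usable in
   a block, which may exceed the requested size because of alignment and
   minimum-chunk rounding.

     char *p = malloc (5);
     size_t n = malloc_usable_size (p);   // e.g. 24 on a typical 64-bit build
     memset (p, 0, n);                    // writing up to n bytes is valid
     free (p);
 */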
 4866 
 4867 /*
 4868    ------------------------------ mallinfo ------------------------------
 4869    Accumulate malloc statistics for arena AV into M.
 4870  */
 4871 
 4872 static void
 4873 int_mallinfo (mstate av, struct mallinfo *m)
 4874 {
 4875   size_t i;
 4876   mbinptr b;
 4877   mchunkptr p;
 4878   INTERNAL_SIZE_T avail;
 4879   INTERNAL_SIZE_T fastavail;
 4880   int nblocks;
 4881   int nfastblocks;
 4882 
 4883   check_malloc_state (av);
 4884 
 4885   /* Account for top */
 4886   avail = chunksize (av->top);
 4887   nblocks = 1;  /* top always exists */
 4888 
 4889   /* traverse fastbins */
 4890   nfastblocks = 0;
 4891   fastavail = 0;
 4892 
 4893   for (i = 0; i < NFASTBINS; ++i)
 4894     {
 4895       for (p = fastbin (av, i); p != 0; p = p->fd)
 4896         {
 4897           ++nfastblocks;
 4898           fastavail += chunksize (p);
 4899         }
 4900     }
 4901 
 4902   avail += fastavail;
 4903 
 4904   /* traverse regular bins */
 4905   for (i = 1; i < NBINS; ++i)
 4906     {
 4907       b = bin_at (av, i);
 4908       for (p = last (b); p != b; p = p->bk)
 4909         {
 4910           ++nblocks;
 4911           avail += chunksize (p);
 4912         }
 4913     }
 4914 
 4915   m->smblks += nfastblocks;
 4916   m->ordblks += nblocks;
 4917   m->fordblks += avail;
 4918   m->uordblks += av->system_mem - avail;
 4919   m->arena += av->system_mem;
 4920   m->fsmblks += fastavail;
 4921   if (av == &main_arena)
 4922     {
 4923       m->hblks = mp_.n_mmaps;
 4924       m->hblkhd = mp_.mmapped_mem;
 4925       m->usmblks = 0;
 4926       m->keepcost = chunksize (av->top);
 4927     }
 4928 }
 4929 
 4930 
 4931 struct mallinfo
 4932 __libc_mallinfo (void)
 4933 {
 4934   struct mallinfo m;
 4935   mstate ar_ptr;
 4936 
 4937   if (__malloc_initialized < 0)
 4938     ptmalloc_init ();
 4939 
 4940   memset (&m, 0, sizeof (m));
 4941   ar_ptr = &main_arena;
 4942   do
 4943     {
 4944       __libc_lock_lock (ar_ptr->mutex);
 4945       int_mallinfo (ar_ptr, &m);
 4946       __libc_lock_unlock (ar_ptr->mutex);
 4947 
 4948       ar_ptr = ar_ptr->next;
 4949     }
 4950   while (ar_ptr != &main_arena);
 4951 
 4952   return m;
 4953 }
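/* Usage sketch: mallinfo accumulates the per-arena numbers computed by
   int_mallinfo above into a single struct.

     #include <malloc.h>
     struct mallinfo mi = mallinfo ();
     // mi.arena    - bytes obtained from the system for the heaps
     // mi.uordblks - bytes in allocated (in-use) chunks
     // mi.fordblks - bytes in free chunks
     // mi.hblkhd   - bytes in mmap'ed regions

   Note that the int fields can overflow on 64-bit systems with very
   large heaps.  */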
 4954 
 4955 /*
 4956    ------------------------------ malloc_stats ------------------------------
 4957  */
 4958 
 4959 void
 4960 __malloc_stats (void)
 4961 {
 4962   int i;
 4963   mstate ar_ptr;
 4964   unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
 4965 
 4966   if (__malloc_initialized < 0)
 4967     ptmalloc_init ();
 4968   _IO_flockfile (stderr);
 4969   int old_flags2 = stderr->_flags2;
 4970   stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
 4971   for (i = 0, ar_ptr = &main_arena;; i++)
 4972     {
 4973       struct mallinfo mi;
 4974 
 4975       memset (&mi, 0, sizeof (mi));
 4976       __libc_lock_lock (ar_ptr->mutex);
 4977       int_mallinfo (ar_ptr, &mi);
 4978       fprintf (stderr, "Arena %d:\n", i);
 4979       fprintf (stderr, "system bytes     = %10u\n", (unsigned int) mi.arena);
 4980       fprintf (stderr, "in use bytes     = %10u\n", (unsigned int) mi.uordblks);
 4981 #if MALLOC_DEBUG > 1
 4982       if (i > 0)
 4983         dump_heap (heap_for_ptr (top (ar_ptr)));
 4984 #endif
 4985       system_b += mi.arena;
 4986       in_use_b += mi.uordblks;
 4987       __libc_lock_unlock (ar_ptr->mutex);
 4988       ar_ptr = ar_ptr->next;
 4989       if (ar_ptr == &main_arena)
 4990         break;
 4991     }
 4992   fprintf (stderr, "Total (incl. mmap):\n");
 4993   fprintf (stderr, "system bytes     = %10u\n", system_b);
 4994   fprintf (stderr, "in use bytes     = %10u\n", in_use_b);
 4995   fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
 4996   fprintf (stderr, "max mmap bytes   = %10lu\n",
 4997            (unsigned long) mp_.max_mmapped_mem);
 4998   stderr->_flags2 = old_flags2;
 4999   _IO_funlockfile (stderr);
 5000 }
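/* Usage sketch: malloc_stats prints the same per-arena numbers to
   stderr in a fixed text format (the values below are illustrative):

     malloc_stats ();
     // Arena 0:
     // system bytes     =     135168
     // in use bytes     =      74352
     // Total (incl. mmap):
     // system bytes     =     135168
     // in use bytes     =      74352
     // max mmap regions =          0
     // max mmap bytes   =          0
 */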
 5001 
 5002 
 5003 /*
 5004    ------------------------------ mallopt ------------------------------
 5005  */
 5006 static inline int
 5007 __always_inline
 5008 do_set_trim_threshold (size_t value)
 5009 {
 5010   LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
 5011           mp_.no_dyn_threshold);
 5012   mp_.trim_threshold = value;
 5013   mp_.no_dyn_threshold = 1;
 5014   return 1;
 5015 }
 5016 
 5017 static inline int
 5018 __always_inline
 5019 do_set_top_pad (size_t value)
 5020 {
 5021   LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
 5022           mp_.no_dyn_threshold);
 5023   mp_.top_pad = value;
 5024   mp_.no_dyn_threshold = 1;
 5025   return 1;
 5026 }
 5027 
 5028 static inline int
 5029 __always_inline
 5030 do_set_mmap_threshold (size_t value)
 5031 {
 5032   /* Forbid setting the threshold too high.  */
 5033   if (value <= HEAP_MAX_SIZE / 2)
 5034     {
 5035       LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
 5036           mp_.no_dyn_threshold);
 5037       mp_.mmap_threshold = value;
 5038       mp_.no_dyn_threshold = 1;
 5039       return 1;
 5040     }
 5041   return 0;
 5042 }
 5043 
 5044 static inline int
 5045 __always_inline
 5046 do_set_mmaps_max (int32_t value)
 5047 {
 5048   LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
 5049           mp_.no_dyn_threshold);
 5050   mp_.n_mmaps_max = value;
 5051   mp_.no_dyn_threshold = 1;
 5052   return 1;
 5053 }
 5054 
 5055 static inline int
 5056 __always_inline
 5057 do_set_mallopt_check (int32_t value)
 5058 {
 5059   return 1;
 5060 }
 5061 
 5062 static inline int
 5063 __always_inline
 5064 do_set_perturb_byte (int32_t value)
 5065 {
 5066   LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
 5067   perturb_byte = value;
 5068   return 1;
 5069 }
 5070 
 5071 static inline int
 5072 __always_inline
 5073 do_set_arena_test (size_t value)
 5074 {
 5075   LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
 5076   mp_.arena_test = value;
 5077   return 1;
 5078 }
 5079 
 5080 static inline int
 5081 __always_inline
 5082 do_set_arena_max (size_t value)
 5083 {
 5084   LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
 5085   mp_.arena_max = value;
 5086   return 1;
 5087 }
 5088 
 5089 #if USE_TCACHE
 5090 static inline int
 5091 __always_inline
 5092 do_set_tcache_max (size_t value)
 5093 {
 5094   if (value >= 0 && value <= MAX_TCACHE_SIZE)
 5095     {
 5096       LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
 5097       mp_.tcache_max_bytes = value;
 5098       mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
 5099     }
 5100   return 1;
 5101 }
 5102 
 5103 static inline int
 5104 __always_inline
 5105 do_set_tcache_count (size_t value)
 5106 {
 5107   LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
 5108   mp_.tcache_count = value;
 5109   return 1;
 5110 }
 5111 
 5112 static inline int
 5113 __always_inline
 5114 do_set_tcache_unsorted_limit (size_t value)
 5115 {
 5116   LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
 5117   mp_.tcache_unsorted_limit = value;
 5118   return 1;
 5119 }
 5120 #endif
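/* Usage sketch: the tcache knobs above are normally driven through the
   glibc tunables framework rather than mallopt, e.g. (assuming the
   standard glibc.malloc.* tunable names):

     GLIBC_TUNABLES=glibc.malloc.tcache_count=0 ./app   # keep no chunks per bin
     GLIBC_TUNABLES=glibc.malloc.tcache_max=1024 ./app  # cache chunks <= 1024 bytes
 */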
 5121 
 5122 int
 5123 __libc_mallopt (int param_number, int value)
 5124 {
 5125   mstate av = &main_arena;
 5126   int res = 1;
 5127 
 5128   if (__malloc_initialized < 0)
 5129     ptmalloc_init ();
 5130   __libc_lock_lock (av->mutex);
 5131 
 5132   LIBC_PROBE (memory_mallopt, 2, param_number, value);
 5133 
 5134   /* We must consolidate main arena before changing max_fast
 5135      (see definition of set_max_fast).  */
 5136   malloc_consolidate (av);
 5137 
 5138   switch (param_number)
 5139     {
 5140     case M_MXFAST:
 5141       if (value >= 0 && value <= MAX_FAST_SIZE)
 5142         {
 5143           LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
 5144           set_max_fast (value);
 5145         }
 5146       else
 5147         res = 0;
 5148       break;
 5149 
 5150     case M_TRIM_THRESHOLD:
 5151       do_set_trim_threshold (value);
 5152       break;
 5153 
 5154     case M_TOP_PAD:
 5155       do_set_top_pad (value);
 5156       break;
 5157 
 5158     case M_MMAP_THRESHOLD:
 5159       res = do_set_mmap_threshold (value);
 5160       break;
 5161 
 5162     case M_MMAP_MAX:
 5163       do_set_mmaps_max (value);
 5164       break;
 5165 
 5166     case M_CHECK_ACTION:
 5167       do_set_mallopt_check (value);
 5168       break;
 5169 
 5170     case M_PERTURB:
 5171       do_set_perturb_byte (value);
 5172       break;
 5173 
 5174     case M_ARENA_TEST:
 5175       if (value > 0)
 5176     do_set_arena_test (value);
 5177       break;
 5178 
 5179     case M_ARENA_MAX:
 5180       if (value > 0)
 5181     do_set_arena_max (value);
 5182       break;
 5183     }
 5184   __libc_lock_unlock (av->mutex);
 5185   return res;
 5186 }
 5187 libc_hidden_def (__libc_mallopt)
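/* Usage sketch (application side): mallopt adjusts the parameters
   handled above at run time and returns 1 on success, 0 on failure.

     #include <malloc.h>
     mallopt (M_MMAP_THRESHOLD, 256 * 1024);   // mmap requests >= 256 KiB
     mallopt (M_TRIM_THRESHOLD, 1024 * 1024);  // trim top once >= 1 MiB is free
     mallopt (M_ARENA_MAX, 2);                 // cap the number of arenas
 */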
 5188 
 5189 
 5190 /*
 5191    -------------------- Alternative MORECORE functions --------------------
 5192  */
 5193 
 5194 
 5195 /*
 5196    General Requirements for MORECORE.
 5197 
 5198    The MORECORE function must have the following properties:
 5199 
 5200    If MORECORE_CONTIGUOUS is false:
 5201 
 5202  * MORECORE must allocate in multiples of pagesize. It will
 5203       only be called with arguments that are multiples of pagesize.
 5204 
 5205  * MORECORE(0) must return an address that is at least
 5206       MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
 5207 
 5208    else (i.e. if MORECORE_CONTIGUOUS is true):
 5209 
 5210  * Consecutive calls to MORECORE with positive arguments
 5211       return increasing addresses, indicating that space has been
 5212       contiguously extended.
 5213 
 5214  * MORECORE need not allocate in multiples of pagesize.
 5215       Calls to MORECORE need not have args of multiples of pagesize.
 5216 
 5217  * MORECORE need not page-align.
 5218 
 5219    In either case:
 5220 
 5221  * MORECORE may allocate more memory than requested. (Or even less,
 5222       but this will generally result in a malloc failure.)
 5223 
 5224  * MORECORE must not allocate memory when given argument zero, but
 5225       instead return one past the end address of memory from previous
 5226       nonzero call. This malloc does NOT call MORECORE(0)
 5227       until at least one call with positive arguments is made, so
 5228       the initial value returned is not important.
 5229 
 5230  * Even though consecutive calls to MORECORE need not return contiguous
 5231       addresses, it must be OK for malloc'ed chunks to span multiple
 5232       regions in those cases where they do happen to be contiguous.
 5233 
 5234  * MORECORE need not handle negative arguments -- it may instead
 5235       just return MORECORE_FAILURE when given negative arguments.
 5236       Negative arguments are always multiples of pagesize. MORECORE
 5237       must not misinterpret negative args as large positive unsigned
 5238       args. You can suppress all such calls from even occurring by defining
 5239       MORECORE_CANNOT_TRIM.
 5240 
 5241    There is some variation across systems in the type of the argument
 5242    to sbrk/MORECORE.  Since size_t is unsigned, the argument cannot
 5243    actually be size_t: sbrk supports negative args, so it is normally
 5244    the signed type of the same width as size_t (sometimes declared as
 5245    "intptr_t", and sometimes "ptrdiff_t").  It doesn't much matter
 5246    though.  Internally, we use "long" as arguments, which should work
 5247    across all reasonable possibilities.
 5248 
 5249    Additionally, if MORECORE ever returns failure for a positive
 5250    request, then mmap is used as a noncontiguous system allocator. This
 5251    is a useful backup strategy for systems with holes in address spaces
 5252    -- in this case sbrk cannot contiguously expand the heap, but mmap
 5253    may be able to map noncontiguous space.
 5254 
 5255    If you'd like mmap to ALWAYS be used, you can define MORECORE to be
 5256    a function that always returns MORECORE_FAILURE.
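
   For instance (a sketch; MORECORE_FAILURE is defined earlier in this
   file, and the function name here is made up for illustration):

   static void *neverMoreCore (ptrdiff_t increment)
   {
     return (void *) MORECORE_FAILURE;    // never extend; force the mmap path
   }
   #define MORECORE neverMoreCore
   #define MORECORE_CONTIGUOUS 0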
 5257 
 5258    If you are using this malloc with something other than sbrk (or its
 5259    emulation) to supply memory regions, you probably want to set
 5260    MORECORE_CONTIGUOUS as false.  As an example, here is a custom
 5261    allocator kindly contributed for pre-OSX macOS.  It uses virtually
 5262    but not necessarily physically contiguous non-paged memory (locked
 5263    in, present and won't get swapped out).  You can use it by
 5264    uncommenting this section, adding some #includes, and setting up the
 5265    appropriate defines above:
 5266 
 5267  *#define MORECORE osMoreCore
 5268  *#define MORECORE_CONTIGUOUS 0
 5269 
 5270    There is also a shutdown routine that should somehow be called for
 5271    cleanup upon program exit.
 5272 
 5273  *#define MAX_POOL_ENTRIES 100
 5274  *#define MINIMUM_MORECORE_SIZE  (64 * 1024)
 5275    static int next_os_pool;
 5276    void *our_os_pools[MAX_POOL_ENTRIES];
 5277 
 5278    void *osMoreCore(int size)
 5279    {
 5280     void *ptr = 0;
 5281     static void *sbrk_top = 0;
 5282 
 5283     if (size > 0)
 5284     {
 5285       if (size < MINIMUM_MORECORE_SIZE)
 5286          size = MINIMUM_MORECORE_SIZE;
 5287       if (CurrentExecutionLevel() == kTaskLevel)
 5288          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
 5289       if (ptr == 0)
 5290       {
 5291         return (void *) MORECORE_FAILURE;
 5292       }
 5293       // save ptrs so they can be freed during cleanup
 5294       our_os_pools[next_os_pool] = ptr;
 5295       next_os_pool++;
 5296       ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
 5297       sbrk_top = (char *) ptr + size;
 5298       return ptr;
 5299     }
 5300     else if (size < 0)
 5301     {
 5302       // we don't currently support shrink behavior
 5303       return (void *) MORECORE_FAILURE;
 5304     }
 5305     else
 5306     {
 5307       return sbrk_top;
 5308     }
 5309    }
 5310 
 5311    // cleanup any allocated memory pools
 5312    // called as last thing before shutting down driver
 5313 
 5314    void osCleanupMem(void)
 5315    {
 5316     void **ptr;
 5317 
 5318     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
 5319       if (*ptr)
 5320       {
 5321          PoolDeallocate(*ptr);
 5322          *ptr = 0;
 5323       }
 5324    }
 5325 
 5326  */
 5327 
 5328 
 5329 /* Helper code.  */
 5330 
 5331 extern char **__libc_argv attribute_hidden;
 5332 
 5333 static void
 5334 malloc_printerr (const char *str)
 5335 {
 5336   __libc_message (do_abort, "%s\n", str);
 5337   __builtin_unreachable ();
 5338 }
 5339 
 5340 /* We need a wrapper function for one of the additions of POSIX.  */
 5341 int
 5342 __posix_memalign (void **memptr, size_t alignment, size_t size)
 5343 {
 5344   void *mem;
 5345 
 5346   /* Test whether the SIZE argument is valid.  It must be a power of
 5347      two multiple of sizeof (void *).  */
 5348   if (alignment % sizeof (void *) != 0
 5349       || !powerof2 (alignment / sizeof (void *))
 5350       || alignment == 0)
 5351     return EINVAL;
 5352 
 5353 
 5354   void *address = RETURN_ADDRESS (0);
 5355   mem = _mid_memalign (alignment, size, address);
 5356 
 5357   if (mem != NULL)
 5358     {
 5359       *memptr = mem;
 5360       return 0;
 5361     }
 5362 
 5363   return ENOMEM;
 5364 }
 5365 weak_alias (__posix_memalign, posix_memalign)
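/* Usage sketch (caller side): posix_memalign stores the allocation in
   *memptr and returns 0 on success or an error number such as EINVAL
   or ENOMEM on failure.

     void *buf;
     if (posix_memalign (&buf, 64, 1024) == 0)
       {
         // buf is 64-byte aligned and is released with free()
         free (buf);
       }
 */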
 5366 
 5367 
 5368 int
 5369 __malloc_info (int options, FILE *fp)
 5370 {
 5371   /* For now, at least.  */
 5372   if (options != 0)
 5373     return EINVAL;
 5374 
 5375   int n = 0;
 5376   size_t total_nblocks = 0;
 5377   size_t total_nfastblocks = 0;
 5378   size_t total_avail = 0;
 5379   size_t total_fastavail = 0;
 5380   size_t total_system = 0;
 5381   size_t total_max_system = 0;
 5382   size_t total_aspace = 0;
 5383   size_t total_aspace_mprotect = 0;
 5384 
 5385 
 5386 
 5387   if (__malloc_initialized < 0)
 5388     ptmalloc_init ();
 5389 
 5390   fputs ("<malloc version=\"1\">\n", fp);
 5391 
 5392   /* Iterate over all arenas currently in use.  */
 5393   mstate ar_ptr = &main_arena;
 5394   do
 5395     {
 5396       fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
 5397 
 5398       size_t nblocks = 0;
 5399       size_t nfastblocks = 0;
 5400       size_t avail = 0;
 5401       size_t fastavail = 0;
 5402       struct
 5403       {
 5404     size_t from;
 5405     size_t to;
 5406     size_t total;
 5407     size_t count;
 5408       } sizes[NFASTBINS + NBINS - 1];
 5409 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
 5410 
 5411       __libc_lock_lock (ar_ptr->mutex);
 5412 
 5413       for (size_t i = 0; i < NFASTBINS; ++i)
 5414     {
 5415       mchunkptr p = fastbin (ar_ptr, i);
 5416       if (p != NULL)
 5417         {
 5418           size_t nthissize = 0;
 5419           size_t thissize = chunksize (p);
 5420 
 5421           while (p != NULL)
 5422         {
 5423           ++nthissize;
 5424           p = p->fd;
 5425         }
 5426 
 5427           fastavail += nthissize * thissize;
 5428           nfastblocks += nthissize;
 5429           sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
 5430           sizes[i].to = thissize;
 5431           sizes[i].count = nthissize;
 5432         }
 5433       else
 5434         sizes[i].from = sizes[i].to = sizes[i].count = 0;
 5435 
 5436       sizes[i].total = sizes[i].count * sizes[i].to;
 5437     }
 5438 
 5439 
 5440       mbinptr bin;
 5441       struct malloc_chunk *r;
 5442 
 5443       for (size_t i = 1; i < NBINS; ++i)
 5444     {
 5445       bin = bin_at (ar_ptr, i);
 5446       r = bin->fd;
 5447       sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
 5448       sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
 5449                       = sizes[NFASTBINS - 1 + i].count = 0;
 5450 
 5451       if (r != NULL)
 5452         while (r != bin)
 5453           {
 5454         size_t r_size = chunksize_nomask (r);
 5455         ++sizes[NFASTBINS - 1 + i].count;
 5456         sizes[NFASTBINS - 1 + i].total += r_size;
 5457         sizes[NFASTBINS - 1 + i].from
 5458           = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
 5459         sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
 5460                            r_size);
 5461 
 5462         r = r->fd;
 5463           }
 5464 
 5465       if (sizes[NFASTBINS - 1 + i].count == 0)
 5466         sizes[NFASTBINS - 1 + i].from = 0;
 5467       nblocks += sizes[NFASTBINS - 1 + i].count;
 5468       avail += sizes[NFASTBINS - 1 + i].total;
 5469     }
 5470 
 5471       size_t heap_size = 0;
 5472       size_t heap_mprotect_size = 0;
 5473       size_t heap_count = 0;
 5474       if (ar_ptr != &main_arena)
 5475     {
 5476       /* Iterate over the arena heaps from back to front.  */
 5477       heap_info *heap = heap_for_ptr (top (ar_ptr));
 5478       do
 5479         {
 5480           heap_size += heap->size;
 5481           heap_mprotect_size += heap->mprotect_size;
 5482           heap = heap->prev;
 5483           ++heap_count;
 5484         }
 5485       while (heap != NULL);
 5486     }
 5487 
 5488       __libc_lock_unlock (ar_ptr->mutex);
 5489 
 5490       total_nfastblocks += nfastblocks;
 5491       total_fastavail += fastavail;
 5492 
 5493       total_nblocks += nblocks;
 5494       total_avail += avail;
 5495 
 5496       for (size_t i = 0; i < nsizes; ++i)
 5497     if (sizes[i].count != 0 && i != NFASTBINS)
 5498       fprintf (fp, "                                  \
 5499   <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
 5500            sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
 5501 
 5502       if (sizes[NFASTBINS].count != 0)
 5503     fprintf (fp, "\
 5504   <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
 5505          sizes[NFASTBINS].from, sizes[NFASTBINS].to,
 5506          sizes[NFASTBINS].total, sizes[NFASTBINS].count);
 5507 
 5508       total_system += ar_ptr->system_mem;
 5509       total_max_system += ar_ptr->max_system_mem;
 5510 
 5511       fprintf (fp,
 5512            "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
 5513            "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
 5514            "<system type=\"current\" size=\"%zu\"/>\n"
 5515            "<system type=\"max\" size=\"%zu\"/>\n",
 5516            nfastblocks, fastavail, nblocks, avail,
 5517            ar_ptr->system_mem, ar_ptr->max_system_mem);
 5518 
 5519       if (ar_ptr != &main_arena)
 5520     {
 5521       fprintf (fp,
 5522            "<aspace type=\"total\" size=\"%zu\"/>\n"
 5523            "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
 5524            "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
 5525            heap_size, heap_mprotect_size, heap_count);
 5526       total_aspace += heap_size;
 5527       total_aspace_mprotect += heap_mprotect_size;
 5528     }
 5529       else
 5530     {
 5531       fprintf (fp,
 5532            "<aspace type=\"total\" size=\"%zu\"/>\n"
 5533            "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
 5534            ar_ptr->system_mem, ar_ptr->system_mem);
 5535       total_aspace += ar_ptr->system_mem;
 5536       total_aspace_mprotect += ar_ptr->system_mem;
 5537     }
 5538 
 5539       fputs ("</heap>\n", fp);
 5540       ar_ptr = ar_ptr->next;
 5541     }
 5542   while (ar_ptr != &main_arena);
 5543 
 5544   fprintf (fp,
 5545        "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
 5546        "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
 5547        "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
 5548        "<system type=\"current\" size=\"%zu\"/>\n"
 5549        "<system type=\"max\" size=\"%zu\"/>\n"
 5550        "<aspace type=\"total\" size=\"%zu\"/>\n"
 5551        "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
 5552        "</malloc>\n",
 5553        total_nfastblocks, total_fastavail, total_nblocks, total_avail,
 5554        mp_.n_mmaps, mp_.mmapped_mem,
 5555        total_system, total_max_system,
 5556        total_aspace, total_aspace_mprotect);
 5557 
 5558   return 0;
 5559 }
 5560 weak_alias (__malloc_info, malloc_info)
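/* Usage sketch: malloc_info writes an XML report describing all arenas
   to the given stream; OPTIONS must currently be 0.

     #include <malloc.h>
     malloc_info (0, stdout);

   The report is wrapped in <malloc version="1"> ... </malloc> and holds
   one <heap> element per arena with the <sizes>, <total>, <system> and
   <aspace> elements emitted above.  */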
 5561 
 5562 
 5563 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
 5564 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
 5565 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
 5566 strong_alias (__libc_memalign, __memalign)
 5567 weak_alias (__libc_memalign, memalign)
 5568 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
 5569 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
 5570 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
 5571 strong_alias (__libc_mallinfo, __mallinfo)
 5572 weak_alias (__libc_mallinfo, mallinfo)
 5573 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
 5574 
 5575 weak_alias (__malloc_stats, malloc_stats)
 5576 weak_alias (__malloc_usable_size, malloc_usable_size)
 5577 weak_alias (__malloc_trim, malloc_trim)
 5578 
 5579 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
 5580 compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
 5581 #endif
 5582 
 5583 /* ------------------------------------------------------------
 5584    History:
 5585 
 5586    [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
 5587 
 5588  */
 5589 /*
 5590  * Local variables:
 5591  * c-basic-offset: 2
 5592  * End:
 5593  */