"Fossies" - the Fresh Open Source Software Archive

Member "glibc-2.29/malloc/malloc.c" (31 Jan 2019, 182769 Bytes) of package /linux/misc/glibc-2.29.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "malloc.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 2.28_vs_2.29.

    1 /* Malloc implementation for multiple threads without lock contention.
    2    Copyright (C) 1996-2019 Free Software Foundation, Inc.
    3    This file is part of the GNU C Library.
    4    Contributed by Wolfram Gloger <wg@malloc.de>
    5    and Doug Lea <dl@cs.oswego.edu>, 2001.
    6 
    7    The GNU C Library is free software; you can redistribute it and/or
    8    modify it under the terms of the GNU Lesser General Public License as
    9    published by the Free Software Foundation; either version 2.1 of the
   10    License, or (at your option) any later version.
   11 
   12    The GNU C Library is distributed in the hope that it will be useful,
   13    but WITHOUT ANY WARRANTY; without even the implied warranty of
   14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   15    Lesser General Public License for more details.
   16 
   17    You should have received a copy of the GNU Lesser General Public
   18    License along with the GNU C Library; see the file COPYING.LIB.  If
   19    not, see <http://www.gnu.org/licenses/>.  */
   20 
   21 /*
   22   This is a version (aka ptmalloc2) of malloc/free/realloc written by
   23   Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
   24 
   25   There have been substantial changes made after the integration into
   26   glibc in all parts of the code.  Do not look for much commonality
   27   with the ptmalloc2 version.
   28 
   29 * Version ptmalloc2-20011215
   30   based on:
   31   VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
   32 
   33 * Quickstart
   34 
   35   In order to compile this implementation, a Makefile is provided with
   36   the ptmalloc2 distribution, which has pre-defined targets for some
   37   popular systems (e.g. "make posix" for Posix threads).  All that is
   38   typically required with regard to compiler flags is the selection of
   39   the thread package via defining one out of USE_PTHREADS, USE_THR or
   40   USE_SPROC.  Check the thread-m.h file for what effects this has.
   41   Many/most systems will additionally require USE_TSD_DATA_HACK to be
   42   defined, so this is the default for "make posix".
   43 
   44 * Why use this malloc?
   45 
   46   This is not the fastest, most space-conserving, most portable, or
   47   most tunable malloc ever written. However it is among the fastest
   48   while also being among the most space-conserving, portable and tunable.
   49   Consistent balance across these factors results in a good general-purpose
   50   allocator for malloc-intensive programs.
   51 
   52   The main properties of the algorithms are:
   53   * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
   54     with ties normally decided via FIFO (i.e. least recently used).
    55   * For small (<= 64 bytes by default) requests, it is a caching
    56     allocator that maintains pools of quickly recycled chunks.
   57   * In between, and for combinations of large and small requests, it does
   58     the best it can trying to meet both goals at once.
   59   * For very large requests (>= 128KB by default), it relies on system
   60     memory mapping facilities, if supported.
   61 
   62   For a longer but slightly out of date high-level description, see
   63      http://gee.cs.oswego.edu/dl/html/malloc.html
   64 
   65   You may already by default be using a C library containing a malloc
    66   that is based on some version of this malloc (for example in
   67   linux). You might still want to use the one in this file in order to
   68   customize settings or to avoid overheads associated with library
   69   versions.
   70 
   71 * Contents, described in more detail in "description of public routines" below.
   72 
   73   Standard (ANSI/SVID/...)  functions:
   74     malloc(size_t n);
   75     calloc(size_t n_elements, size_t element_size);
   76     free(void* p);
   77     realloc(void* p, size_t n);
   78     memalign(size_t alignment, size_t n);
   79     valloc(size_t n);
   80     mallinfo()
   81     mallopt(int parameter_number, int parameter_value)
   82 
   83   Additional functions:
   84     independent_calloc(size_t n_elements, size_t size, void* chunks[]);
   85     independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   86     pvalloc(size_t n);
   87     malloc_trim(size_t pad);
   88     malloc_usable_size(void* p);
   89     malloc_stats();
   90 
   91 * Vital statistics:
   92 
   93   Supported pointer representation:       4 or 8 bytes
   94   Supported size_t  representation:       4 or 8 bytes
   95        Note that size_t is allowed to be 4 bytes even if pointers are 8.
   96        You can adjust this by defining INTERNAL_SIZE_T
   97 
   98   Alignment:                              2 * sizeof(size_t) (default)
   99        (i.e., 8 byte alignment with 4byte size_t). This suffices for
  100        nearly all current machines and C compilers. However, you can
  101        define MALLOC_ALIGNMENT to be wider than this if necessary.
  102 
  103   Minimum overhead per allocated chunk:   4 or 8 bytes
  104        Each malloced chunk has a hidden word of overhead holding size
  105        and status information.
  106 
  107   Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
   108               8-byte ptrs:  24/32 bytes (including 4/8 overhead)
  109 
  110        When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
  111        ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
  112        needed; 4 (8) for a trailing size field and 8 (16) bytes for
  113        free list pointers. Thus, the minimum allocatable size is
  114        16/24/32 bytes.
  115 
  116        Even a request for zero bytes (i.e., malloc(0)) returns a
  117        pointer to something of the minimum allocatable size.
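/* Illustrative sketch (not part of the original sources), assuming a
   typical glibc build: even malloc(0) returns a pointer to a block of at
   least the minimum allocatable size described above.

     #include <malloc.h>
     #include <stdio.h>
     #include <stdlib.h>

     int main (void)
     {
       void *p = malloc (0);
       if (p != NULL)
         // typically prints 24 on 64-bit and 12 on 32-bit builds,
         // but the exact figure is configuration dependent
         printf ("%zu\n", malloc_usable_size (p));
       free (p);
       return 0;
     }
*/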
  118 
  119        The maximum overhead wastage (i.e., number of extra bytes
  120        allocated than were requested in malloc) is less than or equal
  121        to the minimum size, except for requests >= mmap_threshold that
  122        are serviced via mmap(), where the worst case wastage is 2 *
  123        sizeof(size_t) bytes plus the remainder from a system page (the
  124        minimal mmap unit); typically 4096 or 8192 bytes.
  125 
  126   Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
  127                8-byte size_t: 2^64 minus about two pages
  128 
  129        It is assumed that (possibly signed) size_t values suffice to
  130        represent chunk sizes. `Possibly signed' is due to the fact
  131        that `size_t' may be defined on a system as either a signed or
  132        an unsigned type. The ISO C standard says that it must be
  133        unsigned, but a few systems are known not to adhere to this.
  134        Additionally, even when size_t is unsigned, sbrk (which is by
  135        default used to obtain memory from system) accepts signed
  136        arguments, and may not be able to handle size_t-wide arguments
  137        with negative sign bit.  Generally, values that would
  138        appear as negative after accounting for overhead and alignment
  139        are supported only via mmap(), which does not have this
  140        limitation.
  141 
  142        Requests for sizes outside the allowed range will perform an optional
  143        failure action and then return null. (Requests may also
  144        also fail because a system is out of memory.)
  145 
  146   Thread-safety: thread-safe
  147 
  148   Compliance: I believe it is compliant with the 1997 Single Unix Specification
  149        Also SVID/XPG, ANSI C, and probably others as well.
  150 
  151 * Synopsis of compile-time options:
  152 
  153     People have reported using previous versions of this malloc on all
  154     versions of Unix, sometimes by tweaking some of the defines
  155     below. It has been tested most extensively on Solaris and Linux.
  156     People also report using it in stand-alone embedded systems.
  157 
  158     The implementation is in straight, hand-tuned ANSI C.  It is not
  159     at all modular. (Sorry!)  It uses a lot of macros.  To be at all
  160     usable, this code should be compiled using an optimizing compiler
  161     (for example gcc -O3) that can simplify expressions and control
  162     paths. (FAQ: some macros import variables as arguments rather than
  163     declare locals because people reported that some debuggers
  164     otherwise get confused.)
  165 
  166     OPTION                     DEFAULT VALUE
  167 
  168     Compilation Environment options:
  169 
  170     HAVE_MREMAP                0
  171 
  172     Changing default word sizes:
  173 
  174     INTERNAL_SIZE_T            size_t
  175 
  176     Configuration and functionality options:
  177 
  178     USE_PUBLIC_MALLOC_WRAPPERS NOT defined
  179     USE_MALLOC_LOCK            NOT defined
  180     MALLOC_DEBUG               NOT defined
  181     REALLOC_ZERO_BYTES_FREES   1
  182     TRIM_FASTBINS              0
  183 
  184     Options for customizing MORECORE:
  185 
  186     MORECORE                   sbrk
  187     MORECORE_FAILURE           -1
  188     MORECORE_CONTIGUOUS        1
  189     MORECORE_CANNOT_TRIM       NOT defined
  190     MORECORE_CLEARS            1
  191     MMAP_AS_MORECORE_SIZE      (1024 * 1024)
  192 
  193     Tuning options that are also dynamically changeable via mallopt:
  194 
  195     DEFAULT_MXFAST             64 (for 32bit), 128 (for 64bit)
  196     DEFAULT_TRIM_THRESHOLD     128 * 1024
  197     DEFAULT_TOP_PAD            0
  198     DEFAULT_MMAP_THRESHOLD     128 * 1024
  199     DEFAULT_MMAP_MAX           65536
  200 
  201     There are several other #defined constants and macros that you
  202     probably don't want to touch unless you are extending or adapting malloc.  */
  203 
  204 /*
  205   void* is the pointer type that malloc should say it returns
  206 */
  207 
  208 #ifndef void
  209 #define void      void
  210 #endif /*void*/
  211 
  212 #include <stddef.h>   /* for size_t */
  213 #include <stdlib.h>   /* for getenv(), abort() */
  214 #include <unistd.h>   /* for __libc_enable_secure */
  215 
  216 #include <atomic.h>
  217 #include <_itoa.h>
  218 #include <bits/wordsize.h>
  219 #include <sys/sysinfo.h>
  220 
  221 #include <ldsodefs.h>
  222 
  223 #include <unistd.h>
  224 #include <stdio.h>    /* needed for malloc_stats */
  225 #include <errno.h>
  226 #include <assert.h>
  227 
  228 #include <shlib-compat.h>
  229 
  230 /* For uintptr_t.  */
  231 #include <stdint.h>
  232 
  233 /* For va_arg, va_start, va_end.  */
  234 #include <stdarg.h>
  235 
  236 /* For MIN, MAX, powerof2.  */
  237 #include <sys/param.h>
  238 
  239 /* For ALIGN_UP et. al.  */
  240 #include <libc-pointer-arith.h>
  241 
  242 /* For DIAG_PUSH/POP_NEEDS_COMMENT et al.  */
  243 #include <libc-diag.h>
  244 
  245 #include <malloc/malloc-internal.h>
  246 
  247 /* For SINGLE_THREAD_P.  */
  248 #include <sysdep-cancel.h>
  249 
  250 /*
  251   Debugging:
  252 
  253   Because freed chunks may be overwritten with bookkeeping fields, this
  254   malloc will often die when freed memory is overwritten by user
  255   programs.  This can be very effective (albeit in an annoying way)
  256   in helping track down dangling pointers.
  257 
  258   If you compile with -DMALLOC_DEBUG, a number of assertion checks are
  259   enabled that will catch more memory errors. You probably won't be
  260   able to make much sense of the actual assertion errors, but they
  261   should help you locate incorrectly overwritten memory.  The checking
  262   is fairly extensive, and will slow down execution
  263   noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
  264   will attempt to check every non-mmapped allocated and free chunk in
   265   the course of computing the summaries. (By nature, mmapped regions
  266   cannot be checked very much automatically.)
  267 
  268   Setting MALLOC_DEBUG may also be helpful if you are trying to modify
  269   this code. The assertions in the check routines spell out in more
  270   detail the assumptions and invariants underlying the algorithms.
  271 
  272   Setting MALLOC_DEBUG does NOT provide an automated mechanism for
  273   checking that all accesses to malloced memory stay within their
  274   bounds. However, there are several add-ons and adaptations of this
  275   or other mallocs available that do this.
  276 */
  277 
  278 #ifndef MALLOC_DEBUG
  279 #define MALLOC_DEBUG 0
  280 #endif
  281 
  282 #ifndef NDEBUG
  283 # define __assert_fail(assertion, file, line, function)         \
  284      __malloc_assert(assertion, file, line, function)
  285 
  286 extern const char *__progname;
  287 
  288 static void
  289 __malloc_assert (const char *assertion, const char *file, unsigned int line,
  290          const char *function)
  291 {
  292   (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
  293              __progname, __progname[0] ? ": " : "",
  294              file, line,
  295              function ? function : "", function ? ": " : "",
  296              assertion);
  297   fflush (stderr);
  298   abort ();
  299 }
  300 #endif
  301 
  302 #if USE_TCACHE
  303 /* We want 64 entries.  This is an arbitrary limit, which tunables can reduce.  */
  304 # define TCACHE_MAX_BINS        64
  305 # define MAX_TCACHE_SIZE    tidx2usize (TCACHE_MAX_BINS-1)
  306 
  307 /* Only used to pre-fill the tunables.  */
  308 # define tidx2usize(idx)    (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
  309 
  310 /* When "x" is from chunksize().  */
  311 # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
  312 /* When "x" is a user-provided size.  */
  313 # define usize2tidx(x) csize2tidx (request2size (x))
  314 
  315 /* With rounding and alignment, the bins are...
  316    idx 0   bytes 0..24 (64-bit) or 0..12 (32-bit)
  317    idx 1   bytes 25..40 or 13..20
  318    idx 2   bytes 41..56 or 21..28
  319    etc.  */
  320 
  321 /* This is another arbitrary limit, which tunables can change.  Each
  322    tcache bin will hold at most this number of chunks.  */
  323 # define TCACHE_FILL_COUNT 7
  324 #endif
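/* A worked example of the index macros above, assuming a typical 64-bit
   build where SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and MINSIZE == 32
   (all of these are configuration dependent):

     request2size (24) == 32,  csize2tidx (32) == (32 - 32 + 15) / 16 == 0
     request2size (25) == 48,  csize2tidx (48) == (48 - 32 + 15) / 16 == 1

   which matches the "idx 0: bytes 0..24, idx 1: bytes 25..40" table.  */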
  325 
  326 
  327 /*
  328   REALLOC_ZERO_BYTES_FREES should be set if a call to
  329   realloc with zero bytes should be the same as a call to free.
  330   This is required by the C standard. Otherwise, since this malloc
  331   returns a unique pointer for malloc(0), so does realloc(p, 0).
  332 */
  333 
  334 #ifndef REALLOC_ZERO_BYTES_FREES
  335 #define REALLOC_ZERO_BYTES_FREES 1
  336 #endif
  337 
  338 /*
  339   TRIM_FASTBINS controls whether free() of a very small chunk can
  340   immediately lead to trimming. Setting to true (1) can reduce memory
  341   footprint, but will almost always slow down programs that use a lot
  342   of small chunks.
  343 
  344   Define this only if you are willing to give up some speed to more
  345   aggressively reduce system-level memory footprint when releasing
  346   memory in programs that use many small chunks.  You can get
  347   essentially the same effect by setting MXFAST to 0, but this can
  348   lead to even greater slowdowns in programs using many small chunks.
  349   TRIM_FASTBINS is an in-between compile-time option, that disables
  350   only those chunks bordering topmost memory from being placed in
  351   fastbins.
  352 */
  353 
  354 #ifndef TRIM_FASTBINS
  355 #define TRIM_FASTBINS  0
  356 #endif
  357 
  358 
  359 /* Definition for getting more memory from the OS.  */
  360 #define MORECORE         (*__morecore)
  361 #define MORECORE_FAILURE 0
  362 void * __default_morecore (ptrdiff_t);
  363 void *(*__morecore)(ptrdiff_t) = __default_morecore;
  364 
  365 
  366 #include <string.h>
  367 
  368 /*
  369   MORECORE-related declarations. By default, rely on sbrk
  370 */
  371 
  372 
  373 /*
  374   MORECORE is the name of the routine to call to obtain more memory
  375   from the system.  See below for general guidance on writing
  376   alternative MORECORE functions, as well as a version for WIN32 and a
  377   sample version for pre-OSX macos.
  378 */
  379 
  380 #ifndef MORECORE
  381 #define MORECORE sbrk
  382 #endif
  383 
  384 /*
  385   MORECORE_FAILURE is the value returned upon failure of MORECORE
  386   as well as mmap. Since it cannot be an otherwise valid memory address,
  387   and must reflect values of standard sys calls, you probably ought not
  388   try to redefine it.
  389 */
  390 
  391 #ifndef MORECORE_FAILURE
  392 #define MORECORE_FAILURE (-1)
  393 #endif
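/* Purely illustrative sketch (not part of the original sources) of a
   hand-crafted MORECORE replacement of the kind discussed above: it hands
   out pieces of a fixed static buffer, never trims, and reports failure
   via MORECORE_FAILURE.  The buffer size and all names are invented for
   the example.

     static char example_pool[1024 * 1024];   // hypothetical backing store
     static size_t example_used;

     static void *
     example_morecore (ptrdiff_t increment)
     {
       if (increment < 0                      // cannot give memory back
           || example_used + (size_t) increment > sizeof (example_pool))
         return (void *) MORECORE_FAILURE;
       void *result = example_pool + example_used;
       example_used += (size_t) increment;
       return result;
     }

   Such a replacement would typically also define MORECORE_CANNOT_TRIM and
   review the MORECORE_CONTIGUOUS and MORECORE_CLEARS settings below.  */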
  394 
  395 /*
  396   If MORECORE_CONTIGUOUS is true, take advantage of fact that
  397   consecutive calls to MORECORE with positive arguments always return
  398   contiguous increasing addresses.  This is true of unix sbrk.  Even
  399   if not defined, when regions happen to be contiguous, malloc will
  400   permit allocations spanning regions obtained from different
  401   calls. But defining this when applicable enables some stronger
  402   consistency checks and space efficiencies.
  403 */
  404 
  405 #ifndef MORECORE_CONTIGUOUS
  406 #define MORECORE_CONTIGUOUS 1
  407 #endif
  408 
  409 /*
  410   Define MORECORE_CANNOT_TRIM if your version of MORECORE
  411   cannot release space back to the system when given negative
  412   arguments. This is generally necessary only if you are using
  413   a hand-crafted MORECORE function that cannot handle negative arguments.
  414 */
  415 
  416 /* #define MORECORE_CANNOT_TRIM */
  417 
  418 /*  MORECORE_CLEARS           (default 1)
  419      The degree to which the routine mapped to MORECORE zeroes out
  420      memory: never (0), only for newly allocated space (1) or always
  421      (2).  The distinction between (1) and (2) is necessary because on
  422      some systems, if the application first decrements and then
  423      increments the break value, the contents of the reallocated space
  424      are unspecified.
  425  */
  426 
  427 #ifndef MORECORE_CLEARS
  428 # define MORECORE_CLEARS 1
  429 #endif
  430 
  431 
  432 /*
  433    MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
  434    sbrk fails, and mmap is used as a backup.  The value must be a
  435    multiple of page size.  This backup strategy generally applies only
  436    when systems have "holes" in address space, so sbrk cannot perform
   437    contiguous expansion, but there is still space available on the system.
  438    On systems for which this is known to be useful (i.e. most linux
  439    kernels), this occurs only when programs allocate huge amounts of
  440    memory.  Between this, and the fact that mmap regions tend to be
  441    limited, the size should be large, to avoid too many mmap calls and
  442    thus avoid running out of kernel resources.  */
  443 
  444 #ifndef MMAP_AS_MORECORE_SIZE
  445 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
  446 #endif
  447 
  448 /*
  449   Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  450   large blocks.
  451 */
  452 
  453 #ifndef HAVE_MREMAP
  454 #define HAVE_MREMAP 0
  455 #endif
  456 
  457 /* We may need to support __malloc_initialize_hook for backwards
  458    compatibility.  */
  459 
  460 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
  461 # define HAVE_MALLOC_INIT_HOOK 1
  462 #else
  463 # define HAVE_MALLOC_INIT_HOOK 0
  464 #endif
  465 
  466 
  467 /*
  468   This version of malloc supports the standard SVID/XPG mallinfo
  469   routine that returns a struct containing usage properties and
  470   statistics. It should work on any SVID/XPG compliant system that has
  471   a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
  472   install such a thing yourself, cut out the preliminary declarations
  473   as described above and below and save them in a malloc.h file. But
  474   there's no compelling reason to bother to do this.)
  475 
  476   The main declaration needed is the mallinfo struct that is returned
   477   (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  478   bunch of fields that are not even meaningful in this version of
   479   malloc.  These fields are instead filled by mallinfo() with
  480   other numbers that might be of interest.
  481 */
  482 
  483 
  484 /* ---------- description of public routines ------------ */
  485 
  486 /*
  487   malloc(size_t n)
  488   Returns a pointer to a newly allocated chunk of at least n bytes, or null
  489   if no space is available. Additionally, on failure, errno is
  490   set to ENOMEM on ANSI C systems.
  491 
   492   If n is zero, malloc returns a minimum-sized chunk. (The minimum
  493   size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  494   systems.)  On most systems, size_t is an unsigned type, so calls
  495   with negative arguments are interpreted as requests for huge amounts
  496   of space, which will often fail. The maximum supported value of n
  497   differs across systems, but is in all cases less than the maximum
  498   representable value of a size_t.
  499 */
  500 void*  __libc_malloc(size_t);
  501 libc_hidden_proto (__libc_malloc)
  502 
  503 /*
  504   free(void* p)
  505   Releases the chunk of memory pointed to by p, that had been previously
  506   allocated using malloc or a related routine such as realloc.
  507   It has no effect if p is null. It can have arbitrary (i.e., bad!)
  508   effects if p has already been freed.
  509 
   510   Unless disabled (using mallopt), freeing very large spaces will,
   511   when possible, automatically trigger operations that give
  512   back unused memory to the system, thus reducing program footprint.
  513 */
  514 void     __libc_free(void*);
  515 libc_hidden_proto (__libc_free)
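/* Minimal usage sketch (illustrative only) for the two routines above:

     #include <errno.h>
     #include <stdio.h>
     #include <stdlib.h>
     #include <string.h>

     char *buf = malloc (256);
     if (buf == NULL)
       fprintf (stderr, "malloc: %s\n", strerror (errno));  // ENOMEM
     else
       {
         strcpy (buf, "hello");
         free (buf);          // free (NULL) would also be harmless
       }
*/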
  516 
  517 /*
  518   calloc(size_t n_elements, size_t element_size);
  519   Returns a pointer to n_elements * element_size bytes, with all locations
  520   set to zero.
  521 */
  522 void*  __libc_calloc(size_t, size_t);
  523 
  524 /*
  525   realloc(void* p, size_t n)
  526   Returns a pointer to a chunk of size n that contains the same data
  527   as does chunk p up to the minimum of (n, p's size) bytes, or null
  528   if no space is available.
  529 
  530   The returned pointer may or may not be the same as p. The algorithm
  531   prefers extending p when possible, otherwise it employs the
  532   equivalent of a malloc-copy-free sequence.
  533 
  534   If p is null, realloc is equivalent to malloc.
  535 
  536   If space is not available, realloc returns null, errno is set (if on
  537   ANSI) and p is NOT freed.
  538 
   539   If n is for fewer bytes than already held by p, the newly unused
  540   space is lopped off and freed if possible.  Unless the #define
  541   REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
  542   zero (re)allocates a minimum-sized chunk.
  543 
  544   Large chunks that were internally obtained via mmap will always be
  545   grown using malloc-copy-free sequences unless the system supports
  546   MREMAP (currently only linux).
  547 
  548   The old unix realloc convention of allowing the last-free'd chunk
  549   to be used as an argument to realloc is not supported.
  550 */
  551 void*  __libc_realloc(void*, size_t);
  552 libc_hidden_proto (__libc_realloc)
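/* Illustrative sketch of the "p is NOT freed on failure" rule above: keep
   the old pointer until realloc is known to have succeeded.

     void *tmp = realloc (p, new_size);
     if (tmp == NULL)
       handle_out_of_memory ();   // hypothetical handler; p is still valid
     else
       p = tmp;
*/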
  553 
  554 /*
  555   memalign(size_t alignment, size_t n);
  556   Returns a pointer to a newly allocated chunk of n bytes, aligned
  557   in accord with the alignment argument.
  558 
  559   The alignment argument should be a power of two. If the argument is
  560   not a power of two, the nearest greater power is used.
  561   8-byte alignment is guaranteed by normal malloc calls, so don't
  562   bother calling memalign with an argument of 8 or less.
  563 
  564   Overreliance on memalign is a sure way to fragment space.
  565 */
  566 void*  __libc_memalign(size_t, size_t);
  567 libc_hidden_proto (__libc_memalign)
  568 
  569 /*
  570   valloc(size_t n);
  571   Equivalent to memalign(pagesize, n), where pagesize is the page
  572   size of the system. If the pagesize is unknown, 4096 is used.
  573 */
  574 void*  __libc_valloc(size_t);
  575 
  576 
  577 
  578 /*
  579   mallopt(int parameter_number, int parameter_value)
   580   Sets tunable parameters.  The format is to provide a
  581   (parameter-number, parameter-value) pair.  mallopt then sets the
  582   corresponding parameter to the argument value if it can (i.e., so
  583   long as the value is meaningful), and returns 1 if successful else
  584   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  585   normally defined in malloc.h.  Only one of these (M_MXFAST) is used
  586   in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  587   so setting them has no effect. But this malloc also supports four
  588   other options in mallopt. See below for details.  Briefly, supported
  589   parameters are as follows (listed defaults are for "typical"
  590   configurations).
  591 
  592   Symbol            param #   default    allowed param values
  593   M_MXFAST          1         64         0-80  (0 disables fastbins)
  594   M_TRIM_THRESHOLD -1         128*1024   any   (-1U disables trimming)
  595   M_TOP_PAD        -2         0          any
  596   M_MMAP_THRESHOLD -3         128*1024   any   (or 0 if no MMAP support)
  597   M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
  598 */
  599 int      __libc_mallopt(int, int);
  600 libc_hidden_proto (__libc_mallopt)
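/* Usage sketch (illustrative only) using the parameters tabulated above;
   the specific values are arbitrary examples, not recommendations:

     #include <malloc.h>

     mallopt (M_MXFAST, 0);                    // disable fastbins entirely
     mallopt (M_TRIM_THRESHOLD, 256 * 1024);   // trim less aggressively
     mallopt (M_MMAP_THRESHOLD, 256 * 1024);   // mmap only larger requests
     mallopt (M_MMAP_MAX, 0);                  // or disable mmap use

   Each call returns 1 on success and 0 if the value was rejected.  */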
  601 
  602 
  603 /*
  604   mallinfo()
  605   Returns (by copy) a struct containing various summary statistics:
  606 
  607   arena:     current total non-mmapped bytes allocated from system
  608   ordblks:   the number of free chunks
  609   smblks:    the number of fastbin blocks (i.e., small chunks that
   610            have been freed but not reused or consolidated)
  611   hblks:     current number of mmapped regions
  612   hblkhd:    total bytes held in mmapped regions
  613   usmblks:   always 0
  614   fsmblks:   total bytes held in fastbin blocks
  615   uordblks:  current total allocated space (normal or mmapped)
  616   fordblks:  total free space
  617   keepcost:  the maximum number of bytes that could ideally be released
  618            back to system via malloc_trim. ("ideally" means that
  619            it ignores page restrictions etc.)
  620 
  621   Because these fields are ints, but internal bookkeeping may
  622   be kept as longs, the reported values may wrap around zero and
  623   thus be inaccurate.
  624 */
  625 struct mallinfo __libc_mallinfo(void);
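/* Illustrative sketch of reading a few of the fields described above:

     #include <malloc.h>
     #include <stdio.h>

     struct mallinfo mi = mallinfo ();
     printf ("non-mmapped bytes from system: %d\n", mi.arena);
     printf ("free chunks:                   %d\n", mi.ordblks);
     printf ("total free bytes:              %d\n", mi.fordblks);

   As noted above, the int fields may wrap in programs whose bookkeeping
   exceeds what an int can represent.  */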
  626 
  627 
  628 /*
  629   pvalloc(size_t n);
  630   Equivalent to valloc(minimum-page-that-holds(n)), that is,
  631   round up n to nearest pagesize.
  632  */
  633 void*  __libc_pvalloc(size_t);
  634 
  635 /*
  636   malloc_trim(size_t pad);
  637 
  638   If possible, gives memory back to the system (via negative
  639   arguments to sbrk) if there is unused memory at the `high' end of
  640   the malloc pool. You can call this after freeing large blocks of
  641   memory to potentially reduce the system-level memory requirements
  642   of a program. However, it cannot guarantee to reduce memory. Under
  643   some allocation patterns, some large free blocks of memory will be
  644   locked between two used chunks, so they cannot be given back to
  645   the system.
  646 
  647   The `pad' argument to malloc_trim represents the amount of free
  648   trailing space to leave untrimmed. If this argument is zero,
  649   only the minimum amount of memory to maintain internal data
  650   structures will be left (one page or less). Non-zero arguments
  651   can be supplied to maintain enough trailing space to service
  652   future expected allocations without having to re-obtain memory
  653   from the system.
  654 
  655   Malloc_trim returns 1 if it actually released any memory, else 0.
  656   On systems that do not support "negative sbrks", it will always
  657   return 0.
  658 */
  659 int      __malloc_trim(size_t);
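/* Illustrative sketch: after releasing a large working set, ask the
   allocator to return trailing free memory to the system, keeping an
   arbitrary example pad of 64 KiB for future requests.

     #include <malloc.h>

     free (big_buffer);                       // big_buffer is hypothetical
     int released = malloc_trim (64 * 1024);  // 1 if memory was given back
*/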
  660 
  661 /*
  662   malloc_usable_size(void* p);
  663 
  664   Returns the number of bytes you can actually use in
  665   an allocated chunk, which may be more than you requested (although
  666   often not) due to alignment and minimum size constraints.
  667   You can use this many bytes without worrying about
  668   overwriting other allocated objects. This is not a particularly great
  669   programming practice. malloc_usable_size can be more useful in
  670   debugging and assertions, for example:
  671 
  672   p = malloc(n);
  673   assert(malloc_usable_size(p) >= 256);
  674 
  675 */
  676 size_t   __malloc_usable_size(void*);
  677 
  678 /*
  679   malloc_stats();
  680   Prints on stderr the amount of space obtained from the system (both
  681   via sbrk and mmap), the maximum amount (which may be more than
  682   current if malloc_trim and/or munmap got called), and the current
  683   number of bytes allocated via malloc (or realloc, etc) but not yet
  684   freed. Note that this is the number of bytes allocated, not the
  685   number requested. It will be larger than the number requested
  686   because of alignment and bookkeeping overhead. Because it includes
  687   alignment wastage as being in use, this figure may be greater than
  688   zero even when no user-level chunks are allocated.
  689 
  690   The reported current and maximum system memory can be inaccurate if
  691   a program makes other calls to system memory allocation functions
  692   (normally sbrk) outside of malloc.
  693 
  694   malloc_stats prints only the most commonly interesting statistics.
  695   More information can be obtained by calling mallinfo.
  696 
  697 */
  698 void     __malloc_stats(void);
  699 
  700 /*
  701   posix_memalign(void **memptr, size_t alignment, size_t size);
  702 
  703   POSIX wrapper like memalign(), checking for validity of size.
  704 */
  705 int      __posix_memalign(void **, size_t, size_t);
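/* Illustrative usage sketch: the alignment must be a power of two and a
   multiple of sizeof (void *); errors are reported through the return
   value (EINVAL or ENOMEM) rather than via errno.

     #include <stdlib.h>

     void *p = NULL;
     if (posix_memalign (&p, 64, 1024) == 0)  // 64-byte aligned, 1 KiB
       {
         // ... use p ...
         free (p);
       }
*/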
  706 
  707 /* mallopt tuning options */
  708 
  709 /*
  710   M_MXFAST is the maximum request size used for "fastbins", special bins
  711   that hold returned chunks without consolidating their spaces. This
  712   enables future requests for chunks of the same size to be handled
  713   very quickly, but can increase fragmentation, and thus increase the
  714   overall memory footprint of a program.
  715 
  716   This malloc manages fastbins very conservatively yet still
  717   efficiently, so fragmentation is rarely a problem for values less
  718   than or equal to the default.  The maximum supported value of MXFAST
  719   is 80. You wouldn't want it any higher than this anyway.  Fastbins
  720   are designed especially for use with many small structs, objects or
  721   strings -- the default handles structs/objects/arrays with sizes up
  722   to 8 4byte fields, or small strings representing words, tokens,
  723   etc. Using fastbins for larger objects normally worsens
  724   fragmentation without improving speed.
  725 
  726   M_MXFAST is set in REQUEST size units. It is internally used in
  727   chunksize units, which adds padding and alignment.  You can reduce
  728   M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  729   algorithm to be a closer approximation of fifo-best-fit in all cases,
  730   not just for larger requests, but will generally cause it to be
  731   slower.
  732 */
  733 
  734 
  735 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
  736 #ifndef M_MXFAST
  737 #define M_MXFAST            1
  738 #endif
  739 
  740 #ifndef DEFAULT_MXFAST
  741 #define DEFAULT_MXFAST     (64 * SIZE_SZ / 4)
  742 #endif
  743 
  744 
  745 /*
  746   M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  747   to keep before releasing via malloc_trim in free().
  748 
  749   Automatic trimming is mainly useful in long-lived programs.
  750   Because trimming via sbrk can be slow on some systems, and can
  751   sometimes be wasteful (in cases where programs immediately
  752   afterward allocate more large chunks) the value should be high
  753   enough so that your overall system performance would improve by
  754   releasing this much memory.
  755 
  756   The trim threshold and the mmap control parameters (see below)
  757   can be traded off with one another. Trimming and mmapping are
  758   two different ways of releasing unused memory back to the
  759   system. Between these two, it is often possible to keep
  760   system-level demands of a long-lived program down to a bare
  761   minimum. For example, in one test suite of sessions measuring
  762   the XF86 X server on Linux, using a trim threshold of 128K and a
  763   mmap threshold of 192K led to near-minimal long term resource
  764   consumption.
  765 
  766   If you are using this malloc in a long-lived program, it should
  767   pay to experiment with these values.  As a rough guide, you
  768   might set to a value close to the average size of a process
  769   (program) running on your system.  Releasing this much memory
  770   would allow such a process to run in memory.  Generally, it's
   771   worth it to tune for trimming rather than memory mapping when a
  772   program undergoes phases where several large chunks are
  773   allocated and released in ways that can reuse each other's
  774   storage, perhaps mixed with phases where there are no such
  775   chunks at all.  And in well-behaved long-lived programs,
  776   controlling release of large blocks via trimming versus mapping
  777   is usually faster.
  778 
  779   However, in most programs, these parameters serve mainly as
  780   protection against the system-level effects of carrying around
  781   massive amounts of unneeded memory. Since frequent calls to
  782   sbrk, mmap, and munmap otherwise degrade performance, the default
  783   parameters are set to relatively high values that serve only as
  784   safeguards.
  785 
   786   The trim value must be greater than the page size to have any
   787   useful effect.  To disable trimming completely, you can set it to
   788   (unsigned long)(-1).
  789 
  790   Trim settings interact with fastbin (MXFAST) settings: Unless
  791   TRIM_FASTBINS is defined, automatic trimming never takes place upon
  792   freeing a chunk with size less than or equal to MXFAST. Trimming is
  793   instead delayed until subsequent freeing of larger chunks. However,
  794   you can still force an attempted trim by calling malloc_trim.
  795 
  796   Also, trimming is not generally possible in cases where
  797   the main arena is obtained via mmap.
  798 
  799   Note that the trick some people use of mallocing a huge space and
  800   then freeing it at program startup, in an attempt to reserve system
  801   memory, doesn't have the intended effect under automatic trimming,
  802   since that memory will immediately be returned to the system.
  803 */
  804 
  805 #define M_TRIM_THRESHOLD       -1
  806 
  807 #ifndef DEFAULT_TRIM_THRESHOLD
  808 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
  809 #endif
  810 
  811 /*
  812   M_TOP_PAD is the amount of extra `padding' space to allocate or
  813   retain whenever sbrk is called. It is used in two ways internally:
  814 
  815   * When sbrk is called to extend the top of the arena to satisfy
  816   a new malloc request, this much padding is added to the sbrk
  817   request.
  818 
  819   * When malloc_trim is called automatically from free(),
  820   it is used as the `pad' argument.
  821 
  822   In both cases, the actual amount of padding is rounded
  823   so that the end of the arena is always a system page boundary.
  824 
  825   The main reason for using padding is to avoid calling sbrk so
  826   often. Having even a small pad greatly reduces the likelihood
  827   that nearly every malloc request during program start-up (or
  828   after trimming) will invoke sbrk, which needlessly wastes
  829   time.
  830 
  831   Automatic rounding-up to page-size units is normally sufficient
  832   to avoid measurable overhead, so the default is 0.  However, in
  833   systems where sbrk is relatively slow, it can pay to increase
  834   this value, at the expense of carrying around more memory than
  835   the program needs.
  836 */
  837 
  838 #define M_TOP_PAD              -2
  839 
  840 #ifndef DEFAULT_TOP_PAD
  841 #define DEFAULT_TOP_PAD        (0)
  842 #endif
  843 
  844 /*
  845   MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
  846   adjusted MMAP_THRESHOLD.
  847 */
  848 
  849 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
  850 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
  851 #endif
  852 
  853 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
  854   /* For 32-bit platforms we cannot increase the maximum mmap
  855      threshold much because it is also the minimum value for the
  856      maximum heap size and its alignment.  Going above 512k (i.e., 1M
  857      for new heaps) wastes too much address space.  */
  858 # if __WORDSIZE == 32
  859 #  define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
  860 # else
  861 #  define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
  862 # endif
  863 #endif
  864 
  865 /*
  866   M_MMAP_THRESHOLD is the request size threshold for using mmap()
  867   to service a request. Requests of at least this size that cannot
  868   be allocated using already-existing space will be serviced via mmap.
  869   (If enough normal freed space already exists it is used instead.)
  870 
  871   Using mmap segregates relatively large chunks of memory so that
  872   they can be individually obtained and released from the host
  873   system. A request serviced through mmap is never reused by any
  874   other request (at least not directly; the system may just so
  875   happen to remap successive requests to the same locations).
  876 
  877   Segregating space in this way has the benefits that:
  878 
  879    1. Mmapped space can ALWAYS be individually released back
  880       to the system, which helps keep the system level memory
  881       demands of a long-lived program low.
  882    2. Mapped memory can never become `locked' between
  883       other chunks, as can happen with normally allocated chunks, which
  884       means that even trimming via malloc_trim would not release them.
  885    3. On some systems with "holes" in address spaces, mmap can obtain
  886       memory that sbrk cannot.
  887 
  888   However, it has the disadvantages that:
  889 
  890    1. The space cannot be reclaimed, consolidated, and then
  891       used to service later requests, as happens with normal chunks.
  892    2. It can lead to more wastage because of mmap page alignment
  893       requirements
  894    3. It causes malloc performance to be more dependent on host
  895       system memory management support routines which may vary in
  896       implementation quality and may impose arbitrary
  897       limitations. Generally, servicing a request via normal
  898       malloc steps is faster than going through a system's mmap.
  899 
  900   The advantages of mmap nearly always outweigh disadvantages for
  901   "large" chunks, but the value of "large" varies across systems.  The
  902   default is an empirically derived value that works well in most
  903   systems.
  904 
  905 
  906   Update in 2006:
  907   The above was written in 2001. Since then the world has changed a lot.
  908   Memory got bigger. Applications got bigger. The virtual address space
  909   layout in 32 bit linux changed.
  910 
  911   In the new situation, brk() and mmap space is shared and there are no
  912   artificial limits on brk size imposed by the kernel. What is more,
  913   applications have started using transient allocations larger than the
   914   128Kb threshold that was imagined in 2001.
  915 
  916   The price for mmap is also high now; each time glibc mmaps from the
  917   kernel, the kernel is forced to zero out the memory it gives to the
  918   application. Zeroing memory is expensive and eats a lot of cache and
   919   memory bandwidth. This has nothing to do with the efficiency of the
   920   virtual memory system; when handing out mmapped pages the kernel
   921   simply has no choice but to zero them.
  922 
  923   In 2001, the kernel had a maximum size for brk() which was about 800
  924   megabytes on 32 bit x86, at that point brk() would hit the first
   925   mmapped shared libraries and couldn't expand anymore. With current 2.6
  926   kernels, the VA space layout is different and brk() and mmap
  927   both can span the entire heap at will.
  928 
  929   Rather than using a static threshold for the brk/mmap tradeoff,
  930   we are now using a simple dynamic one. The goal is still to avoid
  931   fragmentation. The old goals we kept are
  932   1) try to get the long lived large allocations to use mmap()
  933   2) really large allocations should always use mmap()
  934   and we're adding now:
  935   3) transient allocations should use brk() to avoid forcing the kernel
  936      having to zero memory over and over again
  937 
  938   The implementation works with a sliding threshold, which is by default
   939   limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
  940   out at 128Kb as per the 2001 default.
  941 
  942   This allows us to satisfy requirement 1) under the assumption that long
  943   lived allocations are made early in the process' lifespan, before it has
  944   started doing dynamic allocations of the same size (which will
  945   increase the threshold).
  946 
   947   The upper bound on the threshold satisfies requirement 2)
  948 
  949   The threshold goes up in value when the application frees memory that was
  950   allocated with the mmap allocator. The idea is that once the application
  951   starts freeing memory of a certain size, it's highly probable that this is
  952   a size the application uses for transient allocations. This estimator
  953   is there to satisfy the new third requirement.
  954 
  955 */
  956 
  957 #define M_MMAP_THRESHOLD      -3
  958 
  959 #ifndef DEFAULT_MMAP_THRESHOLD
  960 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
  961 #endif
  962 
  963 /*
  964   M_MMAP_MAX is the maximum number of requests to simultaneously
  965   service using mmap. This parameter exists because
  966   some systems have a limited number of internal tables for
  967   use by mmap, and using more than a few of them may degrade
  968   performance.
  969 
  970   The default is set to a value that serves only as a safeguard.
  971   Setting to 0 disables use of mmap for servicing large requests.
  972 */
  973 
  974 #define M_MMAP_MAX             -4
  975 
  976 #ifndef DEFAULT_MMAP_MAX
  977 #define DEFAULT_MMAP_MAX       (65536)
  978 #endif
  979 
  980 #include <malloc.h>
  981 
  982 #ifndef RETURN_ADDRESS
  983 #define RETURN_ADDRESS(X_) (NULL)
  984 #endif
  985 
  986 /* Forward declarations.  */
  987 struct malloc_chunk;
  988 typedef struct malloc_chunk* mchunkptr;
  989 
  990 /* Internal routines.  */
  991 
  992 static void*  _int_malloc(mstate, size_t);
  993 static void     _int_free(mstate, mchunkptr, int);
  994 static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
  995                INTERNAL_SIZE_T);
  996 static void*  _int_memalign(mstate, size_t, size_t);
  997 static void*  _mid_memalign(size_t, size_t, void *);
  998 
  999 static void malloc_printerr(const char *str) __attribute__ ((noreturn));
 1000 
 1001 static void* mem2mem_check(void *p, size_t sz);
 1002 static void top_check(void);
 1003 static void munmap_chunk(mchunkptr p);
 1004 #if HAVE_MREMAP
 1005 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
 1006 #endif
 1007 
 1008 static void*   malloc_check(size_t sz, const void *caller);
 1009 static void      free_check(void* mem, const void *caller);
 1010 static void*   realloc_check(void* oldmem, size_t bytes,
 1011                    const void *caller);
 1012 static void*   memalign_check(size_t alignment, size_t bytes,
 1013                 const void *caller);
 1014 
 1015 /* ------------------ MMAP support ------------------  */
 1016 
 1017 
 1018 #include <fcntl.h>
 1019 #include <sys/mman.h>
 1020 
 1021 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
 1022 # define MAP_ANONYMOUS MAP_ANON
 1023 #endif
 1024 
 1025 #ifndef MAP_NORESERVE
 1026 # define MAP_NORESERVE 0
 1027 #endif
 1028 
 1029 #define MMAP(addr, size, prot, flags) \
 1030  __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
 1031 
 1032 
 1033 /*
 1034   -----------------------  Chunk representations -----------------------
 1035 */
 1036 
 1037 
 1038 /*
 1039   This struct declaration is misleading (but accurate and necessary).
 1040   It declares a "view" into memory allowing access to necessary
 1041   fields at known offsets from a given base. See explanation below.
 1042 */
 1043 
 1044 struct malloc_chunk {
 1045 
 1046   INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
 1047   INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */
 1048 
 1049   struct malloc_chunk* fd;         /* double links -- used only if free. */
 1050   struct malloc_chunk* bk;
 1051 
 1052   /* Only used for large blocks: pointer to next larger size.  */
 1053   struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
 1054   struct malloc_chunk* bk_nextsize;
 1055 };
 1056 
 1057 
 1058 /*
 1059    malloc_chunk details:
 1060 
 1061     (The following includes lightly edited explanations by Colin Plumb.)
 1062 
 1063     Chunks of memory are maintained using a `boundary tag' method as
 1064     described in e.g., Knuth or Standish.  (See the paper by Paul
 1065     Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
 1066     survey of such techniques.)  Sizes of free chunks are stored both
 1067     in the front of each chunk and at the end.  This makes
 1068     consolidating fragmented chunks into bigger chunks very fast.  The
 1069     size fields also hold bits representing whether chunks are free or
 1070     in use.
 1071 
 1072     An allocated chunk looks like this:
 1073 
 1074 
 1075     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1076         |             Size of previous chunk, if unallocated (P clear)  |
 1077         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1078         |             Size of chunk, in bytes                     |A|M|P|
 1079       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1080         |             User data starts here...                          .
 1081         .                                                               .
 1082         .             (malloc_usable_size() bytes)                      .
 1083         .                                                               |
 1084 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1085         |             (size of chunk, but used for application data)    |
 1086         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1087         |             Size of next chunk, in bytes                |A|0|1|
 1088         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1089 
 1090     Where "chunk" is the front of the chunk for the purpose of most of
 1091     the malloc code, but "mem" is the pointer that is returned to the
 1092     user.  "Nextchunk" is the beginning of the next contiguous chunk.
 1093 
 1094     Chunks always begin on even word boundaries, so the mem portion
 1095     (which is returned to the user) is also on an even word boundary, and
 1096     thus at least double-word aligned.
 1097 
 1098     Free chunks are stored in circular doubly-linked lists, and look like this:
 1099 
 1100     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1101         |             Size of previous chunk, if unallocated (P clear)  |
 1102         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1103     `head:' |             Size of chunk, in bytes                     |A|0|P|
 1104       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1105         |             Forward pointer to next chunk in list             |
 1106         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1107         |             Back pointer to previous chunk in list            |
 1108         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1109         |             Unused space (may be 0 bytes long)                .
 1110         .                                                               .
 1111         .                                                               |
 1112 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1113     `foot:' |             Size of chunk, in bytes                           |
 1114         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1115         |             Size of next chunk, in bytes                |A|0|0|
 1116         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 1117 
 1118     The P (PREV_INUSE) bit, stored in the unused low-order bit of the
 1119     chunk size (which is always a multiple of two words), is an in-use
 1120     bit for the *previous* chunk.  If that bit is *clear*, then the
 1121     word before the current chunk size contains the previous chunk
 1122     size, and can be used to find the front of the previous chunk.
 1123     The very first chunk allocated always has this bit set,
 1124     preventing access to non-existent (or non-owned) memory. If
 1125     prev_inuse is set for any given chunk, then you CANNOT determine
 1126     the size of the previous chunk, and might even get a memory
 1127     addressing fault when trying to do so.
 1128 
 1129     The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
 1130     main arena, described by the main_arena variable.  When additional
 1131     threads are spawned, each thread receives its own arena (up to a
 1132     configurable limit, after which arenas are reused for multiple
 1133     threads), and the chunks in these arenas have the A bit set.  To
 1134     find the arena for a chunk on such a non-main arena, heap_for_ptr
 1135     performs a bit mask operation and indirection through the ar_ptr
 1136     member of the per-heap header heap_info (see arena.c).
 1137 
 1138     Note that the `foot' of the current chunk is actually represented
 1139     as the prev_size of the NEXT chunk. This makes it easier to
 1140     deal with alignments etc but can be very confusing when trying
 1141     to extend or adapt this code.
 1142 
 1143     The three exceptions to all this are:
 1144 
 1145      1. The special chunk `top' doesn't bother using the
 1146     trailing size field since there is no next contiguous chunk
 1147     that would have to index off it. After initialization, `top'
 1148     is forced to always exist.  If it would become less than
 1149     MINSIZE bytes long, it is replenished.
 1150 
 1151      2. Chunks allocated via mmap, which have the second-lowest-order
 1152     bit M (IS_MMAPPED) set in their size fields.  Because they are
 1153     allocated one-by-one, each must contain its own trailing size
 1154     field.  If the M bit is set, the other bits are ignored
 1155     (because mmapped chunks are neither in an arena, nor adjacent
 1156     to a freed chunk).  The M bit is also used for chunks which
 1157     originally came from a dumped heap via malloc_set_state in
 1158     hooks.c.
 1159 
 1160      3. Chunks in fastbins are treated as allocated chunks from the
 1161     point of view of the chunk allocator.  They are consolidated
 1162     with their neighbors only in bulk, in malloc_consolidate.
 1163 */
 1164 
 1165 /*
 1166   ---------- Size and alignment checks and conversions ----------
 1167 */
 1168 
 1169 /* conversion from malloc headers to user pointers, and back */
 1170 
 1171 #define chunk2mem(p)   ((void*)((char*)(p) + 2*SIZE_SZ))
 1172 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
 1173 
 1174 /* The smallest possible chunk */
 1175 #define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))
 1176 
 1177 /* The smallest size we can malloc is an aligned minimal chunk */
 1178 
 1179 #define MINSIZE  \
 1180   (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
 1181 
 1182 /* Check if m has acceptable alignment */
 1183 
 1184 #define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
 1185 
 1186 #define misaligned_chunk(p) \
 1187   ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
 1188    & MALLOC_ALIGN_MASK)
 1189 
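/* Worked example of the conversions above, assuming a typical 64-bit
   build (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16, MALLOC_ALIGN_MASK == 15):

     chunk2mem (p)  == (char *) p + 16    // skip prev_size and size words
     mem2chunk (m)  == (char *) m - 16    // exact inverse
     MIN_CHUNK_SIZE == 32                 // prev_size, size, fd, bk
     MINSIZE        == 32                 // already 16-byte aligned

   On a typical 32-bit build the offset halves to 8 and MINSIZE is 16.  */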
 1190 
 1191 /*
 1192    Check if a request is so large that it would wrap around zero when
 1193    padded and aligned. To simplify some other code, the bound is made
 1194    low enough so that adding MINSIZE will also not wrap around zero.
 1195  */
 1196 
 1197 #define REQUEST_OUT_OF_RANGE(req)                                 \
 1198   ((unsigned long) (req) >=                           \
 1199    (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
 1200 
 1201 /* pad request bytes into a usable size -- internal version */
 1202 
 1203 #define request2size(req)                                         \
 1204   (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
 1205    MINSIZE :                                                      \
 1206    ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
 1207 
 1208 /* Same, except also perform an argument and result check.  First, we check
 1209    that the padding done by request2size didn't result in an integer
 1210    overflow.  Then we check (using REQUEST_OUT_OF_RANGE) that the resulting
 1211    size isn't so large that a later alignment would lead to another integer
 1212    overflow.  */
 1213 #define checked_request2size(req, sz) \
 1214 ({                  \
 1215   (sz) = request2size (req);        \
 1216   if (((sz) < (req))            \
 1217       || REQUEST_OUT_OF_RANGE (sz)) \
 1218     {                   \
 1219       __set_errno (ENOMEM);     \
 1220       return 0;             \
 1221     }                   \
 1222 })
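/* Worked example, again assuming SIZE_SZ == 8 and MALLOC_ALIGN_MASK == 15
   (typical 64-bit build):

     request2size (0)  == MINSIZE == 32          // below-minimum pads up
     request2size (24) == (24 + 8 + 15) & ~15 == 32
     request2size (25) == (25 + 8 + 15) & ~15 == 48

   checked_request2size additionally rejects requests so close to the top
   of the address range that this padding would wrap around zero.  */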
 1223 
 1224 /*
 1225    --------------- Physical chunk operations ---------------
 1226  */
 1227 
 1228 
 1229 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
 1230 #define PREV_INUSE 0x1
 1231 
 1232 /* extract inuse bit of previous chunk */
 1233 #define prev_inuse(p)       ((p)->mchunk_size & PREV_INUSE)
 1234 
 1235 
 1236 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
 1237 #define IS_MMAPPED 0x2
 1238 
 1239 /* check for mmap()'ed chunk */
 1240 #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
 1241 
 1242 
 1243 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
 1244    from a non-main arena.  This is only set immediately before handing
 1245    the chunk to the user, if necessary.  */
 1246 #define NON_MAIN_ARENA 0x4
 1247 
 1248 /* Check for chunk from main arena.  */
 1249 #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
 1250 
 1251 /* Mark a chunk as not being on the main arena.  */
 1252 #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
 1253 
 1254 
 1255 /*
 1256    Bits to mask off when extracting size
 1257 
 1258    Note: IS_MMAPPED is intentionally not masked off from size field in
 1259    macros for which mmapped chunks should never be seen. This should
 1260    cause helpful core dumps to occur if it is tried by accident by
 1261    people extending or adapting this malloc.
 1262  */
 1263 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
 1264 
 1265 /* Get size, ignoring use bits */
 1266 #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
 1267 
 1268 /* Like chunksize, but do not mask SIZE_BITS.  */
 1269 #define chunksize_nomask(p)         ((p)->mchunk_size)
 1270 
 1271 /* Ptr to next physical malloc_chunk. */
 1272 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
 1273 
 1274 /* Size of the chunk below P.  Only valid if !prev_inuse (P).  */
 1275 #define prev_size(p) ((p)->mchunk_prev_size)
 1276 
 1277 /* Set the size of the chunk below P.  Only valid if !prev_inuse (P).  */
 1278 #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
 1279 
 1280 /* Ptr to previous physical malloc_chunk.  Only valid if !prev_inuse (P).  */
 1281 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
 1282 
 1283 /* Treat space at ptr + offset as a chunk */
 1284 #define chunk_at_offset(p, s)  ((mchunkptr) (((char *) (p)) + (s)))
 1285 
 1286 /* extract p's inuse bit */
 1287 #define inuse(p)                                  \
 1288   ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
 1289 
 1290 /* set/clear chunk as being inuse without otherwise disturbing */
 1291 #define set_inuse(p)                                  \
 1292   ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
 1293 
 1294 #define clear_inuse(p)                                \
 1295   ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
 1296 
 1297 
 1298 /* check/set/clear inuse bits in known places */
 1299 #define inuse_bit_at_offset(p, s)                         \
 1300   (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
 1301 
 1302 #define set_inuse_bit_at_offset(p, s)                         \
 1303   (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
 1304 
 1305 #define clear_inuse_bit_at_offset(p, s)                       \
 1306   (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
 1307 
 1308 
 1309 /* Set size at head, without disturbing its use bit */
 1310 #define set_head_size(p, s)  ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
 1311 
 1312 /* Set size/use field */
 1313 #define set_head(p, s)       ((p)->mchunk_size = (s))
 1314 
 1315 /* Set size at footer (only when chunk is not in use) */
 1316 #define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
 1317 
 1318 
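/* Illustrative sketch (kept out of the build): decoding a size field with
   the macros above, assuming a 64-bit configuration (SIZE_SZ == 8).  A
   size field of 0x115 describes a 0x110-byte (272-byte) chunk whose
   previous neighbour is in use and which belongs to a non-main arena.  */
#if 0
static void
example_size_bits (void)
{
  struct malloc_chunk c;
  c.mchunk_size = 0x115;             /* 0x110 | PREV_INUSE | NON_MAIN_ARENA */

  assert (chunksize (&c) == 0x110);  /* use bits masked off                 */
  assert (chunksize_nomask (&c) == 0x115);
  assert (prev_inuse (&c));          /* PREV_INUSE (0x1) is set             */
  assert (!chunk_is_mmapped (&c));   /* IS_MMAPPED (0x2) is clear           */
  assert (!chunk_main_arena (&c));   /* NON_MAIN_ARENA (0x4) is set         */

  /* Note that inuse (&c) would inspect the PREV_INUSE bit of the
     *following* chunk at ((char *) &c) + 0x110, not of c itself.  */
}
#endif
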
 1319 #pragma GCC poison mchunk_size
 1320 #pragma GCC poison mchunk_prev_size
 1321 
 1322 /*
 1323    -------------------- Internal data structures --------------------
 1324 
 1325    All internal state is held in an instance of malloc_state defined
 1326    below. There are no other static variables, except in two optional
 1327    cases:
 1328  * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
 1329  * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
 1330      for mmap.
 1331 
 1332    Beware of lots of tricks that minimize the total bookkeeping space
 1333    requirements. The result is a little over 1K bytes (for 4-byte
 1334    pointers and size_t).
 1335  */
 1336 
 1337 /*
 1338    Bins
 1339 
 1340     An array of bin headers for free chunks. Each bin is doubly
 1341     linked.  The bins are approximately proportionally (log) spaced.
 1342     There are a lot of these bins (128). This may look excessive, but
 1343     works very well in practice.  Most bins hold sizes that are
 1344     unusual as malloc request sizes, but are more usual for fragments
 1345     and consolidated sets of chunks, which is what these bins hold, so
 1346     they can be found quickly.  All procedures maintain the invariant
 1347     that no consolidated chunk physically borders another one, so each
 1348     chunk in a list is known to be preceded and followed by either
 1349     inuse chunks or the ends of memory.
 1350 
 1351     Chunks in bins are kept in size order, with ties going to the
 1352     approximately least recently used chunk. Ordering isn't needed
 1353     for the small bins, which all contain the same-sized chunks, but
 1354     facilitates best-fit allocation for larger chunks. These lists
 1355     are just sequential. Keeping them in order almost never requires
 1356     enough traversal to warrant using fancier ordered data
 1357     structures.
 1358 
 1359     Chunks of the same size are linked with the most
 1360     recently freed at the front, and allocations are taken from the
 1361     back.  This results in LRU (FIFO) allocation order, which tends
 1362     to give each chunk an equal opportunity to be consolidated with
 1363     adjacent freed chunks, resulting in larger free chunks and less
 1364     fragmentation.
 1365 
 1366     To simplify use in double-linked lists, each bin header acts
 1367     as a malloc_chunk. This avoids special-casing for headers.
 1368     But to conserve space and improve locality, we allocate
 1369     only the fd/bk pointers of bins, and then use repositioning tricks
 1370     to treat these as the fields of a malloc_chunk*.
 1371  */
 1372 
 1373 typedef struct malloc_chunk *mbinptr;
 1374 
 1375 /* addressing -- note that bin_at(0) does not exist */
 1376 #define bin_at(m, i) \
 1377   (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))               \
 1378              - offsetof (struct malloc_chunk, fd))
 1379 
 1380 /* analog of ++bin */
 1381 #define next_bin(b)  ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
 1382 
 1383 /* Reminders about list directionality within bins */
 1384 #define first(b)     ((b)->fd)
 1385 #define last(b)      ((b)->bk)
 1386 
 1387 /*
 1388    Indexing
 1389 
 1390     Bins for sizes < 512 bytes contain chunks of all the same size, spaced
 1391     8 bytes apart. Larger bins are approximately logarithmically spaced:
 1392 
 1393     64 bins of size       8
 1394     32 bins of size      64
 1395     16 bins of size     512
 1396      8 bins of size    4096
 1397      4 bins of size   32768
 1398      2 bins of size  262144
 1399      1 bin  of size what's left
 1400 
 1401     There is actually a little bit of slop in the numbers in bin_index
 1402     for the sake of speed. This makes no difference elsewhere.
 1403 
 1404     The bins top out around 1MB because we expect to service large
 1405     requests via mmap.
 1406 
 1407     Bin 0 does not exist.  Bin 1 is the unordered list; if that would be
 1408     a valid chunk size the small bins are bumped up one.
 1409  */
 1410 
 1411 #define NBINS             128
 1412 #define NSMALLBINS         64
 1413 #define SMALLBIN_WIDTH    MALLOC_ALIGNMENT
 1414 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
 1415 #define MIN_LARGE_SIZE    ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
 1416 
 1417 #define in_smallbin_range(sz)  \
 1418   ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
 1419 
 1420 #define smallbin_index(sz) \
 1421   ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
 1422    + SMALLBIN_CORRECTION)
 1423 
 1424 #define largebin_index_32(sz)                                                \
 1425   (((((unsigned long) (sz)) >> 6) <= 38) ?  56 + (((unsigned long) (sz)) >> 6) :\
 1426    ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
 1427    ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
 1428    ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
 1429    ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
 1430    126)
 1431 
 1432 #define largebin_index_32_big(sz)                                            \
 1433   (((((unsigned long) (sz)) >> 6) <= 45) ?  49 + (((unsigned long) (sz)) >> 6) :\
 1434    ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
 1435    ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
 1436    ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
 1437    ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
 1438    126)
 1439 
 1440 // XXX It remains to be seen whether it is good to keep the widths of
 1441 // XXX the buckets the same or whether it should be scaled by a factor
 1442 // XXX of two as well.
 1443 #define largebin_index_64(sz)                                                \
 1444   (((((unsigned long) (sz)) >> 6) <= 48) ?  48 + (((unsigned long) (sz)) >> 6) :\
 1445    ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
 1446    ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
 1447    ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
 1448    ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
 1449    126)
 1450 
 1451 #define largebin_index(sz) \
 1452   (SIZE_SZ == 8 ? largebin_index_64 (sz)                                     \
 1453    : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz)                     \
 1454    : largebin_index_32 (sz))
 1455 
 1456 #define bin_index(sz) \
 1457   ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
 1458 
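/* Worked examples for the indexing macros (kept out of the build),
   assuming a 64-bit configuration (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16):
   small bins are then 16 bytes wide and cover chunk sizes below
   MIN_LARGE_SIZE == 1024.  */
#if 0
static void
example_bin_index (void)
{
  /* Small bins: the index is simply size / 16.  */
  assert (bin_index (32) == 2);        /* smallest possible chunk        */
  assert (bin_index (512) == 32);
  assert (bin_index (1008) == 63);     /* largest small-bin size         */

  /* Large bins are logarithmically spaced; 1024 and 1040 share the
     first 64-byte-wide large bin.  */
  assert (bin_index (1024) == 64);     /* 48 + (1024 >> 6)               */
  assert (bin_index (1040) == 64);
  assert (bin_index (3200) == 97);     /* 91 + (3200 >> 9)               */
  assert (bin_index (131072) == 123);  /* 119 + (131072 >> 15)           */
}
#endif
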
 1459 /* Take a chunk off a bin list.  */
 1460 static void
 1461 unlink_chunk (mstate av, mchunkptr p)
 1462 {
 1463   if (chunksize (p) != prev_size (next_chunk (p)))
 1464     malloc_printerr ("corrupted size vs. prev_size");
 1465 
 1466   mchunkptr fd = p->fd;
 1467   mchunkptr bk = p->bk;
 1468 
 1469   if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
 1470     malloc_printerr ("corrupted double-linked list");
 1471 
 1472   fd->bk = bk;
 1473   bk->fd = fd;
 1474   if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
 1475     {
 1476       if (p->fd_nextsize->bk_nextsize != p
 1477           || p->bk_nextsize->fd_nextsize != p)
 1478         malloc_printerr ("corrupted double-linked list (not small)");
 1479 
 1480       if (fd->fd_nextsize == NULL)
 1481         {
 1482           if (p->fd_nextsize == p)
 1483             fd->fd_nextsize = fd->bk_nextsize = fd;
 1484           else
 1485             {
 1486               fd->fd_nextsize = p->fd_nextsize;
 1487               fd->bk_nextsize = p->bk_nextsize;
 1488               p->fd_nextsize->bk_nextsize = fd;
 1489               p->bk_nextsize->fd_nextsize = fd;
 1490             }
 1491         }
 1492       else
 1493         {
 1494           p->fd_nextsize->bk_nextsize = p->bk_nextsize;
 1495           p->bk_nextsize->fd_nextsize = p->fd_nextsize;
 1496         }
 1497     }
 1498 }
 1499 
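/* Simplified model (kept out of the build, using a stand-in node type
   rather than struct malloc_chunk) of the integrity check performed by
   unlink_chunk above: removing P from a circular doubly linked list is
   only safe if both neighbours still point back at P.  */
#if 0
struct example_node { struct example_node *fd, *bk; };

static void
example_safe_unlink (struct example_node *p)
{
  /* The same invariant unlink_chunk enforces before editing the list.  */
  if (p->fd->bk != p || p->bk->fd != p)
    malloc_printerr ("corrupted double-linked list");

  /* Splice P out; its neighbours now point at each other.  */
  p->fd->bk = p->bk;
  p->bk->fd = p->fd;
}
#endif
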
 1500 /*
 1501    Unsorted chunks
 1502 
 1503     All remainders from chunk splits, as well as all returned chunks,
 1504     are first placed in the "unsorted" bin. They are then placed
 1505     in regular bins after malloc gives them ONE chance to be used before
 1506     binning. So, basically, the unsorted_chunks list acts as a queue,
 1507     with chunks being placed on it in free (and malloc_consolidate),
 1508     and taken off (to be either used or placed in bins) in malloc.
 1509 
 1510     The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
 1511     does not have to be taken into account in size comparisons.
 1512  */
 1513 
 1514 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
 1515 #define unsorted_chunks(M)          (bin_at (M, 1))
 1516 
 1517 /*
 1518    Top
 1519 
 1520     The top-most available chunk (i.e., the one bordering the end of
 1521     available memory) is treated specially. It is never included in
 1522     any bin, is used only if no other chunk is available, and is
 1523     released back to the system if it is very large (see
 1524     M_TRIM_THRESHOLD).  Because top initially
 1525     points to its own bin with initial zero size, thus forcing
 1526     extension on the first malloc request, we avoid having any special
 1527     code in malloc to check whether it even exists yet. But we still
 1528     need to do so when getting memory from system, so we make
 1529     initial_top treat the bin as a legal but unusable chunk during the
 1530     interval between initialization and the first call to
 1531     sysmalloc. (This is somewhat delicate, since it relies on
 1532     the 2 preceding words to be zero during this interval as well.)
 1533  */
 1534 
 1535 /* Conveniently, the unsorted bin can be used as dummy top on first call */
 1536 #define initial_top(M)              (unsorted_chunks (M))
 1537 
 1538 /*
 1539    Binmap
 1540 
 1541     To help compensate for the large number of bins, a one-level index
 1542     structure is used for bin-by-bin searching.  `binmap' is a
 1543     bitvector recording whether bins are definitely empty so they can
 1544     be skipped over during traversals.  The bits are NOT always
 1545     cleared as soon as bins are empty, but instead only
 1546     when they are noticed to be empty during traversal in malloc.
 1547  */
 1548 
 1549 /* Conservatively use 32 bits per map word, even on 64-bit systems */
 1550 #define BINMAPSHIFT      5
 1551 #define BITSPERMAP       (1U << BINMAPSHIFT)
 1552 #define BINMAPSIZE       (NBINS / BITSPERMAP)
 1553 
 1554 #define idx2block(i)     ((i) >> BINMAPSHIFT)
 1555 #define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
 1556 
 1557 #define mark_bin(m, i)    ((m)->binmap[idx2block (i)] |= idx2bit (i))
 1558 #define unmark_bin(m, i)  ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
 1559 #define get_binmap(m, i)  ((m)->binmap[idx2block (i)] & idx2bit (i))
 1560 
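/* Worked example (kept out of the build): mapping a bin index to its
   binmap word and bit.  With BINMAPSHIFT == 5 each map word covers 32
   bins, so bin 70 lives in word 70 >> 5 == 2 at bit 70 & 31 == 6;
   mark_bin, unmark_bin and get_binmap then set, clear or test exactly
   that bit in binmap[2].  */
#if 0
static void
example_binmap (void)
{
  assert (BINMAPSIZE == 4);            /* 128 bins / 32 bits per word    */
  assert (idx2block (70) == 2);
  assert (idx2bit (70) == (1U << 6));
}
#endif
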
 1561 /*
 1562    Fastbins
 1563 
 1564     An array of lists holding recently freed small chunks.  Fastbins
 1565     are not doubly linked.  It is faster to single-link them, and
 1566     since chunks are never removed from the middles of these lists,
 1567     double linking is not necessary. Also, unlike regular bins, they
 1568     are not even processed in FIFO order (they use faster LIFO) since
 1569     ordering doesn't much matter in the transient contexts in which
 1570     fastbins are normally used.
 1571 
 1572     Chunks in fastbins keep their inuse bit set, so they cannot
 1573     be consolidated with other free chunks. malloc_consolidate
 1574     releases all chunks in fastbins and consolidates them with
 1575     other free chunks.
 1576  */
 1577 
 1578 typedef struct malloc_chunk *mfastbinptr;
 1579 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
 1580 
 1581 /* offset 2 to use otherwise unindexable first 2 bins */
 1582 #define fastbin_index(sz) \
 1583   ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
 1584 
 1585 
 1586 /* The maximum fastbin request size we support */
 1587 #define MAX_FAST_SIZE     (80 * SIZE_SZ / 4)
 1588 
 1589 #define NFASTBINS  (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
 1590 
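/* Worked example (kept out of the build), assuming a 64-bit
   configuration (SIZE_SZ == 8): the fastbin index is chunk size / 16
   minus 2, so the smallest chunk (32 bytes) lands in fastbin 0, and
   MAX_FAST_SIZE == 160 pads out to a 176-byte chunk, giving
   NFASTBINS == 10.  */
#if 0
static void
example_fastbin_index (void)
{
  assert (fastbin_index (32) == 0);
  assert (fastbin_index (48) == 1);
  assert (MAX_FAST_SIZE == 160);                 /* 80 * 8 / 4           */
  assert (request2size (MAX_FAST_SIZE) == 176);  /* (160 + 8 + 15) & ~15 */
  assert (fastbin_index (176) == 9);
  assert (NFASTBINS == 10);
}
#endif
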
 1591 /*
 1592    FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
 1593    that triggers automatic consolidation of possibly-surrounding
 1594    fastbin chunks. This is a heuristic, so the exact value should not
 1595    matter too much. It is defined at half the default trim threshold as a
 1596    compromise heuristic to only attempt consolidation if it is likely
 1597    to lead to trimming. However, it is not dynamically tunable, since
 1598    consolidation reduces fragmentation surrounding large chunks even
 1599    if trimming is not used.
 1600  */
 1601 
 1602 #define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
 1603 
 1604 /*
 1605    NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
 1606    regions.  Otherwise, contiguity is exploited in merging together,
 1607    when possible, results from consecutive MORECORE calls.
 1608 
 1609    The initial value comes from MORECORE_CONTIGUOUS, but is
 1610    changed dynamically if mmap is ever used as an sbrk substitute.
 1611  */
 1612 
 1613 #define NONCONTIGUOUS_BIT     (2U)
 1614 
 1615 #define contiguous(M)          (((M)->flags & NONCONTIGUOUS_BIT) == 0)
 1616 #define noncontiguous(M)       (((M)->flags & NONCONTIGUOUS_BIT) != 0)
 1617 #define set_noncontiguous(M)   ((M)->flags |= NONCONTIGUOUS_BIT)
 1618 #define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
 1619 
 1620 /* Maximum size of memory handled in fastbins.  */
 1621 static INTERNAL_SIZE_T global_max_fast;
 1622 
 1623 /*
 1624    Set value of max_fast.
 1625    Use impossibly small value if 0.
 1626    Precondition: there are no existing fastbin chunks in the main arena.
 1627    Since do_check_malloc_state () checks this, we call malloc_consolidate ()
 1628    before changing max_fast.  Note other arenas will leak their fast bin
 1629    entries if max_fast is reduced.
 1630  */
 1631 
 1632 #define set_max_fast(s) \
 1633   global_max_fast = (((s) == 0)                           \
 1634                      ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
 1635 
 1636 static inline INTERNAL_SIZE_T
 1637 get_max_fast (void)
 1638 {
 1639   /* Tell the GCC optimizers that global_max_fast is never larger
 1640      than MAX_FAST_SIZE.  This avoids out-of-bounds array accesses in
 1641      _int_malloc after constant propagation of the size parameter.
 1642      (The code never executes because malloc preserves the
 1643      global_max_fast invariant, but the optimizers may not recognize
 1644      this.)  */
 1645   if (global_max_fast > MAX_FAST_SIZE)
 1646     __builtin_unreachable ();
 1647   return global_max_fast;
 1648 }
 1649 
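/* Illustrative sketch of the arithmetic only (kept out of the build; real
   callers consolidate first, as noted above), assuming a 64-bit
   configuration: set_max_fast rounds its argument plus SIZE_SZ down to a
   multiple of MALLOC_ALIGNMENT, so the default DEFAULT_MXFAST
   (64 * SIZE_SZ / 4 == 128 here) stays 128 and requests of up to 120
   bytes, which pad to 128-byte chunks, remain fastbin-eligible.  */
#if 0
static void
example_max_fast (void)
{
  set_max_fast (128);
  assert (get_max_fast () == 128);     /* (128 + 8) & ~15                */
  assert (request2size (120) == 128);  /* still served from fastbins     */
  assert (request2size (121) == 144);  /* too large for fastbins         */

  set_max_fast (0);
  assert (get_max_fast () == 16);      /* SMALLBIN_WIDTH: below MINSIZE,
                                          so fastbins are never used     */
}
#endif
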
 1650 /*
 1651    ----------- Internal state representation and initialization -----------
 1652  */
 1653 
 1654 /*
 1655    have_fastchunks indicates that there are probably some fastbin chunks.
 1656    It is set true on entering a chunk into any fastbin, and cleared early in
 1657    malloc_consolidate.  The value is approximate since it may be set when there
 1658    are no fastbin chunks, or it may be clear even if there are fastbin chunks
 1659    available.  Given its sole purpose is to reduce the number of redundant calls to
 1660    malloc_consolidate, it does not affect correctness.  As a result we can safely
 1661    use relaxed atomic accesses.
 1662  */
 1663 
 1664 
 1665 struct malloc_state
 1666 {
 1667   /* Serialize access.  */
 1668   __libc_lock_define (, mutex);
 1669 
 1670   /* Flags (formerly in max_fast).  */
 1671   int flags;
 1672 
 1673   /* Set if the fastbin chunks contain recently inserted free blocks.  */
 1674   /* Note this is a bool but not all targets support atomics on booleans.  */
 1675   int have_fastchunks;
 1676 
 1677   /* Fastbins */
 1678   mfastbinptr fastbinsY[NFASTBINS];
 1679 
 1680   /* Base of the topmost chunk -- not otherwise kept in a bin */
 1681   mchunkptr top;
 1682 
 1683   /* The remainder from the most recent split of a small request */
 1684   mchunkptr last_remainder;
 1685 
 1686   /* Normal bins packed as described above */
 1687   mchunkptr bins[NBINS * 2 - 2];
 1688 
 1689   /* Bitmap of bins */
 1690   unsigned int binmap[BINMAPSIZE];
 1691 
 1692   /* Linked list */
 1693   struct malloc_state *next;
 1694 
 1695   /* Linked list for free arenas.  Access to this field is serialized
 1696      by free_list_lock in arena.c.  */
 1697   struct malloc_state *next_free;
 1698 
 1699   /* Number of threads attached to this arena.  0 if the arena is on
 1700      the free list.  Access to this field is serialized by
 1701      free_list_lock in arena.c.  */
 1702   INTERNAL_SIZE_T attached_threads;
 1703 
 1704   /* Memory allocated from the system in this arena.  */
 1705   INTERNAL_SIZE_T system_mem;
 1706   INTERNAL_SIZE_T max_system_mem;
 1707 };
 1708 
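/* Illustrative sketch (kept out of the build) of the bin_at repositioning
   trick described earlier: the bins[] array stores only fd/bk pointer
   pairs, and bin_at makes them readable as the fd/bk fields of a fake
   chunk header, which is why NBINS * 2 - 2 pointers suffice for
   NBINS - 1 bins.  */
#if 0
static void
example_bin_at (mstate m)
{
  mbinptr b = bin_at (m, 1);            /* bin 1 is the unsorted bin     */

  assert (&b->fd == &m->bins[0]);       /* fd overlays bins[0]           */
  assert (&b->bk == &m->bins[1]);       /* bk overlays bins[1]           */

  /* In general bin_at (m, i)->fd aliases bins[2 * (i - 1)] and
     bin_at (m, i)->bk aliases bins[2 * (i - 1) + 1].  */
}
#endif
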
 1709 struct malloc_par
 1710 {
 1711   /* Tunable parameters */
 1712   unsigned long trim_threshold;
 1713   INTERNAL_SIZE_T top_pad;
 1714   INTERNAL_SIZE_T mmap_threshold;
 1715   INTERNAL_SIZE_T arena_test;
 1716   INTERNAL_SIZE_T arena_max;
 1717 
 1718   /* Memory map support */
 1719   int n_mmaps;
 1720   int n_mmaps_max;
 1721   int max_n_mmaps;
 1722   /* the mmap_threshold is dynamic, until the user sets
 1723      it manually, at which point we need to disable any
 1724      dynamic behavior. */
 1725   int no_dyn_threshold;
 1726 
 1727   /* Statistics */
 1728   INTERNAL_SIZE_T mmapped_mem;
 1729   INTERNAL_SIZE_T max_mmapped_mem;
 1730 
 1731   /* First address handed out by MORECORE/sbrk.  */
 1732   char *sbrk_base;
 1733 
 1734 #if USE_TCACHE
 1735   /* Maximum number of buckets to use.  */
 1736   size_t tcache_bins;
 1737   size_t tcache_max_bytes;
 1738   /* Maximum number of chunks in each bucket.  */
 1739   size_t tcache_count;
 1740   /* Maximum number of chunks to remove from the unsorted list, which
 1741      aren't used to prefill the cache.  */
 1742   size_t tcache_unsorted_limit;
 1743 #endif
 1744 };
 1745 
 1746 /* There are several instances of this struct ("arenas") in this
 1747    malloc.  If you are adapting this malloc in a way that does NOT use
 1748    a static or mmapped malloc_state, you MUST explicitly zero-fill it
 1749    before using. This malloc relies on the property that malloc_state
 1750    is initialized to all zeroes (as is true of C statics).  */
 1751 
 1752 static struct malloc_state main_arena =
 1753 {
 1754   .mutex = _LIBC_LOCK_INITIALIZER,
 1755   .next = &main_arena,
 1756   .attached_threads = 1
 1757 };
 1758 
 1759 /* These variables are used for undumping support.  Chunks are marked
 1760    as using mmap, but we leave them alone if they fall into this
 1761    range.  NB: The chunk size for these chunks only includes the
 1762    initial size field (of SIZE_SZ bytes), there is no trailing size
 1763    field (unlike with regular mmapped chunks).  */
 1764 static mchunkptr dumped_main_arena_start; /* Inclusive.  */
 1765 static mchunkptr dumped_main_arena_end;   /* Exclusive.  */
 1766 
 1767 /* True if the pointer falls into the dumped arena.  Use this after
 1768    chunk_is_mmapped indicates a chunk is mmapped.  */
 1769 #define DUMPED_MAIN_ARENA_CHUNK(p) \
 1770   ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
 1771 
 1772 /* There is only one instance of the malloc parameters.  */
 1773 
 1774 static struct malloc_par mp_ =
 1775 {
 1776   .top_pad = DEFAULT_TOP_PAD,
 1777   .n_mmaps_max = DEFAULT_MMAP_MAX,
 1778   .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
 1779   .trim_threshold = DEFAULT_TRIM_THRESHOLD,
 1780 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
 1781   .arena_test = NARENAS_FROM_NCORES (1)
 1782 #if USE_TCACHE
 1783   ,
 1784   .tcache_count = TCACHE_FILL_COUNT,
 1785   .tcache_bins = TCACHE_MAX_BINS,
 1786   .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
 1787   .tcache_unsorted_limit = 0 /* No limit.  */
 1788 #endif
 1789 };
 1790 
 1791 /*
 1792    Initialize a malloc_state struct.
 1793 
 1794    This is called from ptmalloc_init () or from _int_new_arena ()
 1795    when creating a new arena.
 1796  */
 1797 
 1798 static void
 1799 malloc_init_state (mstate av)
 1800 {
 1801   int i;
 1802   mbinptr bin;
 1803 
 1804   /* Establish circular links for normal bins */
 1805   for (i = 1; i < NBINS; ++i)
 1806     {
 1807       bin = bin_at (av, i);
 1808       bin->fd = bin->bk = bin;
 1809     }
 1810 
 1811 #if MORECORE_CONTIGUOUS
 1812   if (av != &main_arena)
 1813 #endif
 1814   set_noncontiguous (av);
 1815   if (av == &main_arena)
 1816     set_max_fast (DEFAULT_MXFAST);
 1817   atomic_store_relaxed (&av->have_fastchunks, false);
 1818 
 1819   av->top = initial_top (av);
 1820 }
 1821 
 1822 /*
 1823    Other internal utilities operating on mstates
 1824  */
 1825 
 1826 static void *sysmalloc (INTERNAL_SIZE_T, mstate);
 1827 static int      systrim (size_t, mstate);
 1828 static void     malloc_consolidate (mstate);
 1829 
 1830 
 1831 /* -------------- Early definitions for debugging hooks ---------------- */
 1832 
 1833 /* Define and initialize the hook variables.  These weak definitions must
 1834    appear before any use of the variables in a function (arena.c uses one).  */
 1835 #ifndef weak_variable
 1836 /* In GNU libc we want the hook variables to be weak definitions to
 1837    avoid a problem with Emacs.  */
 1838 # define weak_variable weak_function
 1839 #endif
 1840 
 1841 /* Forward declarations.  */
 1842 static void *malloc_hook_ini (size_t sz,
 1843                               const void *caller) __THROW;
 1844 static void *realloc_hook_ini (void *ptr, size_t sz,
 1845                                const void *caller) __THROW;
 1846 static void *memalign_hook_ini (size_t alignment, size_t sz,
 1847                                 const void *caller) __THROW;
 1848 
 1849 #if HAVE_MALLOC_INIT_HOOK
 1850 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
 1851 compat_symbol (libc, __malloc_initialize_hook,
 1852            __malloc_initialize_hook, GLIBC_2_0);
 1853 #endif
 1854 
 1855 void weak_variable (*__free_hook) (void *__ptr,
 1856                                    const void *) = NULL;
 1857 void *weak_variable (*__malloc_hook)
 1858   (size_t __size, const void *) = malloc_hook_ini;
 1859 void *weak_variable (*__realloc_hook)
 1860   (void *__ptr, size_t __size, const void *)
 1861   = realloc_hook_ini;
 1862 void *weak_variable (*__memalign_hook)
 1863   (size_t __alignment, size_t __size, const void *)
 1864   = memalign_hook_ini;
 1865 void weak_variable (*__after_morecore_hook) (void) = NULL;
 1866 
 1867 /* This function is called from the arena shutdown hook, to free the
 1868    thread cache (if it exists).  */
 1869 static void tcache_thread_shutdown (void);
 1870 
 1871 /* ------------------ Testing support ----------------------------------*/
 1872 
 1873 static int perturb_byte;
 1874 
 1875 static void
 1876 alloc_perturb (char *p, size_t n)
 1877 {
 1878   if (__glibc_unlikely (perturb_byte))
 1879     memset (p, perturb_byte ^ 0xff, n);
 1880 }
 1881 
 1882 static void
 1883 free_perturb (char *p, size_t n)
 1884 {
 1885   if (__glibc_unlikely (perturb_byte))
 1886     memset (p, perturb_byte, n);
 1887 }
 1888 
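/* Illustrative sketch (kept out of the build): perturb_byte is normally
   set elsewhere in glibc via mallopt (M_PERTURB, value) or the
   glibc.malloc.perturb tunable.  New allocations are then filled with
   value ^ 0xff and freed memory with value, which makes accidental use
   of uninitialized or freed heap memory easier to spot.  */
#if 0
static void
example_perturb (void)
{
  char buf[16];

  perturb_byte = 0xaa;
  alloc_perturb (buf, sizeof buf);           /* fills with 0xaa ^ 0xff    */
  assert ((unsigned char) buf[0] == 0x55);

  free_perturb (buf, sizeof buf);            /* fills with 0xaa           */
  assert ((unsigned char) buf[0] == 0xaa);
}
#endif
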
 1889 
 1890 
 1891 #include <stap-probe.h>
 1892 
 1893 /* ------------------- Support for multiple arenas -------------------- */
 1894 #include "arena.c"
 1895 
 1896 /*
 1897    Debugging support
 1898 
 1899    These routines make a number of assertions about the states
 1900    of data structures that should be true at all times. If any
 1901    are not true, it's very likely that a user program has somehow
 1902    trashed memory. (It's also possible that there is a coding error
 1903    in malloc. In which case, please report it!)
 1904  */
 1905 
 1906 #if !MALLOC_DEBUG
 1907 
 1908 # define check_chunk(A, P)
 1909 # define check_free_chunk(A, P)
 1910 # define check_inuse_chunk(A, P)
 1911 # define check_remalloced_chunk(A, P, N)
 1912 # define check_malloced_chunk(A, P, N)
 1913 # define check_malloc_state(A)
 1914 
 1915 #else
 1916 
 1917 # define check_chunk(A, P)              do_check_chunk (A, P)
 1918 # define check_free_chunk(A, P)         do_check_free_chunk (A, P)
 1919 # define check_inuse_chunk(A, P)        do_check_inuse_chunk (A, P)
 1920 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
 1921 # define check_malloced_chunk(A, P, N)   do_check_malloced_chunk (A, P, N)
 1922 # define check_malloc_state(A)         do_check_malloc_state (A)
 1923 
 1924 /*
 1925    Properties of all chunks
 1926  */
 1927 
 1928 static void
 1929 do_check_chunk (mstate av, mchunkptr p)
 1930 {
 1931   unsigned long sz = chunksize (p);
 1932   /* min and max possible addresses assuming contiguous allocation */
 1933   char *max_address = (char *) (av->top) + chunksize (av->top);
 1934   char *min_address = max_address - av->system_mem;
 1935 
 1936   if (!chunk_is_mmapped (p))
 1937     {
 1938       /* Has legal address ... */
 1939       if (p != av->top)
 1940         {
 1941           if (contiguous (av))
 1942             {
 1943               assert (((char *) p) >= min_address);
 1944               assert (((char *) p + sz) <= ((char *) (av->top)));
 1945             }
 1946         }
 1947       else
 1948         {
 1949           /* top size is always at least MINSIZE */
 1950           assert ((unsigned long) (sz) >= MINSIZE);
 1951           /* top predecessor always marked inuse */
 1952           assert (prev_inuse (p));
 1953         }
 1954     }
 1955   else if (!DUMPED_MAIN_ARENA_CHUNK (p))
 1956     {
 1957       /* address is outside main heap  */
 1958       if (contiguous (av) && av->top != initial_top (av))
 1959         {
 1960           assert (((char *) p) < min_address || ((char *) p) >= max_address);
 1961         }
 1962       /* chunk is page-aligned */
 1963       assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
 1964       /* mem is aligned */
 1965       assert (aligned_OK (chunk2mem (p)));
 1966     }
 1967 }
 1968 
 1969 /*
 1970    Properties of free chunks
 1971  */
 1972 
 1973 static void
 1974 do_check_free_chunk (mstate av, mchunkptr p)
 1975 {
 1976   INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
 1977   mchunkptr next = chunk_at_offset (p, sz);
 1978 
 1979   do_check_chunk (av, p);
 1980 
 1981   /* Chunk must claim to be free ... */
 1982   assert (!inuse (p));
 1983   assert (!chunk_is_mmapped (p));
 1984 
 1985   /* Unless a special marker, must have OK fields */
 1986   if ((unsigned long) (sz) >= MINSIZE)
 1987     {
 1988       assert ((sz & MALLOC_ALIGN_MASK) == 0);
 1989       assert (aligned_OK (chunk2mem (p)));
 1990       /* ... matching footer field */
 1991       assert (prev_size (next_chunk (p)) == sz);
 1992       /* ... and is fully consolidated */
 1993       assert (prev_inuse (p));
 1994       assert (next == av->top || inuse (next));
 1995 
 1996       /* ... and has minimally sane links */
 1997       assert (p->fd->bk == p);
 1998       assert (p->bk->fd == p);
 1999     }
 2000   else /* markers are always of size SIZE_SZ */
 2001     assert (sz == SIZE_SZ);
 2002 }
 2003 
 2004 /*
 2005    Properties of inuse chunks
 2006  */
 2007 
 2008 static void
 2009 do_check_inuse_chunk (mstate av, mchunkptr p)
 2010 {
 2011   mchunkptr next;
 2012 
 2013   do_check_chunk (av, p);
 2014 
 2015   if (chunk_is_mmapped (p))
 2016     return; /* mmapped chunks have no next/prev */
 2017 
 2018   /* Check whether it claims to be in use ... */
 2019   assert (inuse (p));
 2020 
 2021   next = next_chunk (p);
 2022 
 2023   /* ... and is surrounded by OK chunks.
 2024      Since more things can be checked with free chunks than inuse ones,
 2025      if an inuse chunk borders them and debug is on, it's worth doing them.
 2026    */
 2027   if (!prev_inuse (p))
 2028     {
 2029       /* Note that we cannot even look at prev unless it is not inuse */
 2030       mchunkptr prv = prev_chunk (p);
 2031       assert (next_chunk (prv) == p);
 2032       do_check_free_chunk (av, prv);
 2033     }
 2034 
 2035   if (next == av->top)
 2036     {
 2037       assert (prev_inuse (next));
 2038       assert (chunksize (next) >= MINSIZE);
 2039     }
 2040   else if (!inuse (next))
 2041     do_check_free_chunk (av, next);
 2042 }
 2043 
 2044 /*
 2045    Properties of chunks recycled from fastbins
 2046  */
 2047 
 2048 static void
 2049 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
 2050 {
 2051   INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
 2052 
 2053   if (!chunk_is_mmapped (p))
 2054     {
 2055       assert (av == arena_for_chunk (p));
 2056       if (chunk_main_arena (p))
 2057         assert (av == &main_arena);
 2058       else
 2059         assert (av != &main_arena);
 2060     }
 2061 
 2062   do_check_inuse_chunk (av, p);
 2063 
 2064   /* Legal size ... */
 2065   assert ((sz & MALLOC_ALIGN_MASK) == 0);
 2066   assert ((unsigned long) (sz) >= MINSIZE);
 2067   /* ... and alignment */
 2068   assert (aligned_OK (chunk2mem (p)));
 2069   /* chunk is less than MINSIZE more than request */
 2070   assert ((long) (sz) - (long) (s) >= 0);
 2071   assert ((long) (sz) - (long) (s + MINSIZE) < 0);
 2072 }
 2073 
 2074 /*
 2075    Properties of nonrecycled chunks at the point they are malloced
 2076  */
 2077 
 2078 static void
 2079 do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
 2080 {
 2081   /* same as recycled case ... */
 2082   do_check_remalloced_chunk (av, p, s);
 2083 
 2084   /*
 2085      ... plus,  must obey implementation invariant that prev_inuse is
 2086      always true of any allocated chunk; i.e., that each allocated
 2087      chunk borders either a previously allocated and still in-use
 2088      chunk, or the base of its memory arena. This is ensured
 2089      by making all allocations from the `lowest' part of any found
 2090      chunk.  This does not necessarily hold however for chunks
 2091      recycled via fastbins.
 2092    */
 2093 
 2094   assert (prev_inuse (p));
 2095 }
 2096 
 2097 
 2098 /*
 2099    Properties of malloc_state.
 2100 
 2101    This may be useful for debugging malloc, as well as detecting user
 2102    programmer errors that somehow write into malloc_state.
 2103 
 2104    If you are extending or experimenting with this malloc, you can
 2105    probably figure out how to hack this routine to print out or
 2106    display chunk addresses, sizes, bins, and other instrumentation.
 2107  */
 2108 
 2109 static void
 2110 do_check_malloc_state (mstate av)
 2111 {
 2112   int i;
 2113   mchunkptr p;
 2114   mchunkptr q;
 2115   mbinptr b;
 2116   unsigned int idx;
 2117   INTERNAL_SIZE_T size;
 2118   unsigned long total = 0;
 2119   int max_fast_bin;
 2120 
 2121   /* internal size_t must be no wider than pointer type */
 2122   assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
 2123 
 2124   /* alignment is a power of 2 */
 2125   assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
 2126 
 2127   /* Check the arena is initialized. */
 2128   assert (av->top != 0);
 2129 
 2130   /* No memory has been allocated yet, so doing more tests is not possible.  */
 2131   if (av->top == initial_top (av))
 2132     return;
 2133 
 2134   /* pagesize is a power of 2 */
 2135   assert (powerof2(GLRO (dl_pagesize)));
 2136 
 2137   /* A contiguous main_arena is consistent with sbrk_base.  */
 2138   if (av == &main_arena && contiguous (av))
 2139     assert ((char *) mp_.sbrk_base + av->system_mem ==
 2140             (char *) av->top + chunksize (av->top));
 2141 
 2142   /* properties of fastbins */
 2143 
 2144   /* max_fast is in allowed range */
 2145   assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
 2146 
 2147   max_fast_bin = fastbin_index (get_max_fast ());
 2148 
 2149   for (i = 0; i < NFASTBINS; ++i)
 2150     {
 2151       p = fastbin (av, i);
 2152 
 2153       /* The following test can only be performed for the main arena.
 2154          While mallopt calls malloc_consolidate to get rid of all fast
 2155          bins (especially those larger than the new maximum) this does
 2156          only happen for the main arena.  Trying to do this for any
 2157          other arena would mean those arenas have to be locked and
 2158          malloc_consolidate be called for them.  This is excessive.  And
 2159          even if this is acceptable to somebody it still cannot solve
 2160          the problem completely since if the arena is locked a
 2161          concurrent malloc call might create a new arena which then
 2162          could use the newly invalid fast bins.  */
 2163 
 2164       /* all bins past max_fast are empty */
 2165       if (av == &main_arena && i > max_fast_bin)
 2166         assert (p == 0);
 2167 
 2168       while (p != 0)
 2169         {
 2170           /* each chunk claims to be inuse */
 2171           do_check_inuse_chunk (av, p);
 2172           total += chunksize (p);
 2173           /* chunk belongs in this bin */
 2174           assert (fastbin_index (chunksize (p)) == i);
 2175           p = p->fd;
 2176         }
 2177     }
 2178 
 2179   /* check normal bins */
 2180   for (i = 1; i < NBINS; ++i)
 2181     {
 2182       b = bin_at (av, i);
 2183 
 2184       /* binmap is accurate (except for bin 1 == unsorted_chunks) */
 2185       if (i >= 2)
 2186         {
 2187           unsigned int binbit = get_binmap (av, i);
 2188           int empty = last (b) == b;
 2189           if (!binbit)
 2190             assert (empty);
 2191           else if (!empty)
 2192             assert (binbit);
 2193         }
 2194 
 2195       for (p = last (b); p != b; p = p->bk)
 2196         {
 2197           /* each chunk claims to be free */
 2198           do_check_free_chunk (av, p);
 2199           size = chunksize (p);
 2200           total += size;
 2201           if (i >= 2)
 2202             {
 2203               /* chunk belongs in bin */
 2204               idx = bin_index (size);
 2205               assert (idx == i);
 2206               /* lists are sorted */
 2207               assert (p->bk == b ||
 2208                       (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
 2209 
 2210               if (!in_smallbin_range (size))
 2211                 {
 2212                   if (p->fd_nextsize != NULL)
 2213                     {
 2214                       if (p->fd_nextsize == p)
 2215                         assert (p->bk_nextsize == p);
 2216                       else
 2217                         {
 2218                           if (p->fd_nextsize == first (b))
 2219                             assert (chunksize (p) < chunksize (p->fd_nextsize));
 2220                           else
 2221                             assert (chunksize (p) > chunksize (p->fd_nextsize));
 2222 
 2223                           if (p == first (b))
 2224                             assert (chunksize (p) > chunksize (p->bk_nextsize));
 2225                           else
 2226                             assert (chunksize (p) < chunksize (p->bk_nextsize));
 2227                         }
 2228                     }
 2229                   else
 2230                     assert (p->bk_nextsize == NULL);
 2231                 }
 2232             }
 2233           else if (!in_smallbin_range (size))
 2234             assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
 2235           /* chunk is followed by a legal chain of inuse chunks */
 2236           for (q = next_chunk (p);
 2237                (q != av->top && inuse (q) &&
 2238                 (unsigned long) (chunksize (q)) >= MINSIZE);
 2239                q = next_chunk (q))
 2240             do_check_inuse_chunk (av, q);
 2241         }
 2242     }
 2243 
 2244   /* top chunk is OK */
 2245   check_chunk (av, av->top);
 2246 }
 2247 #endif
 2248 
 2249 
 2250 /* ----------------- Support for debugging hooks -------------------- */
 2251 #include "hooks.c"
 2252 
 2253 
 2254 /* ----------- Routines dealing with system allocation -------------- */
 2255 
 2256 /*
 2257    sysmalloc handles malloc cases requiring more memory from the system.
 2258    On entry, it is assumed that av->top does not have enough
 2259    space to service request for nb bytes, thus requiring that av->top
 2260    be extended or replaced.
 2261  */
 2262 
 2263 static void *
 2264 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
 2265 {
 2266   mchunkptr old_top;              /* incoming value of av->top */
 2267   INTERNAL_SIZE_T old_size;       /* its size */
 2268   char *old_end;                  /* its end address */
 2269 
 2270   long size;                      /* arg to first MORECORE or mmap call */
 2271   char *brk;                      /* return value from MORECORE */
 2272 
 2273   long correction;                /* arg to 2nd MORECORE call */
 2274   char *snd_brk;                  /* 2nd return val */
 2275 
 2276   INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
 2277   INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
 2278   char *aligned_brk;              /* aligned offset into brk */
 2279 
 2280   mchunkptr p;                    /* the allocated/returned chunk */
 2281   mchunkptr remainder;            /* remainder from allocation */
 2282   unsigned long remainder_size;   /* its size */
 2283 
 2284 
 2285   size_t pagesize = GLRO (dl_pagesize);
 2286   bool tried_mmap = false;
 2287 
 2288 
 2289   /*
 2290      If have mmap, and the request size meets the mmap threshold, and
 2291      the system supports mmap, and there are few enough currently
 2292      allocated mmapped regions, try to directly map this request
 2293      rather than expanding top.
 2294    */
 2295 
 2296   if (av == NULL
 2297       || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
 2298       && (mp_.n_mmaps < mp_.n_mmaps_max)))
 2299     {
 2300       char *mm;           /* return value from mmap call*/
 2301 
 2302     try_mmap:
 2303       /*
 2304          Round up size to nearest page.  For mmapped chunks, the overhead
 2305          is one SIZE_SZ unit larger than for normal chunks, because there
 2306          is no following chunk whose prev_size field could be used.
 2307 
 2308          See the front_misalign handling below; for glibc there is no
 2309          need for further alignment unless we have high alignment requirements.
 2310        */
 2311       if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
 2312         size = ALIGN_UP (nb + SIZE_SZ, pagesize);
 2313       else
 2314         size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
 2315       tried_mmap = true;
 2316 
 2317       /* Don't try if size wraps around 0 */
 2318       if ((unsigned long) (size) > (unsigned long) (nb))
 2319         {
 2320           mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
 2321 
 2322           if (mm != MAP_FAILED)
 2323             {
 2324               /*
 2325                  The offset to the start of the mmapped region is stored
 2326                  in the prev_size field of the chunk. This allows us to adjust
 2327                  returned start address to meet alignment requirements here
 2328                  and in memalign(), and still be able to compute proper
 2329                  address argument for later munmap in free() and realloc().
 2330                */
 2331 
 2332               if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
 2333                 {
 2334                   /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
 2335                      MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
 2336                      aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
 2337                   assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
 2338                   front_misalign = 0;
 2339                 }
 2340               else
 2341                 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
 2342               if (front_misalign > 0)
 2343                 {
 2344                   correction = MALLOC_ALIGNMENT - front_misalign;
 2345                   p = (mchunkptr) (mm + correction);
 2346                   set_prev_size (p, correction);
 2347                   set_head (p, (size - correction) | IS_MMAPPED);
 2348                 }
 2349               else
 2350                 {
 2351                   p = (mchunkptr) mm;
 2352                   set_prev_size (p, 0);
 2353                   set_head (p, size | IS_MMAPPED);
 2354                 }
 2355 
 2356               /* update statistics */
 2357 
 2358               int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
 2359               atomic_max (&mp_.max_n_mmaps, new);
 2360 
 2361               unsigned long sum;
 2362               sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
 2363               atomic_max (&mp_.max_mmapped_mem, sum);
 2364 
 2365               check_chunk (av, p);
 2366 
 2367               return chunk2mem (p);
 2368             }
 2369         }
 2370     }
 2371 
 2372   /* There are no usable arenas and mmap also failed.  */
 2373   if (av == NULL)
 2374     return 0;
 2375 
 2376   /* Record incoming configuration of top */
 2377 
 2378   old_top = av->top;
 2379   old_size = chunksize (old_top);
 2380   old_end = (char *) (chunk_at_offset (old_top, old_size));
 2381 
 2382   brk = snd_brk = (char *) (MORECORE_FAILURE);
 2383 
 2384   /*
 2385      If not the first time through, we require old_size to be
 2386      at least MINSIZE and to have prev_inuse set.
 2387    */
 2388 
 2389   assert ((old_top == initial_top (av) && old_size == 0) ||
 2390           ((unsigned long) (old_size) >= MINSIZE &&
 2391            prev_inuse (old_top) &&
 2392            ((unsigned long) old_end & (pagesize - 1)) == 0));
 2393 
 2394   /* Precondition: not enough current space to satisfy nb request */
 2395   assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
 2396 
 2397 
 2398   if (av != &main_arena)
 2399     {
 2400       heap_info *old_heap, *heap;
 2401       size_t old_heap_size;
 2402 
 2403       /* First try to extend the current heap. */
 2404       old_heap = heap_for_ptr (old_top);
 2405       old_heap_size = old_heap->size;
 2406       if ((long) (MINSIZE + nb - old_size) > 0
 2407           && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
 2408         {
 2409           av->system_mem += old_heap->size - old_heap_size;
 2410           set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
 2411                     | PREV_INUSE);
 2412         }
 2413       else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
 2414         {
 2415           /* Use a newly allocated heap.  */
 2416           heap->ar_ptr = av;
 2417           heap->prev = old_heap;
 2418           av->system_mem += heap->size;
 2419           /* Set up the new top.  */
 2420           top (av) = chunk_at_offset (heap, sizeof (*heap));
 2421           set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
 2422 
 2423           /* Setup fencepost and free the old top chunk with a multiple of
 2424              MALLOC_ALIGNMENT in size. */
 2425           /* The fencepost takes at least MINSIZE bytes, because it might
 2426              become the top chunk again later.  Note that a footer is set
 2427              up, too, although the chunk is marked in use. */
 2428           old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
 2429           set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
 2430           if (old_size >= MINSIZE)
 2431             {
 2432               set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
 2433               set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
 2434               set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
 2435               _int_free (av, old_top, 1);
 2436             }
 2437           else
 2438             {
 2439               set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
 2440               set_foot (old_top, (old_size + 2 * SIZE_SZ));
 2441             }
 2442         }
 2443       else if (!tried_mmap)
 2444         /* We can at least try to use mmap to get memory.  */
 2445         goto try_mmap;
 2446     }
 2447   else     /* av == main_arena */
 2448 
 2449 
 2450     { /* Request enough space for nb + pad + overhead */
 2451       size = nb + mp_.top_pad + MINSIZE;
 2452 
 2453       /*
 2454          If contiguous, we can subtract out existing space that we hope to
 2455          combine with new space. We add it back later only if
 2456          we don't actually get contiguous space.
 2457        */
 2458 
 2459       if (contiguous (av))
 2460         size -= old_size;
 2461 
 2462       /*
 2463          Round to a multiple of page size.
 2464          If MORECORE is not contiguous, this ensures that we only call it
 2465          with whole-page arguments.  And if MORECORE is contiguous and
 2466          this is not first time through, this preserves page-alignment of
 2467          previous calls. Otherwise, we correct to page-align below.
 2468        */
 2469 
 2470       size = ALIGN_UP (size, pagesize);
 2471 
 2472       /*
 2473          Don't try to call MORECORE if argument is so big as to appear
 2474          negative. Note that since mmap takes size_t arg, it may succeed
 2475          below even if we cannot call MORECORE.
 2476        */
 2477 
 2478       if (size > 0)
 2479         {
 2480           brk = (char *) (MORECORE (size));
 2481           LIBC_PROBE (memory_sbrk_more, 2, brk, size);
 2482         }
 2483 
 2484       if (brk != (char *) (MORECORE_FAILURE))
 2485         {
 2486           /* Call the `morecore' hook if necessary.  */
 2487           void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
 2488           if (__builtin_expect (hook != NULL, 0))
 2489             (*hook)();
 2490         }
 2491       else
 2492         {
 2493           /*
 2494              If have mmap, try using it as a backup when MORECORE fails or
 2495              cannot be used. This is worth doing on systems that have "holes" in
 2496              address space, so sbrk cannot extend to give contiguous space, but
 2497              space is available elsewhere.  Note that we ignore mmap max count
 2498              and threshold limits, since the space will not be used as a
 2499              segregated mmap region.
 2500            */
 2501 
 2502           /* Cannot merge with old top, so add its size back in */
 2503           if (contiguous (av))
 2504             size = ALIGN_UP (size + old_size, pagesize);
 2505 
 2506           /* If we are relying on mmap as backup, then use larger units */
 2507           if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
 2508             size = MMAP_AS_MORECORE_SIZE;
 2509 
 2510           /* Don't try if size wraps around 0 */
 2511           if ((unsigned long) (size) > (unsigned long) (nb))
 2512             {
 2513               char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
 2514 
 2515               if (mbrk != MAP_FAILED)
 2516                 {
 2517                   /* We do not need, and cannot use, another sbrk call to find end */
 2518                   brk = mbrk;
 2519                   snd_brk = brk + size;
 2520 
 2521                   /*
 2522                      Record that we no longer have a contiguous sbrk region.
 2523                      After the first time mmap is used as backup, we do not
 2524                      ever rely on contiguous space since this could incorrectly
 2525                      bridge regions.
 2526                    */
 2527                   set_noncontiguous (av);
 2528                 }
 2529             }
 2530         }
 2531 
 2532       if (brk != (char *) (MORECORE_FAILURE))
 2533         {
 2534           if (mp_.sbrk_base == 0)
 2535             mp_.sbrk_base = brk;
 2536           av->system_mem += size;
 2537 
 2538           /*
 2539              If MORECORE extends previous space, we can likewise extend top size.
 2540            */
 2541 
 2542           if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
 2543             set_head (old_top, (size + old_size) | PREV_INUSE);
 2544 
 2545           else if (contiguous (av) && old_size && brk < old_end)
 2546             /* Oops!  Someone else killed our space...  Can't touch anything.  */
 2547             malloc_printerr ("break adjusted to free malloc space");
 2548 
 2549           /*
 2550              Otherwise, make adjustments:
 2551 
 2552            * If the first time through or noncontiguous, we need to call sbrk
 2553               just to find out where the end of memory lies.
 2554 
 2555            * We need to ensure that all returned chunks from malloc will meet
 2556               MALLOC_ALIGNMENT
 2557 
 2558            * If there was an intervening foreign sbrk, we need to adjust sbrk
 2559               request size to account for fact that we will not be able to
 2560               combine new space with existing space in old_top.
 2561 
 2562            * Almost all systems internally allocate whole pages at a time, in
 2563               which case we might as well use the whole last page of request.
 2564               So we allocate enough more memory to hit a page boundary now,
 2565               which in turn causes future contiguous calls to page-align.
 2566            */
 2567 
 2568           else
 2569             {
 2570               front_misalign = 0;
 2571               end_misalign = 0;
 2572               correction = 0;
 2573               aligned_brk = brk;
 2574 
 2575               /* handle contiguous cases */
 2576               if (contiguous (av))
 2577                 {
 2578                   /* Count foreign sbrk as system_mem.  */
 2579                   if (old_size)
 2580                     av->system_mem += brk - old_end;
 2581 
 2582                   /* Guarantee alignment of first new chunk made from this space */
 2583 
 2584                   front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
 2585                   if (front_misalign > 0)
 2586                     {
 2587                       /*
 2588                          Skip over some bytes to arrive at an aligned position.
 2589                          We don't need to specially mark these wasted front bytes.
 2590                          They will never be accessed anyway because
 2591                          prev_inuse of av->top (and any chunk created from its start)
 2592                          is always true after initialization.
 2593                        */
 2594 
 2595                       correction = MALLOC_ALIGNMENT - front_misalign;
 2596                       aligned_brk += correction;
 2597                     }
 2598 
 2599                   /*
 2600                      If this isn't adjacent to existing space, then we will not
 2601                      be able to merge with old_top space, so must add to 2nd request.
 2602                    */
 2603 
 2604                   correction += old_size;
 2605 
 2606                   /* Extend the end address to hit a page boundary */
 2607                   end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
 2608                   correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
 2609 
 2610                   assert (correction >= 0);
 2611                   snd_brk = (char *) (MORECORE (correction));
 2612 
 2613                   /*
 2614                      If can't allocate correction, try to at least find out current
 2615                      brk.  It might be enough to proceed without failing.
 2616 
 2617                      Note that if second sbrk did NOT fail, we assume that space
 2618                      is contiguous with first sbrk. This is a safe assumption unless
 2619                      program is multithreaded but doesn't use locks and a foreign sbrk
 2620                      occurred between our first and second calls.
 2621                    */
 2622 
 2623                   if (snd_brk == (char *) (MORECORE_FAILURE))
 2624                     {
 2625                       correction = 0;
 2626                       snd_brk = (char *) (MORECORE (0));
 2627                     }
 2628                   else
 2629                     {
 2630                       /* Call the `morecore' hook if necessary.  */
 2631                       void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
 2632                       if (__builtin_expect (hook != NULL, 0))
 2633                         (*hook)();
 2634                     }
 2635                 }
 2636 
 2637               /* handle non-contiguous cases */
 2638               else
 2639                 {
 2640                   if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
 2641                     /* MORECORE/mmap must correctly align */
 2642                     assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
 2643                   else
 2644                     {
 2645                       front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
 2646                       if (front_misalign > 0)
 2647                         {
 2648                           /*
 2649                              Skip over some bytes to arrive at an aligned position.
 2650                              We don't need to specially mark these wasted front bytes.
 2651                              They will never be accessed anyway because
 2652                              prev_inuse of av->top (and any chunk created from its start)
 2653                              is always true after initialization.
 2654                            */
 2655 
 2656                           aligned_brk += MALLOC_ALIGNMENT - front_misalign;
 2657                         }
 2658                     }
 2659 
 2660                   /* Find out current end of memory */
 2661                   if (snd_brk == (char *) (MORECORE_FAILURE))
 2662                     {
 2663                       snd_brk = (char *) (MORECORE (0));
 2664                     }
 2665                 }
 2666 
 2667               /* Adjust top based on results of second sbrk */
 2668               if (snd_brk != (char *) (MORECORE_FAILURE))
 2669                 {
 2670                   av->top = (mchunkptr) aligned_brk;
 2671                   set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
 2672                   av->system_mem += correction;
 2673 
 2674                   /*
 2675                      If not the first time through, we either have a
 2676                      gap due to foreign sbrk or a non-contiguous region.  Insert a
 2677                      double fencepost at old_top to prevent consolidation with space
 2678                      we don't own. These fenceposts are artificial chunks that are
 2679                      marked as inuse and are in any case too small to use.  We need
 2680                      two to make sizes and alignments work out.
 2681                    */
 2682 
 2683                   if (old_size != 0)
 2684                     {
 2685                       /*
 2686                          Shrink old_top to insert fenceposts, keeping size a
 2687                          multiple of MALLOC_ALIGNMENT. We know there is at least
 2688                          enough space in old_top to do this.
 2689                        */
 2690                       old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
 2691                       set_head (old_top, old_size | PREV_INUSE);
 2692 
 2693                       /*
 2694                          Note that the following assignments completely overwrite
 2695                          old_top when old_size was previously MINSIZE.  This is
 2696                          intentional. We need the fencepost, even if old_top otherwise gets
 2697                          lost.
 2698                        */
 2699                       set_head (chunk_at_offset (old_top, old_size),
 2700                                 (2 * SIZE_SZ) | PREV_INUSE);
 2701                       set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
 2702                                 (2 * SIZE_SZ) | PREV_INUSE);
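                            /*
                               Illustrative sketch only (assuming SIZE_SZ == 8): if
                               old_size ends up as 0x120, the two stores above write a
                               size field of (2 * SIZE_SZ) | PREV_INUSE == 0x11 at
                               old_top + 0x120 and again at old_top + 0x130, i.e.

                                 old_top ...... old_top+0x120   old_top+0x130
                                    |                 |               |
                                    v                 v               v
                                    [ shrunken old_top ][ fence 16 B ][ fence 16 B ]

                               Both fenceposts look like in-use chunks that are too
                               small to ever be handed out or coalesced, so free-side
                               consolidation can never walk past memory we own.
                             */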
 2703 
 2704                       /* If possible, release the rest. */
 2705                       if (old_size >= MINSIZE)
 2706                         {
 2707                           _int_free (av, old_top, 1);
 2708                         }
 2709                     }
 2710                 }
 2711             }
 2712         }
 2713     } /* if (av !=  &main_arena) */
 2714 
 2715   if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
 2716     av->max_system_mem = av->system_mem;
 2717   check_malloc_state (av);
 2718 
 2719   /* finally, do the allocation */
 2720   p = av->top;
 2721   size = chunksize (p);
 2722 
 2723   /* check that one of the above allocation paths succeeded */
 2724   if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
 2725     {
 2726       remainder_size = size - nb;
 2727       remainder = chunk_at_offset (p, nb);
 2728       av->top = remainder;
 2729       set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
 2730       set_head (remainder, remainder_size | PREV_INUSE);
 2731       check_malloced_chunk (av, p, nb);
 2732       return chunk2mem (p);
 2733     }
 2734 
 2735   /* catch all failure paths */
 2736   __set_errno (ENOMEM);
 2737   return 0;
 2738 }
 2739 
 2740 
 2741 /*
 2742    systrim is an inverse of sorts to sysmalloc.  It gives memory back
 2743    to the system (via negative arguments to sbrk) if there is unused
 2744    memory at the `high' end of the malloc pool. It is called
 2745    automatically by free() when top space exceeds the trim
 2746    threshold. It is also called by the public malloc_trim routine.  It
 2747    returns 1 if it actually released any memory, else 0.
 2748  */
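      /*
         As a usage-level illustration only (not part of this file's logic):
         an application that has just freed a large buffer can request trimming
         through the public malloc_trim interface declared in <malloc.h>, which
         is what the comment above refers to.  A minimal sketch:

           #include <malloc.h>
           #include <stdlib.h>

           int
           main (void)
           {
             void *big = malloc (32 * 1024 * 1024);
             free (big);             // top of the main arena is now large
             if (malloc_trim (0))    // pad == 0: release as much as possible
               ;                     // some memory was returned to the system
             return 0;
           }
       */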
 2749 
 2750 static int
 2751 systrim (size_t pad, mstate av)
 2752 {
 2753   long top_size;         /* Amount of top-most memory */
 2754   long extra;            /* Amount to release */
 2755   long released;         /* Amount actually released */
 2756   char *current_brk;     /* address returned by pre-check sbrk call */
 2757   char *new_brk;         /* address returned by post-check sbrk call */
 2758   size_t pagesize;
 2759   long top_area;
 2760 
 2761   pagesize = GLRO (dl_pagesize);
 2762   top_size = chunksize (av->top);
 2763 
 2764   top_area = top_size - MINSIZE - 1;
 2765   if (top_area <= pad)
 2766     return 0;
 2767 
 2768   /* Release in pagesize units and round down to the nearest page.  */
 2769   extra = ALIGN_DOWN(top_area - pad, pagesize);
 2770 
 2771   if (extra == 0)
 2772     return 0;
 2773 
 2774   /*
 2775      Only proceed if end of memory is where we last set it.
 2776      This avoids problems if there were foreign sbrk calls.
 2777    */
 2778   current_brk = (char *) (MORECORE (0));
 2779   if (current_brk == (char *) (av->top) + top_size)
 2780     {
 2781       /*
 2782          Attempt to release memory. We ignore MORECORE return value,
 2783          and instead call again to find out where new end of memory is.
 2784          This avoids problems if first call releases less than we asked,
 2785                          or if failure somehow altered the brk value. (We could still
 2786          encounter problems if it altered brk in some very bad way,
 2787          but the only thing we can do is adjust anyway, which will cause
 2788          some downstream failure.)
 2789        */
 2790 
 2791       MORECORE (-extra);
 2792       /* Call the `morecore' hook if necessary.  */
 2793       void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
 2794       if (__builtin_expect (hook != NULL, 0))
 2795         (*hook)();
 2796       new_brk = (char *) (MORECORE (0));
 2797 
 2798       LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
 2799 
 2800       if (new_brk != (char *) MORECORE_FAILURE)
 2801         {
 2802           released = (long) (current_brk - new_brk);
 2803 
 2804           if (released != 0)
 2805             {
 2806               /* Success. Adjust top. */
 2807               av->system_mem -= released;
 2808               set_head (av->top, (top_size - released) | PREV_INUSE);
 2809               check_malloc_state (av);
 2810               return 1;
 2811             }
 2812         }
 2813     }
 2814   return 0;
 2815 }
 2816 
 2817 static void
 2818 munmap_chunk (mchunkptr p)
 2819 {
 2820   size_t pagesize = GLRO (dl_pagesize);
 2821   INTERNAL_SIZE_T size = chunksize (p);
 2822 
 2823   assert (chunk_is_mmapped (p));
 2824 
 2825   /* Do nothing if the chunk is a faked mmapped chunk in the dumped
 2826      main arena.  We never free this memory.  */
 2827   if (DUMPED_MAIN_ARENA_CHUNK (p))
 2828     return;
 2829 
 2830   uintptr_t mem = (uintptr_t) chunk2mem (p);
 2831   uintptr_t block = (uintptr_t) p - prev_size (p);
 2832   size_t total_size = prev_size (p) + size;
 2833   /* Unfortunately we have to do the compiler's job by hand here.  Normally
 2834      we would test BLOCK and TOTAL-SIZE separately for compliance with the
 2835      page size.  But gcc does not recognize the optimization possibility
 2836      (at the moment, at least), so we combine the two values into one before
 2837      the bit test.  */
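        /* Equivalently (worked bit-arithmetic sketch, assuming a 4096-byte page):

               block      % 4096 == 0  &&  total_size % 4096 == 0
           <=> (block & 4095) == 0     &&  (total_size & 4095) == 0
           <=> ((block | total_size) & 4095) == 0

           because OR-ing the two values simply OR-s their low 12 bits, all of
           which must be zero for both to be page aligned.  */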
 2838   if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
 2839       || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
 2840     malloc_printerr ("munmap_chunk(): invalid pointer");
 2841 
 2842   atomic_decrement (&mp_.n_mmaps);
 2843   atomic_add (&mp_.mmapped_mem, -total_size);
 2844 
 2845   /* If munmap fails, the process's virtual memory address space is in a
 2846      bad shape.  Just leave the block hanging around; the process will
 2847      terminate shortly anyway, since not much can be done.  */
 2848   __munmap ((char *) block, total_size);
 2849 }
 2850 
 2851 #if HAVE_MREMAP
 2852 
 2853 static mchunkptr
 2854 mremap_chunk (mchunkptr p, size_t new_size)
 2855 {
 2856   size_t pagesize = GLRO (dl_pagesize);
 2857   INTERNAL_SIZE_T offset = prev_size (p);
 2858   INTERNAL_SIZE_T size = chunksize (p);
 2859   char *cp;
 2860 
 2861   assert (chunk_is_mmapped (p));
 2862 
 2863   uintptr_t block = (uintptr_t) p - offset;
 2864   uintptr_t mem = (uintptr_t) chunk2mem(p);
 2865   size_t total_size = offset + size;
 2866   if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
 2867       || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
 2868     malloc_printerr("mremap_chunk(): invalid pointer");
 2869 
 2870   /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
 2871   new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
 2872 
 2873   /* No need to remap if the number of pages does not change.  */
 2874   if (total_size == new_size)
 2875     return p;
 2876 
 2877   cp = (char *) __mremap ((char *) block, total_size, new_size,
 2878                           MREMAP_MAYMOVE);
 2879 
 2880   if (cp == MAP_FAILED)
 2881     return 0;
 2882 
 2883   p = (mchunkptr) (cp + offset);
 2884 
 2885   assert (aligned_OK (chunk2mem (p)));
 2886 
 2887   assert (prev_size (p) == offset);
 2888   set_head (p, (new_size - offset) | IS_MMAPPED);
 2889 
 2890   INTERNAL_SIZE_T new;
 2891   new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
 2892         + new_size - size - offset;
 2893   atomic_max (&mp_.max_mmapped_mem, new);
 2894   return p;
 2895 }
 2896 #endif /* HAVE_MREMAP */
 2897 
 2898 /*------------------------ Public wrappers. --------------------------------*/
 2899 
 2900 #if USE_TCACHE
 2901 
 2902 /* We overlay this structure on the user-data portion of a chunk when
 2903    the chunk is stored in the per-thread cache.  */
 2904 typedef struct tcache_entry
 2905 {
 2906   struct tcache_entry *next;
 2907   /* This field exists to detect double frees.  */
 2908   struct tcache_perthread_struct *key;
 2909 } tcache_entry;
 2910 
 2911 /* There is one of these for each thread, which contains the
 2912    per-thread cache (hence "tcache_perthread_struct").  Keeping
 2913    overall size low is mildly important.  Note that COUNTS and ENTRIES
 2914    are redundant (we could have just counted the linked list each
 2915    time); keeping the counts is purely a performance optimization.  */
 2916 typedef struct tcache_perthread_struct
 2917 {
 2918   char counts[TCACHE_MAX_BINS];
 2919   tcache_entry *entries[TCACHE_MAX_BINS];
 2920 } tcache_perthread_struct;
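      /* Illustrative sketch only: after three same-sized chunks A, B and C have
         been freed by this thread (in that order), the corresponding bin looks
         like

           tcache->entries[i] --> C.next --> B.next --> A.next --> NULL
           tcache->counts[i]  ==  3

         where each ".next" lives in what used to be the chunk's user data, and
         every entry's "key" field points back at this thread's
         tcache_perthread_struct, which is what the double-free test in
         _int_free checks for.  */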
 2921 
 2922 static __thread bool tcache_shutting_down = false;
 2923 static __thread tcache_perthread_struct *tcache = NULL;
 2924 
 2925 /* Caller must ensure that tc_idx is valid and that there's room
 2926    for more chunks.  */
 2927 static __always_inline void
 2928 tcache_put (mchunkptr chunk, size_t tc_idx)
 2929 {
 2930   tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
 2931   assert (tc_idx < TCACHE_MAX_BINS);
 2932 
 2933   /* Mark this chunk as "in the tcache" so the test in _int_free will
 2934      detect a double free.  */
 2935   e->key = tcache;
 2936 
 2937   e->next = tcache->entries[tc_idx];
 2938   tcache->entries[tc_idx] = e;
 2939   ++(tcache->counts[tc_idx]);
 2940 }
 2941 
 2942 /* Caller must ensure that tc_idx is valid and that there are
 2943    chunks available to remove.  */
 2944 static __always_inline void *
 2945 tcache_get (size_t tc_idx)
 2946 {
 2947   tcache_entry *e = tcache->entries[tc_idx];
 2948   assert (tc_idx < TCACHE_MAX_BINS);
 2949   assert (tcache->entries[tc_idx] != NULL);
 2950   tcache->entries[tc_idx] = e->next;
 2951   --(tcache->counts[tc_idx]);
 2952   e->key = NULL;
 2953   return (void *) e;
 2954 }
 2955 
 2956 static void
 2957 tcache_thread_shutdown (void)
 2958 {
 2959   int i;
 2960   tcache_perthread_struct *tcache_tmp = tcache;
 2961 
 2962   if (!tcache)
 2963     return;
 2964 
 2965   /* Disable the tcache and prevent it from being reinitialized.  */
 2966   tcache = NULL;
 2967   tcache_shutting_down = true;
 2968 
 2969   /* Free all of the entries and the tcache itself back to the arena
 2970      heap for coalescing.  */
 2971   for (i = 0; i < TCACHE_MAX_BINS; ++i)
 2972     {
 2973       while (tcache_tmp->entries[i])
 2974     {
 2975       tcache_entry *e = tcache_tmp->entries[i];
 2976       tcache_tmp->entries[i] = e->next;
 2977       __libc_free (e);
 2978     }
 2979     }
 2980 
 2981   __libc_free (tcache_tmp);
 2982 }
 2983 
 2984 static void
 2985 tcache_init(void)
 2986 {
 2987   mstate ar_ptr;
 2988   void *victim = 0;
 2989   const size_t bytes = sizeof (tcache_perthread_struct);
 2990 
 2991   if (tcache_shutting_down)
 2992     return;
 2993 
 2994   arena_get (ar_ptr, bytes);
 2995   victim = _int_malloc (ar_ptr, bytes);
 2996   if (!victim && ar_ptr != NULL)
 2997     {
 2998       ar_ptr = arena_get_retry (ar_ptr, bytes);
 2999       victim = _int_malloc (ar_ptr, bytes);
 3000     }
 3001 
 3002 
 3003   if (ar_ptr != NULL)
 3004     __libc_lock_unlock (ar_ptr->mutex);
 3005 
 3006   /* In a low memory situation, we may not be able to allocate memory
 3007      - in which case, we just keep trying later.  However, we
 3008      typically do this very early, so either there is sufficient
 3009      memory, or there isn't enough memory to do non-trivial
 3010      allocations anyway.  */
 3011   if (victim)
 3012     {
 3013       tcache = (tcache_perthread_struct *) victim;
 3014       memset (tcache, 0, sizeof (tcache_perthread_struct));
 3015     }
 3016 
 3017 }
 3018 
 3019 # define MAYBE_INIT_TCACHE() \
 3020   if (__glibc_unlikely (tcache == NULL)) \
 3021     tcache_init();
 3022 
 3023 #else  /* !USE_TCACHE */
 3024 # define MAYBE_INIT_TCACHE()
 3025 
 3026 static void
 3027 tcache_thread_shutdown (void)
 3028 {
 3029   /* Nothing to do if there is no thread cache.  */
 3030 }
 3031 
 3032 #endif /* !USE_TCACHE  */
 3033 
 3034 void *
 3035 __libc_malloc (size_t bytes)
 3036 {
 3037   mstate ar_ptr;
 3038   void *victim;
 3039 
 3040   void *(*hook) (size_t, const void *)
 3041     = atomic_forced_read (__malloc_hook);
 3042   if (__builtin_expect (hook != NULL, 0))
 3043     return (*hook)(bytes, RETURN_ADDRESS (0));
 3044 #if USE_TCACHE
 3045   /* int_free also calls request2size, be careful not to pad twice.  */
 3046   size_t tbytes;
 3047   checked_request2size (bytes, tbytes);
 3048   size_t tc_idx = csize2tidx (tbytes);
 3049 
 3050   MAYBE_INIT_TCACHE ();
 3051 
 3052   DIAG_PUSH_NEEDS_COMMENT;
 3053   if (tc_idx < mp_.tcache_bins
 3054       /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */
 3055       && tcache
 3056       && tcache->entries[tc_idx] != NULL)
 3057     {
 3058       return tcache_get (tc_idx);
 3059     }
 3060   DIAG_POP_NEEDS_COMMENT;
 3061 #endif
 3062 
 3063   if (SINGLE_THREAD_P)
 3064     {
 3065       victim = _int_malloc (&main_arena, bytes);
 3066       assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
 3067           &main_arena == arena_for_chunk (mem2chunk (victim)));
 3068       return victim;
 3069     }
 3070 
 3071   arena_get (ar_ptr, bytes);
 3072 
 3073   victim = _int_malloc (ar_ptr, bytes);
 3074   /* Retry with another arena only if we were able to find a usable arena
 3075      before.  */
 3076   if (!victim && ar_ptr != NULL)
 3077     {
 3078       LIBC_PROBE (memory_malloc_retry, 1, bytes);
 3079       ar_ptr = arena_get_retry (ar_ptr, bytes);
 3080       victim = _int_malloc (ar_ptr, bytes);
 3081     }
 3082 
 3083   if (ar_ptr != NULL)
 3084     __libc_lock_unlock (ar_ptr->mutex);
 3085 
 3086   assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
 3087           ar_ptr == arena_for_chunk (mem2chunk (victim)));
 3088   return victim;
 3089 }
 3090 libc_hidden_def (__libc_malloc)
 3091 
 3092 void
 3093 __libc_free (void *mem)
 3094 {
 3095   mstate ar_ptr;
 3096   mchunkptr p;                          /* chunk corresponding to mem */
 3097 
 3098   void (*hook) (void *, const void *)
 3099     = atomic_forced_read (__free_hook);
 3100   if (__builtin_expect (hook != NULL, 0))
 3101     {
 3102       (*hook)(mem, RETURN_ADDRESS (0));
 3103       return;
 3104     }
 3105 
 3106   if (mem == 0)                              /* free(0) has no effect */
 3107     return;
 3108 
 3109   p = mem2chunk (mem);
 3110 
 3111   if (chunk_is_mmapped (p))                       /* release mmapped memory. */
 3112     {
 3113       /* See if the dynamic brk/mmap threshold needs adjusting.
 3114      Dumped fake mmapped chunks do not affect the threshold.  */
 3115       if (!mp_.no_dyn_threshold
 3116           && chunksize_nomask (p) > mp_.mmap_threshold
 3117           && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
 3118       && !DUMPED_MAIN_ARENA_CHUNK (p))
 3119         {
 3120           mp_.mmap_threshold = chunksize (p);
 3121           mp_.trim_threshold = 2 * mp_.mmap_threshold;
 3122           LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
 3123                       mp_.mmap_threshold, mp_.trim_threshold);
 3124         }
 3125       munmap_chunk (p);
 3126       return;
 3127     }
 3128 
 3129   MAYBE_INIT_TCACHE ();
 3130 
 3131   ar_ptr = arena_for_chunk (p);
 3132   _int_free (ar_ptr, p, 0);
 3133 }
 3134 libc_hidden_def (__libc_free)
 3135 
 3136 void *
 3137 __libc_realloc (void *oldmem, size_t bytes)
 3138 {
 3139   mstate ar_ptr;
 3140   INTERNAL_SIZE_T nb;         /* padded request size */
 3141 
 3142   void *newp;             /* chunk to return */
 3143 
 3144   void *(*hook) (void *, size_t, const void *) =
 3145     atomic_forced_read (__realloc_hook);
 3146   if (__builtin_expect (hook != NULL, 0))
 3147     return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
 3148 
 3149 #if REALLOC_ZERO_BYTES_FREES
 3150   if (bytes == 0 && oldmem != NULL)
 3151     {
 3152       __libc_free (oldmem); return 0;
 3153     }
 3154 #endif
 3155 
 3156   /* realloc of null is supposed to be the same as malloc */
 3157   if (oldmem == 0)
 3158     return __libc_malloc (bytes);
 3159 
 3160   /* chunk corresponding to oldmem */
 3161   const mchunkptr oldp = mem2chunk (oldmem);
 3162   /* its size */
 3163   const INTERNAL_SIZE_T oldsize = chunksize (oldp);
 3164 
 3165   if (chunk_is_mmapped (oldp))
 3166     ar_ptr = NULL;
 3167   else
 3168     {
 3169       MAYBE_INIT_TCACHE ();
 3170       ar_ptr = arena_for_chunk (oldp);
 3171     }
 3172 
 3173   /* Little security check which won't hurt performance: the allocator
 3174      never wraps around at the end of the address space.  Therefore
 3175      we can exclude some size values which might appear here by
 3176      accident or by "design" from some intruder.  We need to bypass
 3177      this check for dumped fake mmap chunks from the old main arena
 3178      because the new malloc may provide additional alignment.  */
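        /* A worked instance of the check below (illustrative, 64-bit build
           assumed):

             oldsize              = 0x20
             (uintptr_t) -oldsize = 0xffffffffffffffe0

           Any oldp above that value would make oldp + oldsize wrap past the end
           of the address space, which no real allocation can do, so such a
           pointer/size pair is rejected.  */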
 3179   if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
 3180        || __builtin_expect (misaligned_chunk (oldp), 0))
 3181       && !DUMPED_MAIN_ARENA_CHUNK (oldp))
 3182       malloc_printerr ("realloc(): invalid pointer");
 3183 
 3184   checked_request2size (bytes, nb);
 3185 
 3186   if (chunk_is_mmapped (oldp))
 3187     {
 3188       /* If this is a faked mmapped chunk from the dumped main arena,
 3189      always make a copy (and do not free the old chunk).  */
 3190       if (DUMPED_MAIN_ARENA_CHUNK (oldp))
 3191     {
 3192       /* Must alloc, copy, free. */
 3193       void *newmem = __libc_malloc (bytes);
 3194       if (newmem == 0)
 3195         return NULL;
 3196       /* Copy as many bytes as are available from the old chunk
 3197          and that fit into the new size.  NB: The overhead for faked
 3198          mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
 3199          regular mmapped chunks.  */
 3200       if (bytes > oldsize - SIZE_SZ)
 3201         bytes = oldsize - SIZE_SZ;
 3202       memcpy (newmem, oldmem, bytes);
 3203       return newmem;
 3204     }
 3205 
 3206       void *newmem;
 3207 
 3208 #if HAVE_MREMAP
 3209       newp = mremap_chunk (oldp, nb);
 3210       if (newp)
 3211         return chunk2mem (newp);
 3212 #endif
 3213       /* Note the extra SIZE_SZ overhead. */
 3214       if (oldsize - SIZE_SZ >= nb)
 3215         return oldmem;                         /* do nothing */
 3216 
 3217       /* Must alloc, copy, free. */
 3218       newmem = __libc_malloc (bytes);
 3219       if (newmem == 0)
 3220         return 0;              /* propagate failure */
 3221 
 3222       memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
 3223       munmap_chunk (oldp);
 3224       return newmem;
 3225     }
 3226 
 3227   if (SINGLE_THREAD_P)
 3228     {
 3229       newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
 3230       assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
 3231           ar_ptr == arena_for_chunk (mem2chunk (newp)));
 3232 
 3233       return newp;
 3234     }
 3235 
 3236   __libc_lock_lock (ar_ptr->mutex);
 3237 
 3238   newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
 3239 
 3240   __libc_lock_unlock (ar_ptr->mutex);
 3241   assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
 3242           ar_ptr == arena_for_chunk (mem2chunk (newp)));
 3243 
 3244   if (newp == NULL)
 3245     {
 3246       /* Try harder to allocate memory in other arenas.  */
 3247       LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
 3248       newp = __libc_malloc (bytes);
 3249       if (newp != NULL)
 3250         {
 3251           memcpy (newp, oldmem, oldsize - SIZE_SZ);
 3252           _int_free (ar_ptr, oldp, 0);
 3253         }
 3254     }
 3255 
 3256   return newp;
 3257 }
 3258 libc_hidden_def (__libc_realloc)
 3259 
 3260 void *
 3261 __libc_memalign (size_t alignment, size_t bytes)
 3262 {
 3263   void *address = RETURN_ADDRESS (0);
 3264   return _mid_memalign (alignment, bytes, address);
 3265 }
 3266 
 3267 static void *
 3268 _mid_memalign (size_t alignment, size_t bytes, void *address)
 3269 {
 3270   mstate ar_ptr;
 3271   void *p;
 3272 
 3273   void *(*hook) (size_t, size_t, const void *) =
 3274     atomic_forced_read (__memalign_hook);
 3275   if (__builtin_expect (hook != NULL, 0))
 3276     return (*hook)(alignment, bytes, address);
 3277 
 3278   /* If we need less alignment than we give anyway, just relay to malloc.  */
 3279   if (alignment <= MALLOC_ALIGNMENT)
 3280     return __libc_malloc (bytes);
 3281 
 3282   /* Otherwise, ensure that it is at least a minimum chunk size */
 3283   if (alignment < MINSIZE)
 3284     alignment = MINSIZE;
 3285 
 3286   /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
 3287      power of 2 and will cause overflow in the check below.  */
 3288   if (alignment > SIZE_MAX / 2 + 1)
 3289     {
 3290       __set_errno (EINVAL);
 3291       return 0;
 3292     }
 3293 
 3294   /* Check for overflow.  */
 3295   if (bytes > SIZE_MAX - alignment - MINSIZE)
 3296     {
 3297       __set_errno (ENOMEM);
 3298       return 0;
 3299     }
 3300 
 3301 
 3302   /* Make sure alignment is power of 2.  */
 3303   if (!powerof2 (alignment))
 3304     {
 3305       size_t a = MALLOC_ALIGNMENT * 2;
 3306       while (a < alignment)
 3307         a <<= 1;
 3308       alignment = a;
 3309     }
 3310 
 3311   if (SINGLE_THREAD_P)
 3312     {
 3313       p = _int_memalign (&main_arena, alignment, bytes);
 3314       assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
 3315           &main_arena == arena_for_chunk (mem2chunk (p)));
 3316 
 3317       return p;
 3318     }
 3319 
 3320   arena_get (ar_ptr, bytes + alignment + MINSIZE);
 3321 
 3322   p = _int_memalign (ar_ptr, alignment, bytes);
 3323   if (!p && ar_ptr != NULL)
 3324     {
 3325       LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
 3326       ar_ptr = arena_get_retry (ar_ptr, bytes);
 3327       p = _int_memalign (ar_ptr, alignment, bytes);
 3328     }
 3329 
 3330   if (ar_ptr != NULL)
 3331     __libc_lock_unlock (ar_ptr->mutex);
 3332 
 3333   assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
 3334           ar_ptr == arena_for_chunk (mem2chunk (p)));
 3335   return p;
 3336 }
 3337 /* For ISO C11.  */
 3338 weak_alias (__libc_memalign, aligned_alloc)
 3339 libc_hidden_def (__libc_memalign)
 3340 
 3341 void *
 3342 __libc_valloc (size_t bytes)
 3343 {
 3344   if (__malloc_initialized < 0)
 3345     ptmalloc_init ();
 3346 
 3347   void *address = RETURN_ADDRESS (0);
 3348   size_t pagesize = GLRO (dl_pagesize);
 3349   return _mid_memalign (pagesize, bytes, address);
 3350 }
 3351 
 3352 void *
 3353 __libc_pvalloc (size_t bytes)
 3354 {
 3355   if (__malloc_initialized < 0)
 3356     ptmalloc_init ();
 3357 
 3358   void *address = RETURN_ADDRESS (0);
 3359   size_t pagesize = GLRO (dl_pagesize);
 3360   size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
 3361 
 3362   /* Check for overflow.  */
 3363   if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
 3364     {
 3365       __set_errno (ENOMEM);
 3366       return 0;
 3367     }
 3368 
 3369   return _mid_memalign (pagesize, rounded_bytes, address);
 3370 }
 3371 
 3372 void *
 3373 __libc_calloc (size_t n, size_t elem_size)
 3374 {
 3375   mstate av;
 3376   mchunkptr oldtop, p;
 3377   INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
 3378   void *mem;
 3379   unsigned long clearsize;
 3380   unsigned long nclears;
 3381   INTERNAL_SIZE_T *d;
 3382 
 3383   /* size_t is unsigned so the behavior on overflow is defined.  */
 3384   bytes = n * elem_size;
 3385 #define HALF_INTERNAL_SIZE_T \
 3386   (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
 3387   if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
 3388     {
 3389       if (elem_size != 0 && bytes / elem_size != n)
 3390         {
 3391           __set_errno (ENOMEM);
 3392           return 0;
 3393         }
 3394     }
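        /* Worked example of the overflow check above (illustrative, 64-bit
           INTERNAL_SIZE_T assumed):

             n = 1UL << 33, elem_size = 1UL << 32
             bytes = n * elem_size  ==  0    (the product wrapped modulo 2^64)
             bytes / elem_size == 0 != n     ->  ENOMEM

           When both factors are below HALF_INTERNAL_SIZE_T the product cannot
           wrap, so the division is skipped on the common path.  */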
 3395 
 3396   void *(*hook) (size_t, const void *) =
 3397     atomic_forced_read (__malloc_hook);
 3398   if (__builtin_expect (hook != NULL, 0))
 3399     {
 3400       sz = bytes;
 3401       mem = (*hook)(sz, RETURN_ADDRESS (0));
 3402       if (mem == 0)
 3403         return 0;
 3404 
 3405       return memset (mem, 0, sz);
 3406     }
 3407 
 3408   sz = bytes;
 3409 
 3410   MAYBE_INIT_TCACHE ();
 3411 
 3412   if (SINGLE_THREAD_P)
 3413     av = &main_arena;
 3414   else
 3415     arena_get (av, sz);
 3416 
 3417   if (av)
 3418     {
 3419       /* Check if we hand out the top chunk, in which case there may be no
 3420      need to clear. */
 3421 #if MORECORE_CLEARS
 3422       oldtop = top (av);
 3423       oldtopsize = chunksize (top (av));
 3424 # if MORECORE_CLEARS < 2
 3425       /* Only newly allocated memory is guaranteed to be cleared.  */
 3426       if (av == &main_arena &&
 3427       oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
 3428     oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
 3429 # endif
 3430       if (av != &main_arena)
 3431     {
 3432       heap_info *heap = heap_for_ptr (oldtop);
 3433       if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
 3434         oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
 3435     }
 3436 #endif
 3437     }
 3438   else
 3439     {
 3440       /* No usable arenas.  */
 3441       oldtop = 0;
 3442       oldtopsize = 0;
 3443     }
 3444   mem = _int_malloc (av, sz);
 3445 
 3446   assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
 3447           av == arena_for_chunk (mem2chunk (mem)));
 3448 
 3449   if (!SINGLE_THREAD_P)
 3450     {
 3451       if (mem == 0 && av != NULL)
 3452     {
 3453       LIBC_PROBE (memory_calloc_retry, 1, sz);
 3454       av = arena_get_retry (av, sz);
 3455       mem = _int_malloc (av, sz);
 3456     }
 3457 
 3458       if (av != NULL)
 3459     __libc_lock_unlock (av->mutex);
 3460     }
 3461 
 3462   /* Allocation failed even after a retry.  */
 3463   if (mem == 0)
 3464     return 0;
 3465 
 3466   p = mem2chunk (mem);
 3467 
 3468   /* Two optional cases in which clearing is not necessary */
 3469   if (chunk_is_mmapped (p))
 3470     {
 3471       if (__builtin_expect (perturb_byte, 0))
 3472         return memset (mem, 0, sz);
 3473 
 3474       return mem;
 3475     }
 3476 
 3477   csz = chunksize (p);
 3478 
 3479 #if MORECORE_CLEARS
 3480   if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
 3481     {
 3482       /* clear only the bytes from non-freshly-sbrked memory */
 3483       csz = oldtopsize;
 3484     }
 3485 #endif
 3486 
 3487   /* Unroll clear of <= 36 bytes (72 if 8-byte sizes).  We know that
 3488      contents have an odd number of INTERNAL_SIZE_T-sized words;
 3489      minimally 3.  */
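        /* For example (illustrative, assuming SIZE_SZ == 8): csz == 0x40 gives
           clearsize == 0x38 and nclears == 7, so the unrolled stores below zero
           d[0] .. d[6] and the memset call is avoided.  The count is odd because
           on the usual configurations csz is a multiple of 2 * SIZE_SZ and one
           SIZE_SZ header word is excluded from the clear.  */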
 3490   d = (INTERNAL_SIZE_T *) mem;
 3491   clearsize = csz - SIZE_SZ;
 3492   nclears = clearsize / sizeof (INTERNAL_SIZE_T);
 3493   assert (nclears >= 3);
 3494 
 3495   if (nclears > 9)
 3496     return memset (d, 0, clearsize);
 3497 
 3498   else
 3499     {
 3500       *(d + 0) = 0;
 3501       *(d + 1) = 0;
 3502       *(d + 2) = 0;
 3503       if (nclears > 4)
 3504         {
 3505           *(d + 3) = 0;
 3506           *(d + 4) = 0;
 3507           if (nclears > 6)
 3508             {
 3509               *(d + 5) = 0;
 3510               *(d + 6) = 0;
 3511               if (nclears > 8)
 3512                 {
 3513                   *(d + 7) = 0;
 3514                   *(d + 8) = 0;
 3515                 }
 3516             }
 3517         }
 3518     }
 3519 
 3520   return mem;
 3521 }
 3522 
 3523 /*
 3524    ------------------------------ malloc ------------------------------
 3525  */
 3526 
 3527 static void *
 3528 _int_malloc (mstate av, size_t bytes)
 3529 {
 3530   INTERNAL_SIZE_T nb;               /* normalized request size */
 3531   unsigned int idx;                 /* associated bin index */
 3532   mbinptr bin;                      /* associated bin */
 3533 
 3534   mchunkptr victim;                 /* inspected/selected chunk */
 3535   INTERNAL_SIZE_T size;             /* its size */
 3536   int victim_index;                 /* its bin index */
 3537 
 3538   mchunkptr remainder;              /* remainder from a split */
 3539   unsigned long remainder_size;     /* its size */
 3540 
 3541   unsigned int block;               /* bit map traverser */
 3542   unsigned int bit;                 /* bit map traverser */
 3543   unsigned int map;                 /* current word of binmap */
 3544 
 3545   mchunkptr fwd;                    /* misc temp for linking */
 3546   mchunkptr bck;                    /* misc temp for linking */
 3547 
 3548 #if USE_TCACHE
 3549   size_t tcache_unsorted_count;     /* count of unsorted chunks processed */
 3550 #endif
 3551 
 3552   /*
 3553      Convert request size to internal form by adding SIZE_SZ bytes
 3554      overhead plus possibly more to obtain necessary alignment and/or
 3555      to obtain a size of at least MINSIZE, the smallest allocatable
 3556      size. Also, checked_request2size traps (returning 0) request sizes
 3557      that are so large that they wrap around zero when padded and
 3558      aligned.
 3559    */
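        /* For instance (illustrative, typical 64-bit configuration: SIZE_SZ == 8,
           MALLOC_ALIGNMENT == 16, MINSIZE == 32):

             bytes == 24  ->  nb == 32   (24 + 8, already 16-byte aligned)
             bytes == 25  ->  nb == 48   (25 + 8, rounded up to 48)
             bytes == 0   ->  nb == 32   (padded result below MINSIZE is bumped up)

           so nb is always an aligned chunk size of at least MINSIZE.  */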
 3560 
 3561   checked_request2size (bytes, nb);
 3562 
 3563   /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
 3564      mmap.  */
 3565   if (__glibc_unlikely (av == NULL))
 3566     {
 3567       void *p = sysmalloc (nb, av);
 3568       if (p != NULL)
 3569     alloc_perturb (p, bytes);
 3570       return p;
 3571     }
 3572 
 3573   /*
 3574      If the size qualifies as a fastbin, first check corresponding bin.
 3575      This code is safe to execute even if av is not yet initialized, so we
 3576      can try it without checking, which saves some time on this fast path.
 3577    */
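        /* Rough mapping for orientation (illustrative; assumes SIZE_SZ == 8, so
           fastbin_index (sz) is (sz >> 4) - 2, and the default MXFAST setting):

             nb == 0x20 -> fastbin 0,  nb == 0x30 -> fastbin 1,  ...,
             nb == 0x80 -> fastbin 6 (the largest size kept in fastbins by
             default).  */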
 3578 
 3579 #define REMOVE_FB(fb, victim, pp)           \
 3580   do                            \
 3581     {                           \
 3582       victim = pp;                  \
 3583       if (victim == NULL)               \
 3584     break;                      \
 3585     }                           \
 3586   while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
 3587      != victim);                    \
 3588 
 3589   if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
 3590     {
 3591       idx = fastbin_index (nb);
 3592       mfastbinptr *fb = &fastbin (av, idx);
 3593       mchunkptr pp;
 3594       victim = *fb;
 3595 
 3596       if (victim != NULL)
 3597     {
 3598       if (SINGLE_THREAD_P)
 3599         *fb = victim->fd;
 3600       else
 3601         REMOVE_FB (fb, pp, victim);
 3602       if (__glibc_likely (victim != NULL))
 3603         {
 3604           size_t victim_idx = fastbin_index (chunksize (victim));
 3605           if (__builtin_expect (victim_idx != idx, 0))
 3606         malloc_printerr ("malloc(): memory corruption (fast)");
 3607           check_remalloced_chunk (av, victim, nb);
 3608 #if USE_TCACHE
 3609           /* While we're here, if we see other chunks of the same size,
 3610          stash them in the tcache.  */
 3611           size_t tc_idx = csize2tidx (nb);
 3612           if (tcache && tc_idx < mp_.tcache_bins)
 3613         {
 3614           mchunkptr tc_victim;
 3615 
 3616           /* While bin not empty and tcache not full, copy chunks.  */
 3617           while (tcache->counts[tc_idx] < mp_.tcache_count
 3618              && (tc_victim = *fb) != NULL)
 3619             {
 3620               if (SINGLE_THREAD_P)
 3621             *fb = tc_victim->fd;
 3622               else
 3623             {
 3624               REMOVE_FB (fb, pp, tc_victim);
 3625               if (__glibc_unlikely (tc_victim == NULL))
 3626                 break;
 3627             }
 3628               tcache_put (tc_victim, tc_idx);
 3629             }
 3630         }
 3631 #endif
 3632           void *p = chunk2mem (victim);
 3633           alloc_perturb (p, bytes);
 3634           return p;
 3635         }
 3636     }
 3637     }
 3638 
 3639   /*
 3640      If a small request, check regular bin.  Since these "smallbins"
 3641      hold one size each, no searching within bins is necessary.
 3642      (For a large request, we need to wait until unsorted chunks are
 3643      processed to find best fit. But for small ones, fits are exact
 3644      anyway, so we can check now, which is faster.)
 3645    */
 3646 
 3647   if (in_smallbin_range (nb))
 3648     {
 3649       idx = smallbin_index (nb);
 3650       bin = bin_at (av, idx);
 3651 
 3652       if ((victim = last (bin)) != bin)
 3653         {
 3654           bck = victim->bk;
 3655       if (__glibc_unlikely (bck->fd != victim))
 3656         malloc_printerr ("malloc(): smallbin double linked list corrupted");
 3657           set_inuse_bit_at_offset (victim, nb);
 3658           bin->bk = bck;
 3659           bck->fd = bin;
 3660 
 3661           if (av != &main_arena)
 3662         set_non_main_arena (victim);
 3663           check_malloced_chunk (av, victim, nb);
 3664 #if USE_TCACHE
 3665       /* While we're here, if we see other chunks of the same size,
 3666          stash them in the tcache.  */
 3667       size_t tc_idx = csize2tidx (nb);
 3668       if (tcache && tc_idx < mp_.tcache_bins)
 3669         {
 3670           mchunkptr tc_victim;
 3671 
 3672           /* While bin not empty and tcache not full, copy chunks over.  */
 3673           while (tcache->counts[tc_idx] < mp_.tcache_count
 3674              && (tc_victim = last (bin)) != bin)
 3675         {
 3676           if (tc_victim != 0)
 3677             {
 3678               bck = tc_victim->bk;
 3679               set_inuse_bit_at_offset (tc_victim, nb);
 3680               if (av != &main_arena)
 3681             set_non_main_arena (tc_victim);
 3682               bin->bk = bck;
 3683               bck->fd = bin;
 3684 
 3685               tcache_put (tc_victim, tc_idx);
 3686                 }
 3687         }
 3688         }
 3689 #endif
 3690           void *p = chunk2mem (victim);
 3691           alloc_perturb (p, bytes);
 3692           return p;
 3693         }
 3694     }
 3695 
 3696   /*
 3697      If this is a large request, consolidate fastbins before continuing.
 3698      While it might look excessive to kill all fastbins before
 3699      even seeing if there is space available, this avoids
 3700      fragmentation problems normally associated with fastbins.
 3701      Also, in practice, programs tend to have runs of either small or
 3702      large requests, but less often mixtures, so consolidation is not
 3703      invoked all that often in most programs.  And the programs in which
 3704      it is called frequently would otherwise tend to fragment.
 3705    */
 3706 
 3707   else
 3708     {
 3709       idx = largebin_index (nb);
 3710       if (atomic_load_relaxed (&av->have_fastchunks))
 3711         malloc_consolidate (av);
 3712     }
 3713 
 3714   /*
 3715      Process recently freed or remaindered chunks, taking one only if
 3716      it is an exact fit, or, if this is a small request, if the chunk is the
 3717      remainder from the most recent non-exact fit.  Place other traversed chunks in
 3718      bins.  Note that this step is the only place in any routine where
 3719      chunks are placed in bins.
 3720 
 3721      The outer loop here is needed because we might not realize until
 3722      near the end of malloc that we should have consolidated, so must
 3723      do so and retry. This happens at most once, and only when we would
 3724      otherwise need to expand memory to service a "small" request.
 3725    */
 3726 
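        /* In outline, the loop below does (illustrative summary only):

             while the unsorted list is non-empty:
               take one chunk off the list and sanity-check it;
               if it is the lone last_remainder and the request is small,
                 split it and return the front part;
               if it is an exact fit, return it (or stash it in the tcache first);
               otherwise file it into its small or large bin;
             then fall through to the best-fit bin scan further down.  */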
 3727 #if USE_TCACHE
 3728   INTERNAL_SIZE_T tcache_nb = 0;
 3729   size_t tc_idx = csize2tidx (nb);
 3730   if (tcache && tc_idx < mp_.tcache_bins)
 3731     tcache_nb = nb;
 3732   int return_cached = 0;
 3733 
 3734   tcache_unsorted_count = 0;
 3735 #endif
 3736 
 3737   for (;; )
 3738     {
 3739       int iters = 0;
 3740       while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
 3741         {
 3742           bck = victim->bk;
 3743           size = chunksize (victim);
 3744           mchunkptr next = chunk_at_offset (victim, size);
 3745 
 3746           if (__glibc_unlikely (size <= 2 * SIZE_SZ)
 3747               || __glibc_unlikely (size > av->system_mem))
 3748             malloc_printerr ("malloc(): invalid size (unsorted)");
 3749           if (__glibc_unlikely (chunksize_nomask (next) < 2 * SIZE_SZ)
 3750               || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
 3751             malloc_printerr ("malloc(): invalid next size (unsorted)");
 3752           if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
 3753             malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
 3754           if (__glibc_unlikely (bck->fd != victim)
 3755               || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
 3756             malloc_printerr ("malloc(): unsorted double linked list corrupted");
 3757           if (__glibc_unlikely (prev_inuse (next)))
 3758             malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
 3759 
 3760           /*
 3761              If a small request, try to use last remainder if it is the
 3762              only chunk in unsorted bin.  This helps promote locality for
 3763              runs of consecutive small requests. This is the only
 3764              exception to best-fit, and applies only when there is
 3765              no exact fit for a small chunk.
 3766            */
 3767 
 3768           if (in_smallbin_range (nb) &&
 3769               bck == unsorted_chunks (av) &&
 3770               victim == av->last_remainder &&
 3771               (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
 3772             {
 3773               /* split and reattach remainder */
 3774               remainder_size = size - nb;
 3775               remainder = chunk_at_offset (victim, nb);
 3776               unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
 3777               av->last_remainder = remainder;
 3778               remainder->bk = remainder->fd = unsorted_chunks (av);
 3779               if (!in_smallbin_range (remainder_size))
 3780                 {
 3781                   remainder->fd_nextsize = NULL;
 3782                   remainder->bk_nextsize = NULL;
 3783                 }
 3784 
 3785               set_head (victim, nb | PREV_INUSE |
 3786                         (av != &main_arena ? NON_MAIN_ARENA : 0));
 3787               set_head (remainder, remainder_size | PREV_INUSE);
 3788               set_foot (remainder, remainder_size);
 3789 
 3790               check_malloced_chunk (av, victim, nb);
 3791               void *p = chunk2mem (victim);
 3792               alloc_perturb (p, bytes);
 3793               return p;
 3794             }
 3795 
 3796           /* remove from unsorted list */
 3797           if (__glibc_unlikely (bck->fd != victim))
 3798             malloc_printerr ("malloc(): corrupted unsorted chunks 3");
 3799           unsorted_chunks (av)->bk = bck;
 3800           bck->fd = unsorted_chunks (av);
 3801 
 3802           /* Take now instead of binning if exact fit */
 3803 
 3804           if (size == nb)
 3805             {
 3806               set_inuse_bit_at_offset (victim, size);
 3807               if (av != &main_arena)
 3808         set_non_main_arena (victim);
 3809 #if USE_TCACHE
 3810           /* Fill cache first, return to user only if cache fills.
 3811          We may return one of these chunks later.  */
 3812           if (tcache_nb
 3813           && tcache->counts[tc_idx] < mp_.tcache_count)
 3814         {
 3815           tcache_put (victim, tc_idx);
 3816           return_cached = 1;
 3817           continue;
 3818         }
 3819           else
 3820         {
 3821 #endif
 3822               check_malloced_chunk (av, victim, nb);
 3823               void *p = chunk2mem (victim);
 3824               alloc_perturb (p, bytes);
 3825               return p;
 3826 #if USE_TCACHE
 3827         }
 3828 #endif
 3829             }
 3830 
 3831           /* place chunk in bin */
 3832 
 3833           if (in_smallbin_range (size))
 3834             {
 3835               victim_index = smallbin_index (size);
 3836               bck = bin_at (av, victim_index);
 3837               fwd = bck->fd;
 3838             }
 3839           else
 3840             {
 3841               victim_index = largebin_index (size);
 3842               bck = bin_at (av, victim_index);
 3843               fwd = bck->fd;
 3844 
 3845               /* maintain large bins in sorted order */
 3846               if (fwd != bck)
 3847                 {
 3848                   /* Or with inuse bit to speed comparisons */
 3849                   size |= PREV_INUSE;
 3850                   /* if smaller than smallest, bypass loop below */
 3851                   assert (chunk_main_arena (bck->bk));
 3852                   if ((unsigned long) (size)
 3853               < (unsigned long) chunksize_nomask (bck->bk))
 3854                     {
 3855                       fwd = bck;
 3856                       bck = bck->bk;
 3857 
 3858                       victim->fd_nextsize = fwd->fd;
 3859                       victim->bk_nextsize = fwd->fd->bk_nextsize;
 3860                       fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
 3861                     }
 3862                   else
 3863                     {
 3864                       assert (chunk_main_arena (fwd));
 3865                       while ((unsigned long) size < chunksize_nomask (fwd))
 3866                         {
 3867                           fwd = fwd->fd_nextsize;
 3868               assert (chunk_main_arena (fwd));
 3869                         }
 3870 
 3871                       if ((unsigned long) size
 3872               == (unsigned long) chunksize_nomask (fwd))
 3873                         /* Always insert in the second position.  */
 3874                         fwd = fwd->fd;
 3875                       else
 3876                         {
 3877                           victim->fd_nextsize = fwd;
 3878                           victim->bk_nextsize = fwd->bk_nextsize;
 3879                           fwd->bk_nextsize = victim;
 3880                           victim->bk_nextsize->fd_nextsize = victim;
 3881                         }
 3882                       bck = fwd->bk;
 3883                     }
 3884                 }
 3885               else
 3886                 victim->fd_nextsize = victim->bk_nextsize = victim;
 3887             }
 3888 
 3889           mark_bin (av, victim_index);
 3890           victim->bk = bck;
 3891           victim->fd = fwd;
 3892           fwd->bk = victim;
 3893           bck->fd = victim;
 3894 
 3895 #if USE_TCACHE
 3896       /* If we've processed as many chunks as we're allowed while
 3897      filling the cache, return one of the cached ones.  */
 3898       ++tcache_unsorted_count;
 3899       if (return_cached
 3900       && mp_.tcache_unsorted_limit > 0
 3901       && tcache_unsorted_count > mp_.tcache_unsorted_limit)
 3902     {
 3903       return tcache_get (tc_idx);
 3904     }
 3905 #endif
 3906 
 3907 #define MAX_ITERS       10000
 3908           if (++iters >= MAX_ITERS)
 3909             break;
 3910         }
 3911 
 3912 #if USE_TCACHE
 3913       /* If all the small chunks we found ended up cached, return one now.  */
 3914       if (return_cached)
 3915     {
 3916       return tcache_get (tc_idx);
 3917     }
 3918 #endif
 3919 
 3920       /*
 3921          If a large request, scan through the chunks of current bin in
 3922          sorted order to find smallest that fits.  Use the skip list for this.
 3923        */
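            /* Illustrative sketch of a large bin (not normative): the fd/bk ring
               holds chunks in decreasing size order, and fd_nextsize/bk_nextsize
               link only the first chunk of each distinct size, e.g.

                 bin <-> 0x420 <-> 0x420 <-> 0x400 <-> 0x3f0 <-> 0x3f0 <-> bin
                           |                   |         |
                           +----- nextsize ----+---------+

               so the scan below can step between size classes without visiting
               every chunk.  */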
 3924 
 3925       if (!in_smallbin_range (nb))
 3926         {
 3927           bin = bin_at (av, idx);
 3928 
 3929           /* skip scan if empty or largest chunk is too small */
 3930           if ((victim = first (bin)) != bin
 3931           && (unsigned long) chunksize_nomask (victim)
 3932             >= (unsigned long) (nb))
 3933             {
 3934               victim = victim->bk_nextsize;
 3935               while (((unsigned long) (size = chunksize (victim)) <
 3936                       (unsigned long) (nb)))
 3937                 victim = victim->bk_nextsize;
 3938 
 3939               /* Avoid removing the first entry for a size so that the skip
 3940                  list does not have to be rerouted.  */
 3941               if (victim != last (bin)
 3942           && chunksize_nomask (victim)
 3943             == chunksize_nomask (victim->fd))
 3944                 victim = victim->fd;
 3945 
 3946               remainder_size = size - nb;
 3947               unlink_chunk (av, victim);
 3948 
 3949               /* Exhaust */
 3950               if (remainder_size < MINSIZE)
 3951                 {
 3952                   set_inuse_bit_at_offset (victim, size);
 3953                   if (av != &main_arena)
 3954             set_non_main_arena (victim);
 3955                 }
 3956               /* Split */
 3957               else
 3958                 {
 3959                   remainder = chunk_at_offset (victim, nb);
 3960                   /* We cannot assume the unsorted list is empty and therefore
 3961                      have to perform a complete insert here.  */
 3962                   bck = unsorted_chunks (av);
 3963                   fwd = bck->fd;
 3964           if (__glibc_unlikely (fwd->bk != bck))
 3965             malloc_printerr ("malloc(): corrupted unsorted chunks");
 3966                   remainder->bk = bck;
 3967                   remainder->fd = fwd;
 3968                   bck->fd = remainder;
 3969                   fwd->bk = remainder;
 3970                   if (!in_smallbin_range (remainder_size))
 3971                     {
 3972                       remainder->fd_nextsize = NULL;
 3973                       remainder->bk_nextsize = NULL;
 3974                     }
 3975                   set_head (victim, nb | PREV_INUSE |
 3976                             (av != &main_arena ? NON_MAIN_ARENA : 0));
 3977                   set_head (remainder, remainder_size | PREV_INUSE);
 3978                   set_foot (remainder, remainder_size);
 3979                 }
 3980               check_malloced_chunk (av, victim, nb);
 3981               void *p = chunk2mem (victim);
 3982               alloc_perturb (p, bytes);
 3983               return p;
 3984             }
 3985         }
 3986 
 3987       /*
 3988          Search for a chunk by scanning bins, starting with next largest
 3989          bin. This search is strictly by best-fit; i.e., the smallest
 3990          (with ties going to approximately the least recently used) chunk
 3991          that fits is selected.
 3992 
 3993          The bitmap avoids needing to check that most blocks are nonempty.
 3994          The particular case of skipping all bins during warm-up phases
 3995          when no chunks have been returned yet is faster than it might look.
 3996        */
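            /* For orientation (illustrative; assumes the usual BINMAPSHIFT of 5,
               i.e. 32 bins per binmap word):

                 idx == 70  ->  block = 70 >> 5 == 2,  bit = 1 << (70 & 31) == 1 << 6

               A clear bit means the bin is definitely empty; a set bit only means
               it is probably non-empty, which is why an empty bin found below just
               clears its bit and moves on.  */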
 3997 
 3998       ++idx;
 3999       bin = bin_at (av, idx);
 4000       block = idx2block (idx);
 4001       map = av->binmap[block];
 4002       bit = idx2bit (idx);
 4003 
 4004       for (;; )
 4005         {
 4006           /* Skip rest of block if there are no more set bits in this block.  */
 4007           if (bit > map || bit == 0)
 4008             {
 4009               do
 4010                 {
 4011                   if (++block >= BINMAPSIZE) /* out of bins */
 4012                     goto use_top;
 4013                 }
 4014               while ((map = av->binmap[block]) == 0);
 4015 
 4016               bin = bin_at (av, (block << BINMAPSHIFT));
 4017               bit = 1;
 4018             }
 4019 
 4020           /* Advance to bin with set bit. There must be one. */
 4021           while ((bit & map) == 0)
 4022             {
 4023               bin = next_bin (bin);
 4024               bit <<= 1;
 4025               assert (bit != 0);
 4026             }
 4027 
 4028           /* Inspect the bin. It is likely to be non-empty */
 4029           victim = last (bin);
 4030 
 4031           /*  If a false alarm (empty bin), clear the bit. */
 4032           if (victim == bin)
 4033             {
 4034               av->binmap[block] = map &= ~bit; /* Write through */
 4035               bin = next_bin (bin);
 4036               bit <<= 1;
 4037             }
 4038 
 4039           else
 4040             {
 4041               size = chunksize (victim);
 4042 
 4043               /*  We know the first chunk in this bin is big enough to use. */
 4044               assert ((unsigned long) (size) >= (unsigned long) (nb));
 4045 
 4046               remainder_size = size - nb;
 4047 
 4048               /* unlink */
 4049               unlink_chunk (av, victim);
 4050 
 4051               /* Exhaust */
 4052               if (remainder_size < MINSIZE)
 4053                 {
 4054                   set_inuse_bit_at_offset (victim, size);
 4055                   if (av != &main_arena)
 4056             set_non_main_arena (victim);
 4057                 }
 4058 
 4059               /* Split */
 4060               else
 4061                 {
 4062                   remainder = chunk_at_offset (victim, nb);
 4063 
 4064                   /* We cannot assume the unsorted list is empty and therefore
 4065                      have to perform a complete insert here.  */
 4066                   bck = unsorted_chunks (av);
 4067                   fwd = bck->fd;
 4068           if (__glibc_unlikely (fwd->bk != bck))
 4069             malloc_printerr ("malloc(): corrupted unsorted chunks 2");
 4070                   remainder->bk = bck;
 4071                   remainder->fd = fwd;
 4072                   bck->fd = remainder;
 4073                   fwd->bk = remainder;
 4074 
 4075                   /* advertise as last remainder */
 4076                   if (in_smallbin_range (nb))
 4077                     av->last_remainder = remainder;
 4078                   if (!in_smallbin_range (remainder_size))
 4079                     {
 4080                       remainder->fd_nextsize = NULL;
 4081                       remainder->bk_nextsize = NULL;
 4082                     }
 4083                   set_head (victim, nb | PREV_INUSE |
 4084                             (av != &main_arena ? NON_MAIN_ARENA : 0));
 4085                   set_head (remainder, remainder_size | PREV_INUSE);
 4086                   set_foot (remainder, remainder_size);
 4087                 }
 4088               check_malloced_chunk (av, victim, nb);
 4089               void *p = chunk2mem (victim);
 4090               alloc_perturb (p, bytes);
 4091               return p;
 4092             }
 4093         }
 4094 
 4095     use_top:
 4096       /*
 4097          If large enough, split off the chunk bordering the end of memory
 4098          (held in av->top). Note that this is in accord with the best-fit
 4099          search rule.  In effect, av->top is treated as larger (and thus
 4100          less well fitting) than any other available chunk since it can
 4101          be extended to be as large as necessary (up to system
 4102          limitations).
 4103 
 4104          We require that av->top always exists (i.e., has size >=
 4105          MINSIZE) after initialization, so if it would otherwise be
 4106          exhausted by current request, it is replenished. (The main
 4107          reason for ensuring it exists is that we may need MINSIZE space
 4108          to put in fenceposts in sysmalloc.)
 4109        */
 4110 
 4111       victim = av->top;
 4112       size = chunksize (victim);
 4113 
 4114       if (__glibc_unlikely (size > av->system_mem))
 4115         malloc_printerr ("malloc(): corrupted top size");
 4116 
 4117       if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
 4118         {
 4119           remainder_size = size - nb;
 4120           remainder = chunk_at_offset (victim, nb);
 4121           av->top = remainder;
 4122           set_head (victim, nb | PREV_INUSE |
 4123                     (av != &main_arena ? NON_MAIN_ARENA : 0));
 4124           set_head (remainder, remainder_size | PREV_INUSE);
 4125 
 4126           check_malloced_chunk (av, victim, nb);
 4127           void *p = chunk2mem (victim);
 4128           alloc_perturb (p, bytes);
 4129           return p;
 4130         }
 4131 
 4132       /* When we are using atomic ops to free fast chunks we can get
 4133          here for all block sizes.  */
 4134       else if (atomic_load_relaxed (&av->have_fastchunks))
 4135         {
 4136           malloc_consolidate (av);
 4137           /* restore original bin index */
 4138           if (in_smallbin_range (nb))
 4139             idx = smallbin_index (nb);
 4140           else
 4141             idx = largebin_index (nb);
 4142         }
 4143 
 4144       /*
 4145          Otherwise, relay to handle system-dependent cases
 4146        */
 4147       else
 4148         {
 4149           void *p = sysmalloc (nb, av);
 4150           if (p != NULL)
 4151             alloc_perturb (p, bytes);
 4152           return p;
 4153         }
 4154     }
 4155 }
 4156 
 4157 /*
 4158    ------------------------------ free ------------------------------
 4159  */
 4160 
 4161 static void
 4162 _int_free (mstate av, mchunkptr p, int have_lock)
 4163 {
 4164   INTERNAL_SIZE_T size;        /* its size */
 4165   mfastbinptr *fb;             /* associated fastbin */
 4166   mchunkptr nextchunk;         /* next contiguous chunk */
 4167   INTERNAL_SIZE_T nextsize;    /* its size */
 4168   int nextinuse;               /* true if nextchunk is used */
 4169   INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
 4170   mchunkptr bck;               /* misc temp for linking */
 4171   mchunkptr fwd;               /* misc temp for linking */
 4172 
 4173   size = chunksize (p);
 4174 
 4175   /* Little security check which won't hurt performance: the
 4176      allocator never wraps around at the end of the address space.
 4177      Therefore we can exclude some size values which might appear
 4178      here by accident or by "design" from some intruder.  */
 4179   if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
 4180       || __builtin_expect (misaligned_chunk (p), 0))
 4181     malloc_printerr ("free(): invalid pointer");
 4182   /* We know that each chunk is at least MINSIZE bytes in size and a
 4183      multiple of MALLOC_ALIGNMENT.  */
 4184   if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
 4185     malloc_printerr ("free(): invalid size");
 4186 
 4187   check_inuse_chunk(av, p);
 4188 
 4189 #if USE_TCACHE
 4190   {
 4191     size_t tc_idx = csize2tidx (size);
 4192     if (tcache != NULL && tc_idx < mp_.tcache_bins)
 4193       {
 4194     /* Check to see if it's already in the tcache.  */
 4195     tcache_entry *e = (tcache_entry *) chunk2mem (p);
 4196 
 4197     /* This test succeeds on double free.  However, we don't 100%
 4198        trust it (it also matches random payload data at a 1 in
 4199        2^(bits in size_t) chance), so verify it's not an unlikely
 4200        coincidence before aborting.  */
 4201     if (__glibc_unlikely (e->key == tcache))
 4202       {
 4203         tcache_entry *tmp;
 4204         LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
 4205         for (tmp = tcache->entries[tc_idx];
 4206          tmp;
 4207          tmp = tmp->next)
 4208           if (tmp == e)
 4209         malloc_printerr ("free(): double free detected in tcache 2");
 4210         /* If we get here, it was a coincidence.  We've wasted a
 4211            few cycles, but don't abort.  */
 4212       }
 4213 
 4214     if (tcache->counts[tc_idx] < mp_.tcache_count)
 4215       {
 4216         tcache_put (p, tc_idx);
 4217         return;
 4218       }
 4219       }
 4220   }
 4221 #endif
 4222 
 4223   /*
 4224     If eligible, place chunk on a fastbin so it can be found
 4225     and used quickly in malloc.
 4226   */
 4227 
 4228   if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
 4229 
 4230 #if TRIM_FASTBINS
 4231       /*
 4232     If TRIM_FASTBINS set, don't place chunks
 4233     bordering top into fastbins
 4234       */
 4235       && (chunk_at_offset(p, size) != av->top)
 4236 #endif
 4237       ) {
 4238 
 4239     if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
 4240               <= 2 * SIZE_SZ, 0)
 4241     || __builtin_expect (chunksize (chunk_at_offset (p, size))
 4242                  >= av->system_mem, 0))
 4243       {
 4244     bool fail = true;
 4245     /* We might not have a lock at this point and concurrent modifications
 4246        of system_mem might result in a false positive.  Redo the test after
 4247        getting the lock.  */
 4248     if (!have_lock)
 4249       {
 4250         __libc_lock_lock (av->mutex);
 4251         fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
 4252             || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
 4253         __libc_lock_unlock (av->mutex);
 4254       }
 4255 
 4256     if (fail)
 4257       malloc_printerr ("free(): invalid next size (fast)");
 4258       }
 4259 
 4260     free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 4261 
 4262     atomic_store_relaxed (&av->have_fastchunks, true);
 4263     unsigned int idx = fastbin_index(size);
 4264     fb = &fastbin (av, idx);
 4265 
 4266     /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
 4267     mchunkptr old = *fb, old2;
 4268 
 4269     if (SINGLE_THREAD_P)
 4270       {
 4271     /* Check that the top of the bin is not the record we are going to
 4272        add (i.e., double free).  */
 4273     if (__builtin_expect (old == p, 0))
 4274       malloc_printerr ("double free or corruption (fasttop)");
 4275     p->fd = old;
 4276     *fb = p;
 4277       }
 4278     else
 4279       do
 4280     {
 4281       /* Check that the top of the bin is not the record we are going to
 4282          add (i.e., double free).  */
 4283       if (__builtin_expect (old == p, 0))
 4284         malloc_printerr ("double free or corruption (fasttop)");
 4285       p->fd = old2 = old;
 4286     }
 4287       while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
 4288          != old2);
 4289 
 4290     /* Check that size of fastbin chunk at the top is the same as
 4291        size of the chunk that we are adding.  We can dereference OLD
 4292        only if we have the lock, otherwise it might have already been
 4293        allocated again.  */
 4294     if (have_lock && old != NULL
 4295     && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
 4296       malloc_printerr ("invalid fastbin entry (free)");
 4297   }
 4298 
 4299   /*
 4300     Consolidate other non-mmapped chunks as they arrive.
 4301   */
 4302 
 4303   else if (!chunk_is_mmapped(p)) {
 4304 
 4305     /* If we're single-threaded, don't lock the arena.  */
 4306     if (SINGLE_THREAD_P)
 4307       have_lock = true;
 4308 
 4309     if (!have_lock)
 4310       __libc_lock_lock (av->mutex);
 4311 
 4312     nextchunk = chunk_at_offset(p, size);
 4313 
 4314     /* Lightweight tests: check whether the block is already the
 4315        top block.  */
 4316     if (__glibc_unlikely (p == av->top))
 4317       malloc_printerr ("double free or corruption (top)");
 4318     /* Or whether the next chunk is beyond the boundaries of the arena.  */
 4319     if (__builtin_expect (contiguous (av)
 4320               && (char *) nextchunk
 4321               >= ((char *) av->top + chunksize(av->top)), 0))
 4322     malloc_printerr ("double free or corruption (out)");
 4323     /* Or whether the block is actually not marked used.  */
 4324     if (__glibc_unlikely (!prev_inuse(nextchunk)))
 4325       malloc_printerr ("double free or corruption (!prev)");
 4326 
 4327     nextsize = chunksize(nextchunk);
 4328     if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
 4329     || __builtin_expect (nextsize >= av->system_mem, 0))
 4330       malloc_printerr ("free(): invalid next size (normal)");
 4331 
 4332     free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 4333 
 4334     /* consolidate backward */
 4335     if (!prev_inuse(p)) {
 4336       prevsize = prev_size (p);
 4337       size += prevsize;
 4338       p = chunk_at_offset(p, -((long) prevsize));
 4339       if (__glibc_unlikely (chunksize(p) != prevsize))
 4340         malloc_printerr ("corrupted size vs. prev_size while consolidating");
 4341       unlink_chunk (av, p);
 4342     }
 4343 
 4344     if (nextchunk != av->top) {
 4345       /* get and clear inuse bit */
 4346       nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
 4347 
 4348       /* consolidate forward */
 4349       if (!nextinuse) {
 4350     unlink_chunk (av, nextchunk);
 4351     size += nextsize;
 4352       } else
 4353     clear_inuse_bit_at_offset(nextchunk, 0);
 4354 
 4355       /*
 4356     Place the chunk in unsorted chunk list. Chunks are
 4357     not placed into regular bins until after they have
 4358     been given one chance to be used in malloc.
 4359       */
 4360 
 4361       bck = unsorted_chunks(av);
 4362       fwd = bck->fd;
 4363       if (__glibc_unlikely (fwd->bk != bck))
 4364     malloc_printerr ("free(): corrupted unsorted chunks");
 4365       p->fd = fwd;
 4366       p->bk = bck;
 4367       if (!in_smallbin_range(size))
 4368     {
 4369       p->fd_nextsize = NULL;
 4370       p->bk_nextsize = NULL;
 4371     }
 4372       bck->fd = p;
 4373       fwd->bk = p;
 4374 
 4375       set_head(p, size | PREV_INUSE);
 4376       set_foot(p, size);
 4377 
 4378       check_free_chunk(av, p);
 4379     }
 4380 
 4381     /*
 4382       If the chunk borders the current high end of memory,
 4383       consolidate into top
 4384     */
 4385 
 4386     else {
 4387       size += nextsize;
 4388       set_head(p, size | PREV_INUSE);
 4389       av->top = p;
 4390       check_chunk(av, p);
 4391     }
 4392 
 4393     /*
 4394       If freeing a large space, consolidate possibly-surrounding
 4395       chunks. Then, if the total unused topmost memory exceeds trim
 4396       threshold, ask malloc_trim to reduce top.
 4397 
 4398       Unless max_fast is 0, we don't know if there are fastbins
 4399       bordering top, so we cannot tell for sure whether threshold
 4400       has been reached unless fastbins are consolidated.  But we
 4401       don't want to consolidate on each free.  As a compromise,
 4402       consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
 4403       is reached.
 4404     */
 4405 
 4406     if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
 4407       if (atomic_load_relaxed (&av->have_fastchunks))
 4408     malloc_consolidate(av);
 4409 
 4410       if (av == &main_arena) {
 4411 #ifndef MORECORE_CANNOT_TRIM
 4412     if ((unsigned long)(chunksize(av->top)) >=
 4413         (unsigned long)(mp_.trim_threshold))
 4414       systrim(mp_.top_pad, av);
 4415 #endif
 4416       } else {
 4417     /* Always try heap_trim(), even if the top chunk is not
 4418        large, because the corresponding heap might go away.  */
 4419     heap_info *heap = heap_for_ptr(top(av));
 4420 
 4421     assert(heap->ar_ptr == av);
 4422     heap_trim(heap, mp_.top_pad);
 4423       }
 4424     }
 4425 
 4426     if (!have_lock)
 4427       __libc_lock_unlock (av->mutex);
 4428   }
 4429   /*
 4430     If the chunk was allocated via mmap, release via munmap().
 4431   */
 4432 
 4433   else {
 4434     munmap_chunk (p);
 4435   }
 4436 }
 4437 
 4438 /*
 4439   ------------------------- malloc_consolidate -------------------------
 4440 
 4441   malloc_consolidate is a specialized version of free() that tears
 4442   down chunks held in fastbins.  Free itself cannot be used for this
 4443   purpose since, among other things, it might place chunks back onto
 4444   fastbins.  So, instead, we need to use a minor variant of the same
 4445   code.
 4446 */
 4447 
 4448 static void malloc_consolidate(mstate av)
 4449 {
 4450   mfastbinptr*    fb;                 /* current fastbin being consolidated */
 4451   mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
 4452   mchunkptr       p;                  /* current chunk being consolidated */
 4453   mchunkptr       nextp;              /* next chunk to consolidate */
 4454   mchunkptr       unsorted_bin;       /* bin header */
 4455   mchunkptr       first_unsorted;     /* chunk to link to */
 4456 
 4457   /* These have same use as in free() */
 4458   mchunkptr       nextchunk;
 4459   INTERNAL_SIZE_T size;
 4460   INTERNAL_SIZE_T nextsize;
 4461   INTERNAL_SIZE_T prevsize;
 4462   int             nextinuse;
 4463 
 4464   atomic_store_relaxed (&av->have_fastchunks, false);
 4465 
 4466   unsorted_bin = unsorted_chunks(av);
 4467 
 4468   /*
 4469     Remove each chunk from its fast bin and consolidate it, then
 4470     place it in the unsorted bin. Among other reasons for doing this,
 4471     placing chunks in the unsorted bin avoids needing to calculate
 4472     actual bins until malloc is sure that chunks aren't immediately
 4473     going to be reused anyway.
 4474   */
 4475 
 4476   maxfb = &fastbin (av, NFASTBINS - 1);
 4477   fb = &fastbin (av, 0);
 4478   do {
 4479     p = atomic_exchange_acq (fb, NULL);
 4480     if (p != 0) {
 4481       do {
 4482     {
 4483       unsigned int idx = fastbin_index (chunksize (p));
 4484       if ((&fastbin (av, idx)) != fb)
 4485         malloc_printerr ("malloc_consolidate(): invalid chunk size");
 4486     }
 4487 
 4488     check_inuse_chunk(av, p);
 4489     nextp = p->fd;
 4490 
 4491     /* Slightly streamlined version of consolidation code in free() */
 4492     size = chunksize (p);
 4493     nextchunk = chunk_at_offset(p, size);
 4494     nextsize = chunksize(nextchunk);
 4495 
 4496     if (!prev_inuse(p)) {
 4497       prevsize = prev_size (p);
 4498       size += prevsize;
 4499       p = chunk_at_offset(p, -((long) prevsize));
 4500       if (__glibc_unlikely (chunksize(p) != prevsize))
 4501         malloc_printerr ("corrupted size vs. prev_size in fastbins");
 4502       unlink_chunk (av, p);
 4503     }
 4504 
 4505     if (nextchunk != av->top) {
 4506       nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
 4507 
 4508       if (!nextinuse) {
 4509         size += nextsize;
 4510         unlink_chunk (av, nextchunk);
 4511       } else
 4512         clear_inuse_bit_at_offset(nextchunk, 0);
 4513 
 4514       first_unsorted = unsorted_bin->fd;
 4515       unsorted_bin->fd = p;
 4516       first_unsorted->bk = p;
 4517 
 4518       if (!in_smallbin_range (size)) {
 4519         p->fd_nextsize = NULL;
 4520         p->bk_nextsize = NULL;
 4521       }
 4522 
 4523       set_head(p, size | PREV_INUSE);
 4524       p->bk = unsorted_bin;
 4525       p->fd = first_unsorted;
 4526       set_foot(p, size);
 4527     }
 4528 
 4529     else {
 4530       size += nextsize;
 4531       set_head(p, size | PREV_INUSE);
 4532       av->top = p;
 4533     }
 4534 
 4535       } while ( (p = nextp) != 0);
 4536 
 4537     }
 4538   } while (fb++ != maxfb);
 4539 }
 4540 
 4541 /*
 4542   ------------------------------ realloc ------------------------------
 4543 */
 4544 
 4545 void*
 4546 _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
 4547          INTERNAL_SIZE_T nb)
 4548 {
 4549   mchunkptr        newp;            /* chunk to return */
 4550   INTERNAL_SIZE_T  newsize;         /* its size */
 4551   void*          newmem;          /* corresponding user mem */
 4552 
 4553   mchunkptr        next;            /* next contiguous chunk after oldp */
 4554 
 4555   mchunkptr        remainder;       /* extra space at end of newp */
 4556   unsigned long    remainder_size;  /* its size */
 4557 
 4558   /* oldmem size */
 4559   if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
 4560       || __builtin_expect (oldsize >= av->system_mem, 0))
 4561     malloc_printerr ("realloc(): invalid old size");
 4562 
 4563   check_inuse_chunk (av, oldp);
 4564 
 4565   /* All callers already filter out mmap'ed chunks.  */
 4566   assert (!chunk_is_mmapped (oldp));
 4567 
 4568   next = chunk_at_offset (oldp, oldsize);
 4569   INTERNAL_SIZE_T nextsize = chunksize (next);
 4570   if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
 4571       || __builtin_expect (nextsize >= av->system_mem, 0))
 4572     malloc_printerr ("realloc(): invalid next size");
 4573 
 4574   if ((unsigned long) (oldsize) >= (unsigned long) (nb))
 4575     {
 4576       /* already big enough; split below */
 4577       newp = oldp;
 4578       newsize = oldsize;
 4579     }
 4580 
 4581   else
 4582     {
 4583       /* Try to expand forward into top */
 4584       if (next == av->top &&
 4585           (unsigned long) (newsize = oldsize + nextsize) >=
 4586           (unsigned long) (nb + MINSIZE))
 4587         {
 4588           set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
 4589           av->top = chunk_at_offset (oldp, nb);
 4590           set_head (av->top, (newsize - nb) | PREV_INUSE);
 4591           check_inuse_chunk (av, oldp);
 4592           return chunk2mem (oldp);
 4593         }
 4594 
 4595       /* Try to expand forward into next chunk;  split off remainder below */
 4596       else if (next != av->top &&
 4597                !inuse (next) &&
 4598                (unsigned long) (newsize = oldsize + nextsize) >=
 4599                (unsigned long) (nb))
 4600         {
 4601           newp = oldp;
 4602           unlink_chunk (av, next);
 4603         }
 4604 
 4605       /* allocate, copy, free */
 4606       else
 4607         {
 4608           newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
 4609           if (newmem == 0)
 4610             return 0; /* propagate failure */
 4611 
 4612           newp = mem2chunk (newmem);
 4613           newsize = chunksize (newp);
 4614 
 4615           /*
 4616              Avoid copy if newp is next chunk after oldp.
 4617            */
 4618           if (newp == next)
 4619             {
 4620               newsize += oldsize;
 4621               newp = oldp;
 4622             }
 4623           else
 4624             {
 4625           memcpy (newmem, chunk2mem (oldp), oldsize - SIZE_SZ);
 4626               _int_free (av, oldp, 1);
 4627               check_inuse_chunk (av, newp);
 4628               return chunk2mem (newp);
 4629             }
 4630         }
 4631     }
 4632 
 4633   /* If possible, free extra space in old or extended chunk */
 4634 
 4635   assert ((unsigned long) (newsize) >= (unsigned long) (nb));
 4636 
 4637   remainder_size = newsize - nb;
 4638 
 4639   if (remainder_size < MINSIZE)   /* not enough extra to split off */
 4640     {
 4641       set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
 4642       set_inuse_bit_at_offset (newp, newsize);
 4643     }
 4644   else   /* split remainder */
 4645     {
 4646       remainder = chunk_at_offset (newp, nb);
 4647       set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
 4648       set_head (remainder, remainder_size | PREV_INUSE |
 4649                 (av != &main_arena ? NON_MAIN_ARENA : 0));
 4650       /* Mark remainder as inuse so free() won't complain */
 4651       set_inuse_bit_at_offset (remainder, remainder_size);
 4652       _int_free (av, remainder, 1);
 4653     }
 4654 
 4655   check_inuse_chunk (av, newp);
 4656   return chunk2mem (newp);
 4657 }
 4658 
 4659 /*
 4660    ------------------------------ memalign ------------------------------
 4661  */
 4662 
 4663 static void *
 4664 _int_memalign (mstate av, size_t alignment, size_t bytes)
 4665 {
 4666   INTERNAL_SIZE_T nb;             /* padded  request size */
 4667   char *m;                        /* memory returned by malloc call */
 4668   mchunkptr p;                    /* corresponding chunk */
 4669   char *brk;                      /* alignment point within p */
 4670   mchunkptr newp;                 /* chunk to return */
 4671   INTERNAL_SIZE_T newsize;        /* its size */
 4672   INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
 4673   mchunkptr remainder;            /* spare room at end to split off */
 4674   unsigned long remainder_size;   /* its size */
 4675   INTERNAL_SIZE_T size;
 4676 
 4677 
 4678 
 4679   checked_request2size (bytes, nb);
 4680 
 4681   /*
 4682      Strategy: find a spot within that chunk that meets the alignment
 4683      request, and then possibly free the leading and trailing space.
 4684    */
 4685 
 4686 
 4687   /* Check for overflow.  */
 4688   if (nb > SIZE_MAX - alignment - MINSIZE)
 4689     {
 4690       __set_errno (ENOMEM);
 4691       return 0;
 4692     }
 4693 
 4694   /* Call malloc with worst case padding to hit alignment. */
 4695 
 4696   m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
 4697 
 4698   if (m == 0)
 4699     return 0;           /* propagate failure */
 4700 
 4701   p = mem2chunk (m);
 4702 
 4703   if ((((unsigned long) (m)) % alignment) != 0)   /* misaligned */
 4704 
 4705     { /*
 4706          Find an aligned spot inside chunk.  Since we need to give back
 4707          leading space in a chunk of at least MINSIZE, if the first
 4708          calculation places us at a spot with less than MINSIZE leader,
 4709          we can move to the next aligned spot -- we've allocated enough
 4710          total room so that this is always possible.
 4711        */
 4712       brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
 4713                                 - ((signed long) alignment));
 4714       if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
 4715         brk += alignment;
 4716 
 4717       newp = (mchunkptr) brk;
 4718       leadsize = brk - (char *) (p);
 4719       newsize = chunksize (p) - leadsize;
 4720 
 4721       /* For mmapped chunks, just adjust offset */
 4722       if (chunk_is_mmapped (p))
 4723         {
 4724           set_prev_size (newp, prev_size (p) + leadsize);
 4725           set_head (newp, newsize | IS_MMAPPED);
 4726           return chunk2mem (newp);
 4727         }
 4728 
 4729       /* Otherwise, give back leader, use the rest */
 4730       set_head (newp, newsize | PREV_INUSE |
 4731                 (av != &main_arena ? NON_MAIN_ARENA : 0));
 4732       set_inuse_bit_at_offset (newp, newsize);
 4733       set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
 4734       _int_free (av, p, 1);
 4735       p = newp;
 4736 
 4737       assert (newsize >= nb &&
 4738               (((unsigned long) (chunk2mem (p))) % alignment) == 0);
 4739     }
 4740 
 4741   /* Also give back spare room at the end */
 4742   if (!chunk_is_mmapped (p))
 4743     {
 4744       size = chunksize (p);
 4745       if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
 4746         {
 4747           remainder_size = size - nb;
 4748           remainder = chunk_at_offset (p, nb);
 4749           set_head (remainder, remainder_size | PREV_INUSE |
 4750                     (av != &main_arena ? NON_MAIN_ARENA : 0));
 4751           set_head_size (p, nb);
 4752           _int_free (av, remainder, 1);
 4753         }
 4754     }
 4755 
 4756   check_inuse_chunk (av, p);
 4757   return chunk2mem (p);
 4758 }
 4759 
 4760 
 4761 /*
 4762    ------------------------------ malloc_trim ------------------------------
 4763  */
 4764 
 4765 static int
 4766 mtrim (mstate av, size_t pad)
 4767 {
 4768   /* Ensure all blocks are consolidated.  */
 4769   malloc_consolidate (av);
 4770 
 4771   const size_t ps = GLRO (dl_pagesize);
 4772   int psindex = bin_index (ps);
 4773   const size_t psm1 = ps - 1;
 4774 
 4775   int result = 0;
 4776   for (int i = 1; i < NBINS; ++i)
 4777     if (i == 1 || i >= psindex)
 4778       {
 4779         mbinptr bin = bin_at (av, i);
 4780 
 4781         for (mchunkptr p = last (bin); p != bin; p = p->bk)
 4782           {
 4783             INTERNAL_SIZE_T size = chunksize (p);
 4784 
 4785             if (size > psm1 + sizeof (struct malloc_chunk))
 4786               {
 4787                 /* See whether the chunk contains at least one unused page.  */
 4788                 char *paligned_mem = (char *) (((uintptr_t) p
 4789                                                 + sizeof (struct malloc_chunk)
 4790                                                 + psm1) & ~psm1);
 4791 
 4792                 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
 4793                 assert ((char *) p + size > paligned_mem);
 4794 
 4795                 /* This is the size we could potentially free.  */
 4796                 size -= paligned_mem - (char *) p;
 4797 
 4798                 if (size > psm1)
 4799                   {
 4800 #if MALLOC_DEBUG
 4801                     /* When debugging we simulate destroying the memory
 4802                        content.  */
 4803                     memset (paligned_mem, 0x89, size & ~psm1);
 4804 #endif
 4805                     __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
 4806 
 4807                     result = 1;
 4808                   }
 4809               }
 4810           }
 4811       }
 4812 
 4813 #ifndef MORECORE_CANNOT_TRIM
 4814   return result | (av == &main_arena ? systrim (pad, av) : 0);
 4815 
 4816 #else
 4817   return result;
 4818 #endif
 4819 }
 4820 
 4821 
 4822 int
 4823 __malloc_trim (size_t s)
 4824 {
 4825   int result = 0;
 4826 
 4827   if (__malloc_initialized < 0)
 4828     ptmalloc_init ();
 4829 
 4830   mstate ar_ptr = &main_arena;
 4831   do
 4832     {
 4833       __libc_lock_lock (ar_ptr->mutex);
 4834       result |= mtrim (ar_ptr, s);
 4835       __libc_lock_unlock (ar_ptr->mutex);
 4836 
 4837       ar_ptr = ar_ptr->next;
 4838     }
 4839   while (ar_ptr != &main_arena);
 4840 
 4841   return result;
 4842 }
 4843 
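/* A minimal usage sketch for the public malloc_trim entry point above
   (illustration only; the helper name is hypothetical).  malloc_trim is
   declared in <malloc.h>; PAD is the amount of slack to keep at the top
   of the heap, and the return value is 1 if some memory was released
   back to the system, 0 otherwise.

     #include <malloc.h>

     // Hypothetical helper: ask every arena to give whole unused pages
     // back to the kernel, keeping no extra padding.
     static void shrink_heap_after_big_free (void)
     {
       int released = malloc_trim (0);
       (void) released;   // 1 if something was returned, 0 otherwise
     }
 */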
 4844 
 4845 /*
 4846    ------------------------- malloc_usable_size -------------------------
 4847  */
 4848 
 4849 static size_t
 4850 musable (void *mem)
 4851 {
 4852   mchunkptr p;
 4853   if (mem != 0)
 4854     {
 4855       p = mem2chunk (mem);
 4856 
 4857       if (__builtin_expect (using_malloc_checking == 1, 0))
 4858         return malloc_check_get_size (p);
 4859 
 4860       if (chunk_is_mmapped (p))
 4861     {
 4862       if (DUMPED_MAIN_ARENA_CHUNK (p))
 4863         return chunksize (p) - SIZE_SZ;
 4864       else
 4865         return chunksize (p) - 2 * SIZE_SZ;
 4866     }
 4867       else if (inuse (p))
 4868         return chunksize (p) - SIZE_SZ;
 4869     }
 4870   return 0;
 4871 }
 4872 
 4873 
 4874 size_t
 4875 __malloc_usable_size (void *m)
 4876 {
 4877   size_t result;
 4878 
 4879   result = musable (m);
 4880   return result;
 4881 }
 4882 
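/* A usage sketch for malloc_usable_size (illustration only; the helper is
   hypothetical).  The function is declared in <malloc.h>; the value it
   returns may exceed the size originally requested because requests are
   rounded up to chunk granularity.

     #include <malloc.h>
     #include <stdlib.h>

     // Hypothetical helper: report the real capacity of a 100-byte request.
     static size_t capacity_of_100_byte_request (void)
     {
       void *p = malloc (100);
       if (p == NULL)
         return 0;
       size_t usable = malloc_usable_size (p);   // >= 100 on success
       free (p);
       return usable;
     }
 */
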
 4883 /*
 4884    ------------------------------ mallinfo ------------------------------
 4885    Accumulate malloc statistics for arena AV into M.
 4886  */
 4887 
 4888 static void
 4889 int_mallinfo (mstate av, struct mallinfo *m)
 4890 {
 4891   size_t i;
 4892   mbinptr b;
 4893   mchunkptr p;
 4894   INTERNAL_SIZE_T avail;
 4895   INTERNAL_SIZE_T fastavail;
 4896   int nblocks;
 4897   int nfastblocks;
 4898 
 4899   check_malloc_state (av);
 4900 
 4901   /* Account for top */
 4902   avail = chunksize (av->top);
 4903   nblocks = 1;  /* top always exists */
 4904 
 4905   /* traverse fastbins */
 4906   nfastblocks = 0;
 4907   fastavail = 0;
 4908 
 4909   for (i = 0; i < NFASTBINS; ++i)
 4910     {
 4911       for (p = fastbin (av, i); p != 0; p = p->fd)
 4912         {
 4913           ++nfastblocks;
 4914           fastavail += chunksize (p);
 4915         }
 4916     }
 4917 
 4918   avail += fastavail;
 4919 
 4920   /* traverse regular bins */
 4921   for (i = 1; i < NBINS; ++i)
 4922     {
 4923       b = bin_at (av, i);
 4924       for (p = last (b); p != b; p = p->bk)
 4925         {
 4926           ++nblocks;
 4927           avail += chunksize (p);
 4928         }
 4929     }
 4930 
 4931   m->smblks += nfastblocks;
 4932   m->ordblks += nblocks;
 4933   m->fordblks += avail;
 4934   m->uordblks += av->system_mem - avail;
 4935   m->arena += av->system_mem;
 4936   m->fsmblks += fastavail;
 4937   if (av == &main_arena)
 4938     {
 4939       m->hblks = mp_.n_mmaps;
 4940       m->hblkhd = mp_.mmapped_mem;
 4941       m->usmblks = 0;
 4942       m->keepcost = chunksize (av->top);
 4943     }
 4944 }
 4945 
 4946 
 4947 struct mallinfo
 4948 __libc_mallinfo (void)
 4949 {
 4950   struct mallinfo m;
 4951   mstate ar_ptr;
 4952 
 4953   if (__malloc_initialized < 0)
 4954     ptmalloc_init ();
 4955 
 4956   memset (&m, 0, sizeof (m));
 4957   ar_ptr = &main_arena;
 4958   do
 4959     {
 4960       __libc_lock_lock (ar_ptr->mutex);
 4961       int_mallinfo (ar_ptr, &m);
 4962       __libc_lock_unlock (ar_ptr->mutex);
 4963 
 4964       ar_ptr = ar_ptr->next;
 4965     }
 4966   while (ar_ptr != &main_arena);
 4967 
 4968   return m;
 4969 }
 4970 
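/* A usage sketch for the statistics interface above (illustration only;
   the helper is hypothetical).  struct mallinfo and mallinfo() are
   declared in <malloc.h>; note that the fields are plain ints, so they
   can wrap on very large heaps.

     #include <malloc.h>
     #include <stdio.h>

     // Hypothetical helper: print a coarse heap summary.
     static void print_heap_summary (void)
     {
       struct mallinfo mi = mallinfo ();
       printf ("non-mmapped bytes: %d\n", mi.arena);
       printf ("in-use bytes     : %d\n", mi.uordblks);
       printf ("free bytes       : %d\n", mi.fordblks);
       printf ("releasable bytes : %d\n", mi.keepcost);
     }
 */
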
 4971 /*
 4972    ------------------------------ malloc_stats ------------------------------
 4973  */
 4974 
 4975 void
 4976 __malloc_stats (void)
 4977 {
 4978   int i;
 4979   mstate ar_ptr;
 4980   unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
 4981 
 4982   if (__malloc_initialized < 0)
 4983     ptmalloc_init ();
 4984   _IO_flockfile (stderr);
 4985   int old_flags2 = stderr->_flags2;
 4986   stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
 4987   for (i = 0, ar_ptr = &main_arena;; i++)
 4988     {
 4989       struct mallinfo mi;
 4990 
 4991       memset (&mi, 0, sizeof (mi));
 4992       __libc_lock_lock (ar_ptr->mutex);
 4993       int_mallinfo (ar_ptr, &mi);
 4994       fprintf (stderr, "Arena %d:\n", i);
 4995       fprintf (stderr, "system bytes     = %10u\n", (unsigned int) mi.arena);
 4996       fprintf (stderr, "in use bytes     = %10u\n", (unsigned int) mi.uordblks);
 4997 #if MALLOC_DEBUG > 1
 4998       if (i > 0)
 4999         dump_heap (heap_for_ptr (top (ar_ptr)));
 5000 #endif
 5001       system_b += mi.arena;
 5002       in_use_b += mi.uordblks;
 5003       __libc_lock_unlock (ar_ptr->mutex);
 5004       ar_ptr = ar_ptr->next;
 5005       if (ar_ptr == &main_arena)
 5006         break;
 5007     }
 5008   fprintf (stderr, "Total (incl. mmap):\n");
 5009   fprintf (stderr, "system bytes     = %10u\n", system_b);
 5010   fprintf (stderr, "in use bytes     = %10u\n", in_use_b);
 5011   fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
 5012   fprintf (stderr, "max mmap bytes   = %10lu\n",
 5013            (unsigned long) mp_.max_mmapped_mem);
 5014   stderr->_flags2 = old_flags2;
 5015   _IO_funlockfile (stderr);
 5016 }
 5017 
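/* A usage sketch for malloc_stats (illustration only; the helper is
   hypothetical).  It is declared in <malloc.h>, takes no arguments, and
   writes the per-arena report produced by the loop above to stderr.

     #include <malloc.h>

     // Hypothetical helper wrapping the one-line call.
     static void dump_arena_stats (void)
     {
       malloc_stats ();   // "Arena N:" blocks plus totals, on stderr
     }
 */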
 5018 
 5019 /*
 5020    ------------------------------ mallopt ------------------------------
 5021  */
 5022 static inline int
 5023 __always_inline
 5024 do_set_trim_threshold (size_t value)
 5025 {
 5026   LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
 5027           mp_.no_dyn_threshold);
 5028   mp_.trim_threshold = value;
 5029   mp_.no_dyn_threshold = 1;
 5030   return 1;
 5031 }
 5032 
 5033 static inline int
 5034 __always_inline
 5035 do_set_top_pad (size_t value)
 5036 {
 5037   LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
 5038           mp_.no_dyn_threshold);
 5039   mp_.top_pad = value;
 5040   mp_.no_dyn_threshold = 1;
 5041   return 1;
 5042 }
 5043 
 5044 static inline int
 5045 __always_inline
 5046 do_set_mmap_threshold (size_t value)
 5047 {
 5048   /* Forbid setting the threshold too high.  */
 5049   if (value <= HEAP_MAX_SIZE / 2)
 5050     {
 5051       LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
 5052           mp_.no_dyn_threshold);
 5053       mp_.mmap_threshold = value;
 5054       mp_.no_dyn_threshold = 1;
 5055       return 1;
 5056     }
 5057   return 0;
 5058 }
 5059 
 5060 static inline int
 5061 __always_inline
 5062 do_set_mmaps_max (int32_t value)
 5063 {
 5064   LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
 5065           mp_.no_dyn_threshold);
 5066   mp_.n_mmaps_max = value;
 5067   mp_.no_dyn_threshold = 1;
 5068   return 1;
 5069 }
 5070 
 5071 static inline int
 5072 __always_inline
 5073 do_set_mallopt_check (int32_t value)
 5074 {
 5075   return 1;
 5076 }
 5077 
 5078 static inline int
 5079 __always_inline
 5080 do_set_perturb_byte (int32_t value)
 5081 {
 5082   LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
 5083   perturb_byte = value;
 5084   return 1;
 5085 }
 5086 
 5087 static inline int
 5088 __always_inline
 5089 do_set_arena_test (size_t value)
 5090 {
 5091   LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
 5092   mp_.arena_test = value;
 5093   return 1;
 5094 }
 5095 
 5096 static inline int
 5097 __always_inline
 5098 do_set_arena_max (size_t value)
 5099 {
 5100   LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
 5101   mp_.arena_max = value;
 5102   return 1;
 5103 }
 5104 
 5105 #if USE_TCACHE
 5106 static inline int
 5107 __always_inline
 5108 do_set_tcache_max (size_t value)
 5109 {
 5110   if (value >= 0 && value <= MAX_TCACHE_SIZE)
 5111     {
 5112       LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
 5113       mp_.tcache_max_bytes = value;
 5114       mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
 5115     }
 5116   return 1;
 5117 }
 5118 
 5119 static inline int
 5120 __always_inline
 5121 do_set_tcache_count (size_t value)
 5122 {
 5123   LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
 5124   mp_.tcache_count = value;
 5125   return 1;
 5126 }
 5127 
 5128 static inline int
 5129 __always_inline
 5130 do_set_tcache_unsorted_limit (size_t value)
 5131 {
 5132   LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
 5133   mp_.tcache_unsorted_limit = value;
 5134   return 1;
 5135 }
 5136 #endif
 5137 
 5138 int
 5139 __libc_mallopt (int param_number, int value)
 5140 {
 5141   mstate av = &main_arena;
 5142   int res = 1;
 5143 
 5144   if (__malloc_initialized < 0)
 5145     ptmalloc_init ();
 5146   __libc_lock_lock (av->mutex);
 5147 
 5148   LIBC_PROBE (memory_mallopt, 2, param_number, value);
 5149 
 5150   /* We must consolidate main arena before changing max_fast
 5151      (see definition of set_max_fast).  */
 5152   malloc_consolidate (av);
 5153 
 5154   switch (param_number)
 5155     {
 5156     case M_MXFAST:
 5157       if (value >= 0 && value <= MAX_FAST_SIZE)
 5158         {
 5159           LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
 5160           set_max_fast (value);
 5161         }
 5162       else
 5163         res = 0;
 5164       break;
 5165 
 5166     case M_TRIM_THRESHOLD:
 5167       do_set_trim_threshold (value);
 5168       break;
 5169 
 5170     case M_TOP_PAD:
 5171       do_set_top_pad (value);
 5172       break;
 5173 
 5174     case M_MMAP_THRESHOLD:
 5175       res = do_set_mmap_threshold (value);
 5176       break;
 5177 
 5178     case M_MMAP_MAX:
 5179       do_set_mmaps_max (value);
 5180       break;
 5181 
 5182     case M_CHECK_ACTION:
 5183       do_set_mallopt_check (value);
 5184       break;
 5185 
 5186     case M_PERTURB:
 5187       do_set_perturb_byte (value);
 5188       break;
 5189 
 5190     case M_ARENA_TEST:
 5191       if (value > 0)
 5192     do_set_arena_test (value);
 5193       break;
 5194 
 5195     case M_ARENA_MAX:
 5196       if (value > 0)
 5197     do_set_arena_max (value);
 5198       break;
 5199     }
 5200   __libc_lock_unlock (av->mutex);
 5201   return res;
 5202 }
 5203 libc_hidden_def (__libc_mallopt)
 5204 
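/* A usage sketch for mallopt (illustration only; the helper and the chosen
   values are hypothetical).  The M_* constants are declared in <malloc.h>;
   as the handlers above show, setting the mmap or trim parameters also
   disables their dynamic adjustment.  mallopt returns 1 on success and 0
   if the value is rejected.

     #include <malloc.h>

     // Hypothetical early-startup tuning.
     static void tune_allocator (void)
     {
       mallopt (M_MMAP_THRESHOLD, 1024 * 1024);   // mmap requests >= 1 MiB
       mallopt (M_TRIM_THRESHOLD, 256 * 1024);    // trim when top >= 256 KiB
       mallopt (M_MXFAST, 0);                     // 0 disables fastbins
     }
 */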
 5205 
 5206 /*
 5207    -------------------- Alternative MORECORE functions --------------------
 5208  */
 5209 
 5210 
 5211 /*
 5212    General Requirements for MORECORE.
 5213 
 5214    The MORECORE function must have the following properties:
 5215 
 5216    If MORECORE_CONTIGUOUS is false:
 5217 
 5218  * MORECORE must allocate in multiples of pagesize. It will
 5219       only be called with arguments that are multiples of pagesize.
 5220 
 5221  * MORECORE(0) must return an address that is at least
 5222       MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
 5223 
 5224    else (i.e. if MORECORE_CONTIGUOUS is true):
 5225 
 5226  * Consecutive calls to MORECORE with positive arguments
 5227       return increasing addresses, indicating that space has been
 5228       contiguously extended.
 5229 
 5230  * MORECORE need not allocate in multiples of pagesize.
 5231       Calls to MORECORE need not have args of multiples of pagesize.
 5232 
 5233  * MORECORE need not page-align.
 5234 
 5235    In either case:
 5236 
 5237  * MORECORE may allocate more memory than requested. (Or even less,
 5238       but this will generally result in a malloc failure.)
 5239 
 5240  * MORECORE must not allocate memory when given argument zero, but
 5241       instead return one past the end address of memory from previous
 5242       nonzero call. This malloc does NOT call MORECORE(0)
 5243       until at least one call with positive arguments is made, so
 5244       the initial value returned is not important.
 5245 
 5246  * Even though consecutive calls to MORECORE need not return contiguous
 5247       addresses, it must be OK for malloc'ed chunks to span multiple
 5248       regions in those cases where they do happen to be contiguous.
 5249 
 5250  * MORECORE need not handle negative arguments -- it may instead
 5251       just return MORECORE_FAILURE when given negative arguments.
 5252       Negative arguments are always multiples of pagesize. MORECORE
 5253       must not misinterpret negative args as large positive unsigned
 5254       args. You can suppress all such calls from even occurring by defining
 5255       MORECORE_CANNOT_TRIM.
 5256 
 5257    There is some variation across systems about the type of the
 5258    argument to sbrk/MORECORE. Since size_t is unsigned, the argument cannot
 5259    actually be size_t, because sbrk supports negative args, so it is
 5260    normally the signed type of the same width as size_t (sometimes
 5261    declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
 5262    matter though. Internally, we use "long" as arguments, which should
 5263    work across all reasonable possibilities.
 5264 
 5265    Additionally, if MORECORE ever returns failure for a positive
 5266    request, then mmap is used as a noncontiguous system allocator. This
 5267    is a useful backup strategy for systems with holes in address spaces
 5268    -- in this case sbrk cannot contiguously expand the heap, but mmap
 5269    may be able to map noncontiguous space.
 5270 
 5271    If you'd like mmap to ALWAYS be used, you can define MORECORE to be
 5272    a function that always returns MORECORE_FAILURE.
 5273 
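   For instance, a sketch (names hypothetical; match the argument type your
   MORECORE actually uses, normally the signed counterpart of size_t as
   discussed below):

    static void *morecore_always_fail (ptrdiff_t increment)
    {
      return (void *) MORECORE_FAILURE;   // force the mmap fallback
    }

 *#define MORECORE morecore_always_fail
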
 5274    If you are using this malloc with something other than sbrk (or its
 5275    emulation) to supply memory regions, you probably want to set
 5276    MORECORE_CONTIGUOUS as false.  As an example, here is a custom
 5277    allocator kindly contributed for classic (pre-OS X) Mac OS.  It uses virtually
 5278    but not necessarily physically contiguous non-paged memory (locked
 5279    in, present and won't get swapped out).  You can use it by
 5280    uncommenting this section, adding some #includes, and setting up the
 5281    appropriate defines above:
 5282 
 5283  *#define MORECORE osMoreCore
 5284  *#define MORECORE_CONTIGUOUS 0
 5285 
 5286    There is also a shutdown routine that should somehow be called for
 5287    cleanup upon program exit.
 5288 
 5289  *#define MAX_POOL_ENTRIES 100
 5290  *#define MINIMUM_MORECORE_SIZE  (64 * 1024)
 5291    static int next_os_pool;
 5292    void *our_os_pools[MAX_POOL_ENTRIES];
 5293 
 5294    void *osMoreCore(int size)
 5295    {
 5296     void *ptr = 0;
 5297     static void *sbrk_top = 0;
 5298 
 5299     if (size > 0)
 5300     {
 5301       if (size < MINIMUM_MORECORE_SIZE)
 5302          size = MINIMUM_MORECORE_SIZE;
 5303       if (CurrentExecutionLevel() == kTaskLevel)
 5304          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
 5305       if (ptr == 0)
 5306       {
 5307         return (void *) MORECORE_FAILURE;
 5308       }
 5309       // save ptrs so they can be freed during cleanup
 5310       our_os_pools[next_os_pool] = ptr;
 5311       next_os_pool++;
 5312       ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
 5313       sbrk_top = (char *) ptr + size;
 5314       return ptr;
 5315     }
 5316     else if (size < 0)
 5317     {
 5318       // we don't currently support shrink behavior
 5319       return (void *) MORECORE_FAILURE;
 5320     }
 5321     else
 5322     {
 5323       return sbrk_top;
 5324     }
 5325    }
 5326 
 5327    // cleanup any allocated memory pools
 5328    // called as last thing before shutting down driver
 5329 
 5330    void osCleanupMem(void)
 5331    {
 5332     void **ptr;
 5333 
 5334     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
 5335       if (*ptr)
 5336       {
 5337          PoolDeallocate(*ptr);
 5338          *ptr = 0;
 5339       }
 5340    }
 5341 
 5342  */
 5343 
 5344 
 5345 /* Helper code.  */
 5346 
 5347 extern char **__libc_argv attribute_hidden;
 5348 
 5349 static void
 5350 malloc_printerr (const char *str)
 5351 {
 5352   __libc_message (do_abort, "%s\n", str);
 5353   __builtin_unreachable ();
 5354 }
 5355 
 5356 /* We need a wrapper function for one of the additions of POSIX.  */
 5357 int
 5358 __posix_memalign (void **memptr, size_t alignment, size_t size)
 5359 {
 5360   void *mem;
 5361 
 5362   /* Test whether the ALIGNMENT argument is valid.  It must be a power of
 5363      two multiple of sizeof (void *).  */
 5364   if (alignment % sizeof (void *) != 0
 5365       || !powerof2 (alignment / sizeof (void *))
 5366       || alignment == 0)
 5367     return EINVAL;
 5368 
 5369 
 5370   void *address = RETURN_ADDRESS (0);
 5371   mem = _mid_memalign (alignment, size, address);
 5372 
 5373   if (mem != NULL)
 5374     {
 5375       *memptr = mem;
 5376       return 0;
 5377     }
 5378 
 5379   return ENOMEM;
 5380 }
 5381 weak_alias (__posix_memalign, posix_memalign)
 5382 
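/* A usage sketch for the POSIX wrapper above (illustration only; the
   helper is hypothetical).  posix_memalign is declared in <stdlib.h>; on
   success the aligned block is stored through MEMPTR and 0 is returned,
   otherwise EINVAL or ENOMEM is returned directly, as the code above shows.

     #include <stdlib.h>

     // Hypothetical helper: allocate SIZE bytes aligned to 4096, which is
     // a power-of-two multiple of sizeof (void *).
     static void *alloc_page_aligned (size_t size)
     {
       void *p = NULL;
       int err = posix_memalign (&p, 4096, size);
       return err == 0 ? p : NULL;
     }
 */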
 5383 
 5384 int
 5385 __malloc_info (int options, FILE *fp)
 5386 {
 5387   /* For now, at least.  */
 5388   if (options != 0)
 5389     return EINVAL;
 5390 
 5391   int n = 0;
 5392   size_t total_nblocks = 0;
 5393   size_t total_nfastblocks = 0;
 5394   size_t total_avail = 0;
 5395   size_t total_fastavail = 0;
 5396   size_t total_system = 0;
 5397   size_t total_max_system = 0;
 5398   size_t total_aspace = 0;
 5399   size_t total_aspace_mprotect = 0;
 5400 
 5401 
 5402 
 5403   if (__malloc_initialized < 0)
 5404     ptmalloc_init ();
 5405 
 5406   fputs ("<malloc version=\"1\">\n", fp);
 5407 
 5408   /* Iterate over all arenas currently in use.  */
 5409   mstate ar_ptr = &main_arena;
 5410   do
 5411     {
 5412       fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
 5413 
 5414       size_t nblocks = 0;
 5415       size_t nfastblocks = 0;
 5416       size_t avail = 0;
 5417       size_t fastavail = 0;
 5418       struct
 5419       {
 5420     size_t from;
 5421     size_t to;
 5422     size_t total;
 5423     size_t count;
 5424       } sizes[NFASTBINS + NBINS - 1];
 5425 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
 5426 
 5427       __libc_lock_lock (ar_ptr->mutex);
 5428 
 5429       for (size_t i = 0; i < NFASTBINS; ++i)
 5430     {
 5431       mchunkptr p = fastbin (ar_ptr, i);
 5432       if (p != NULL)
 5433         {
 5434           size_t nthissize = 0;
 5435           size_t thissize = chunksize (p);
 5436 
 5437           while (p != NULL)
 5438         {
 5439           ++nthissize;
 5440           p = p->fd;
 5441         }
 5442 
 5443           fastavail += nthissize * thissize;
 5444           nfastblocks += nthissize;
 5445           sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
 5446           sizes[i].to = thissize;
 5447           sizes[i].count = nthissize;
 5448         }
 5449       else
 5450         sizes[i].from = sizes[i].to = sizes[i].count = 0;
 5451 
 5452       sizes[i].total = sizes[i].count * sizes[i].to;
 5453     }
 5454 
 5455 
 5456       mbinptr bin;
 5457       struct malloc_chunk *r;
 5458 
 5459       for (size_t i = 1; i < NBINS; ++i)
 5460     {
 5461       bin = bin_at (ar_ptr, i);
 5462       r = bin->fd;
 5463       sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
 5464       sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
 5465                       = sizes[NFASTBINS - 1 + i].count = 0;
 5466 
 5467       if (r != NULL)
 5468         while (r != bin)
 5469           {
 5470         size_t r_size = chunksize_nomask (r);
 5471         ++sizes[NFASTBINS - 1 + i].count;
 5472         sizes[NFASTBINS - 1 + i].total += r_size;
 5473         sizes[NFASTBINS - 1 + i].from
 5474           = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
 5475         sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
 5476                            r_size);
 5477 
 5478         r = r->fd;
 5479           }
 5480 
 5481       if (sizes[NFASTBINS - 1 + i].count == 0)
 5482         sizes[NFASTBINS - 1 + i].from = 0;
 5483       nblocks += sizes[NFASTBINS - 1 + i].count;
 5484       avail += sizes[NFASTBINS - 1 + i].total;
 5485     }
 5486 
 5487       size_t heap_size = 0;
 5488       size_t heap_mprotect_size = 0;
 5489       size_t heap_count = 0;
 5490       if (ar_ptr != &main_arena)
 5491     {
 5492       /* Iterate over the arena heaps from back to front.  */
 5493       heap_info *heap = heap_for_ptr (top (ar_ptr));
 5494       do
 5495         {
 5496           heap_size += heap->size;
 5497           heap_mprotect_size += heap->mprotect_size;
 5498           heap = heap->prev;
 5499           ++heap_count;
 5500         }
 5501       while (heap != NULL);
 5502     }
 5503 
 5504       __libc_lock_unlock (ar_ptr->mutex);
 5505 
 5506       total_nfastblocks += nfastblocks;
 5507       total_fastavail += fastavail;
 5508 
 5509       total_nblocks += nblocks;
 5510       total_avail += avail;
 5511 
 5512       for (size_t i = 0; i < nsizes; ++i)
 5513     if (sizes[i].count != 0 && i != NFASTBINS)
 5514       fprintf (fp, "                                  \
 5515   <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
 5516            sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
 5517 
 5518       if (sizes[NFASTBINS].count != 0)
 5519     fprintf (fp, "\
 5520   <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
 5521          sizes[NFASTBINS].from, sizes[NFASTBINS].to,
 5522          sizes[NFASTBINS].total, sizes[NFASTBINS].count);
 5523 
 5524       total_system += ar_ptr->system_mem;
 5525       total_max_system += ar_ptr->max_system_mem;
 5526 
 5527       fprintf (fp,
 5528            "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
 5529            "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
 5530            "<system type=\"current\" size=\"%zu\"/>\n"
 5531            "<system type=\"max\" size=\"%zu\"/>\n",
 5532            nfastblocks, fastavail, nblocks, avail,
 5533            ar_ptr->system_mem, ar_ptr->max_system_mem);
 5534 
 5535       if (ar_ptr != &main_arena)
 5536     {
 5537       fprintf (fp,
 5538            "<aspace type=\"total\" size=\"%zu\"/>\n"
 5539            "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
 5540            "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
 5541            heap_size, heap_mprotect_size, heap_count);
 5542       total_aspace += heap_size;
 5543       total_aspace_mprotect += heap_mprotect_size;
 5544     }
 5545       else
 5546     {
 5547       fprintf (fp,
 5548            "<aspace type=\"total\" size=\"%zu\"/>\n"
 5549            "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
 5550            ar_ptr->system_mem, ar_ptr->system_mem);
 5551       total_aspace += ar_ptr->system_mem;
 5552       total_aspace_mprotect += ar_ptr->system_mem;
 5553     }
 5554 
 5555       fputs ("</heap>\n", fp);
 5556       ar_ptr = ar_ptr->next;
 5557     }
 5558   while (ar_ptr != &main_arena);
 5559 
 5560   fprintf (fp,
 5561        "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
 5562        "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
 5563        "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
 5564        "<system type=\"current\" size=\"%zu\"/>\n"
 5565        "<system type=\"max\" size=\"%zu\"/>\n"
 5566        "<aspace type=\"total\" size=\"%zu\"/>\n"
 5567        "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
 5568        "</malloc>\n",
 5569        total_nfastblocks, total_fastavail, total_nblocks, total_avail,
 5570        mp_.n_mmaps, mp_.mmapped_mem,
 5571        total_system, total_max_system,
 5572        total_aspace, total_aspace_mprotect);
 5573 
 5574   return 0;
 5575 }
 5576 weak_alias (__malloc_info, malloc_info)
 5577 
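/* A usage sketch for malloc_info (illustration only; the helper and file
   handling are hypothetical).  It is declared in <malloc.h>; OPTIONS must
   currently be 0, and the XML document built above is written to the
   given stdio stream.

     #include <malloc.h>
     #include <stdio.h>

     // Hypothetical helper: dump the allocator state as XML to PATH.
     static int dump_malloc_xml (const char *path)
     {
       FILE *fp = fopen (path, "w");
       if (fp == NULL)
         return -1;
       int rc = malloc_info (0, fp);   // 0 on success, nonzero if OPTIONS != 0
       fclose (fp);
       return rc;
     }
 */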
 5578 
 5579 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
 5580 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
 5581 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
 5582 strong_alias (__libc_memalign, __memalign)
 5583 weak_alias (__libc_memalign, memalign)
 5584 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
 5585 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
 5586 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
 5587 strong_alias (__libc_mallinfo, __mallinfo)
 5588 weak_alias (__libc_mallinfo, mallinfo)
 5589 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
 5590 
 5591 weak_alias (__malloc_stats, malloc_stats)
 5592 weak_alias (__malloc_usable_size, malloc_usable_size)
 5593 weak_alias (__malloc_trim, malloc_trim)
 5594 
 5595 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
 5596 compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
 5597 #endif
 5598 
 5599 /* ------------------------------------------------------------
 5600    History:
 5601 
 5602    [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
 5603 
 5604  */
 5605 /*
 5606  * Local variables:
 5607  * c-basic-offset: 2
 5608  * End:
 5609  */